Java Code Examples for org.junit.Assume#assumeTrue()

The following examples show how to use org.junit.Assume#assumeTrue(). They are drawn from a range of open-source projects; the source file, project, and license for each example are noted above its code.
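Before the examples, a minimal sketch of the method's behaviour (the class name and condition are illustrative, not from any project below): when the condition passed to assumeTrue is false, it throws an AssumptionViolatedException, and JUnit 4 reports the test as skipped rather than failed.

import org.junit.Assume;
import org.junit.Test;

public class AssumeTrueSketch {

    @Test
    public void runsOnlyOnLinux() {
        // Aborts here on non-Linux hosts; the test shows up as skipped
        // (an assumption failure), not as a test failure.
        Assume.assumeTrue("Requires a Linux host",
                System.getProperty("os.name").toLowerCase().contains("linux"));

        // Reached only when the assumption holds.
    }
}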
Example 1
Source File: EnumTest.java    From jcifs-ng with GNU Lesser General Public License v2.1
@Ignore ( "This causes a connection to whatever local master browser is available, config may be incompatible with it" )
@Test
public void testBrowse () throws MalformedURLException, CIFSException {
    CIFSContext ctx = withAnonymousCredentials();
    try ( SmbFile smbFile = new SmbFile("smb://", ctx) ) {
        try ( CloseableIterator<SmbResource> it = smbFile.children() ) {
            while ( it.hasNext() ) {
                try ( SmbResource serv = it.next() ) {
                    System.err.println(serv.getName());
                }
            }
        }
    }
    catch ( SmbUnsupportedOperationException e ) {
        Assume.assumeTrue("Browsing unsupported", false);
    }
}
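A pattern worth noting, which recurs in Examples 4 and 10: an operation the server may not support is attempted, and the resulting SmbUnsupportedOperationException is converted into a skipped test by calling assumeTrue with a constant false condition. A condensed sketch of the shape (resource and method names are hypothetical):

try {
    resource.someOptionalOperation(); // may be unsupported by the server
}
catch ( SmbUnsupportedOperationException e ) {
    // A false condition always aborts the test, so it is reported
    // as skipped instead of failed.
    Assume.assumeTrue("Operation unsupported by this server", false);
}

JUnit 4 also provides Assume.assumeNoException(e) for exactly this exception-to-skip conversion.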
 
Example 2
Source File: ContinuousFileProcessingTest.java    From flink with Apache License 2.0
@BeforeClass
public static void createHDFS() {
	Assume.assumeTrue("HDFS cluster cannot be start on Windows without extensions.", !OperatingSystem.isWindows());

	try {
		File hdfsDir = tempFolder.newFolder();

		org.apache.hadoop.conf.Configuration hdConf = new org.apache.hadoop.conf.Configuration();
		hdConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, hdfsDir.getAbsolutePath());
		hdConf.set("dfs.block.size", String.valueOf(1048576)); // this is the minimum we can set.

		MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(hdConf);
		hdfsCluster = builder.build();

		hdfsURI = "hdfs://" + hdfsCluster.getURI().getHost() + ":" + hdfsCluster.getNameNodePort() + "/";
		hdfs = new org.apache.hadoop.fs.Path(hdfsURI).getFileSystem(hdConf);

	} catch (Throwable e) {
		e.printStackTrace();
		Assert.fail("Test failed " + e.getMessage());
	}
}
 
Example 3
Source File: DFSTestUtil.java    From big-c with Apache License 2.0
public ShortCircuitTestContext(String testName) {
  this.testName = testName;
  this.sockDir = new TemporarySocketDirectory();
  DomainSocket.disableBindPathValidation();
  formerTcpReadsDisabled = DFSInputStream.tcpReadsDisabledForTesting;
  // Skip when native domain-socket support failed to load.
  Assume.assumeTrue(DomainSocket.getLoadingFailureReason() == null);
}
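Note that the assumption here sits in a helper's constructor rather than in a @Test method; the exception thrown by a failed assumption simply propagates up and skips whichever test created the context. A hypothetical guard method along the same lines:

static void assumeDomainSocketsUsable() {
    // Skips the calling test when native domain-socket support
    // failed to load (getLoadingFailureReason() is non-null).
    Assume.assumeTrue("Native domain sockets unavailable",
        DomainSocket.getLoadingFailureReason() == null);
}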
 
Example 4
Source File: FileAttributesTest.java    From jcifs with GNU Lesser General Public License v2.1
@Test
public void testGetGroup () throws IOException {
    try ( SmbResource f = getDefaultShareRoot() ) {
        try {
            jcifs.SID security = f.getOwnerGroup();
            assertNotNull(security);
        }
        catch ( SmbUnsupportedOperationException e ) {
            Assume.assumeTrue("No Ntsmbs", false);
        }
    }
}
 
Example 5
Source File: ALPNTest.java    From wildfly-openssl with Apache License 2.0
@Test
public void testALPNFailure() throws IOException, NoSuchAlgorithmException, InterruptedException {
    Assume.assumeTrue(OpenSSLEngine.isAlpnSupported());
    try (ServerSocket serverSocket = SSLTestUtils.createServerSocket()) {
        final AtomicReference<byte[]> sessionID = new AtomicReference<>();
        final SSLContext sslContext = SSLTestUtils.createSSLContext("openssl.TLSv1.2");
        final SSLContext clientSslContext = SSLTestUtils.createClientSSLContext("openssl.TLSv1.2");
        final AtomicReference<OpenSSLEngine> engineAtomicReference = new AtomicReference<>();
        Thread acceptThread = new Thread(new EchoRunnable(serverSocket, sslContext, sessionID, (engine -> {
            OpenSSLEngine openSSLEngine = (OpenSSLEngine) engine;
            openSSLEngine.setApplicationProtocols("h2", "h2/13", "http");
            engineAtomicReference.set(openSSLEngine);
            return openSSLEngine;
        })));
        acceptThread.start();
        final OpenSSLSocket socket = (OpenSSLSocket) clientSslContext.getSocketFactory().createSocket();
        socket.connect(SSLTestUtils.createSocketAddress());
        socket.getOutputStream().write(MESSAGE.getBytes(StandardCharsets.US_ASCII));
        byte[] data = new byte[100];
        int read = socket.getInputStream().read(data);

        Assert.assertEquals(MESSAGE, new String(data, 0, read));
        Assert.assertNull("server side", engineAtomicReference.get().getSelectedApplicationProtocol());
        Assert.assertNull("client side", socket.getSelectedApplicationProtocol());
        serverSocket.close();
        acceptThread.join();
    }
}
 
Example 6
Source File: TestBlockReaderLocalLegacy.java    From hadoop with Apache License 2.0
@Test
public void testBothOldAndNewShortCircuitConfigured() throws Exception {
  final short REPL_FACTOR = 1;
  final int FILE_LENGTH = 512;
  Assume.assumeTrue(null == DomainSocket.getLoadingFailureReason());
  TemporarySocketDirectory socketDir = new TemporarySocketDirectory();
  HdfsConfiguration conf = getConfiguration(socketDir);
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
  socketDir.close();
  FileSystem fs = cluster.getFileSystem();

  Path path = new Path("/foo");
  byte orig[] = new byte[FILE_LENGTH];
  for (int i = 0; i < orig.length; i++) {
    orig[i] = (byte)(i%10);
  }
  FSDataOutputStream fos = fs.create(path, (short)1);
  fos.write(orig);
  fos.close();
  DFSTestUtil.waitReplication(fs, path, REPL_FACTOR);
  FSDataInputStream fis = cluster.getFileSystem().open(path);
  byte buf[] = new byte[FILE_LENGTH];
  IOUtils.readFully(fis, buf, 0, FILE_LENGTH);
  fis.close();
  Assert.assertArrayEquals(orig, buf);
  cluster.shutdown();
}
 
Example 7
Source File: TestLinuxContainerExecutor.java    From hadoop with Apache License 2.0
@Test(timeout = 10000)
public void testPostExecuteAfterReacquisition() throws Exception {
  // Skip unless a Linux container executor is configured for this
  // environment (see shouldRun()).
  Assume.assumeTrue(shouldRun());
  // make up some bogus container ID
  ApplicationId appId = ApplicationId.newInstance(12345, 67890);
  ApplicationAttemptId attemptId =
      ApplicationAttemptId.newInstance(appId, 54321);
  ContainerId cid = ContainerId.newContainerId(attemptId, 9876);

  Configuration conf = new YarnConfiguration();
  conf.setClass(YarnConfiguration.NM_LINUX_CONTAINER_RESOURCES_HANDLER,
    TestResourceHandler.class, LCEResourcesHandler.class);
  LinuxContainerExecutor lce = new LinuxContainerExecutor();
  lce.setConf(conf);
  try {
    lce.init();
  } catch (IOException e) {
    // expected if LCE isn't setup right, but not necessary for this test
  }

  Container container = mock(Container.class);
  ContainerLaunchContext context = mock(ContainerLaunchContext.class);
  HashMap<String, String> env = new HashMap<>();

  when(container.getLaunchContext()).thenReturn(context);
  when(context.getEnvironment()).thenReturn(env);

  lce.reacquireContainer(new ContainerReacquisitionContext.Builder()
      .setContainer(container)
      .setUser("foouser")
      .setContainerId(cid)
      .build());
  assertTrue("postExec not called after reacquisition",
      TestResourceHandler.postExecContainers.contains(cid));
}
 
Example 8
Source File: LifecycleIT.java    From spydra with Apache License 2.0
@Test
public void testLifecycle() throws Exception {
  // The Hadoop credentials provider is buggy: it tries to contact the GCP
  // metadata service if we use the default account
  Assume.assumeTrue("Skipping lifecycle test, not running on gce and "
                    + "GOOGLE_APPLICATION_CREDENTIALS not set",
      hasApplicationJsonOrRunningOnGce());
  SpydraArgument testArgs = SpydraArgumentUtil.loadArguments("integration-test-config.json");
  SpydraArgument arguments = SpydraArgumentUtil
      .dataprocConfiguration(CLIENT_ID, testArgs.getLogBucket(), testArgs.getRegion());
  arguments.getCluster().numWorkers(3);
  arguments.getSubmit().jar(getExamplesJarPath());
  arguments.getSubmit().setJobArgs(Arrays.asList("pi", "1", "1"));

  // TODO We should test the init action as well, but uploading it before running the test is tricky
  // We could upload it manually to a test bucket here and set the right things
  arguments.getCluster().getOptions().remove(SpydraArgument.OPTION_INIT_ACTIONS);

  // Merge to get all other custom test arguments
  arguments = SpydraArgument.merge(arguments, testArgs);

  LOGGER.info("Using following service account to run gcloud commands locally: " +
      arguments.getCluster().getOptions().get(SpydraArgument.OPTION_ACCOUNT));
  Submitter submitter = Submitter.getSubmitter(arguments);
  assertTrue("job wasn't successful", submitter.executeJob(arguments));

  assertTrue(isClusterCollected(arguments));

  URI doneUri = URI.create(arguments.clusterProperties().getProperty(
      "mapred:mapreduce.jobhistory.done-dir"));
  LOGGER.info("Checking that we have two files in: " + doneUri);
  assertEquals(2, getFileCount(doneUri));
  URI intermediateUri = URI.create(arguments.clusterProperties().getProperty(
      "mapred:mapreduce.jobhistory.intermediate-done-dir"));
  LOGGER.info("Checking that we do not have any files in: " + intermediateUri);
  assertEquals(0, getFileCount(intermediateUri));
}
 
Example 9
Source File: TestSparkParquetReader.java    From iceberg with Apache License 2.0
protected void writeAndValidate(Schema schema) throws IOException {
  Assume.assumeTrue("Parquet Avro cannot write non-string map keys", null == TypeUtil.find(schema,
      type -> type.isMapType() && type.asMapType().keyType() != Types.StringType.get()));

  List<GenericData.Record> expected = RandomData.generateList(schema, 100, 0L);

  File testFile = temp.newFile();
  Assert.assertTrue("Delete should succeed", testFile.delete());

  try (FileAppender<GenericData.Record> writer = Parquet.write(Files.localOutput(testFile))
      .schema(schema)
      .named("test")
      .build()) {
    writer.addAll(expected);
  }

  try (CloseableIterable<InternalRow> reader = Parquet.read(Files.localInput(testFile))
      .project(schema)
      .createReaderFunc(type -> SparkParquetReaders.buildReader(schema, type))
      .build()) {
    Iterator<InternalRow> rows = reader.iterator();
    for (int i = 0; i < expected.size(); i += 1) {
      Assert.assertTrue("Should have expected number of rows", rows.hasNext());
      assertEqualsUnsafe(schema.asStruct(), expected.get(i), rows.next());
    }
    Assert.assertFalse("Should not have extra rows", rows.hasNext());
  }
}
 
Example 10
Source File: EnumTest.java    From jcifs with GNU Lesser General Public License v2.1
@Test
public void testDomainSeverEnum () throws MalformedURLException, CIFSException {
    try ( SmbFile smbFile = new SmbFile("smb://" + getTestDomain(), withTestNTLMCredentials(getContext())) ) {
        String[] list = smbFile.list();
        assertNotNull(list);
        log.debug(Arrays.toString(list));
    }
    catch ( SmbUnsupportedOperationException e ) {
        // Server does not support the enumeration: skip instead of failing.
        Assume.assumeTrue(false);
    }
}
 
Example 11
Source File: PngReaderTest.java    From JQF with BSD 2-Clause "Simplified" License
@Fuzz
public void read(ImageInputStream input) throws IOException {
    // Decode image from input stream
    reader.setInput(input);
    // Bound dimensions
    Assume.assumeTrue(reader.getHeight(0) < 1024);
    Assume.assumeTrue(reader.getWidth(0) < 1024);
    // Parse PNG
    reader.read(0);
}
 
Example 12
Source File: SniClientTest.java    From netty-4.1.22 with Apache License 2.0
@Test(timeout = 30000)
public void testSniSNIMatcherMatchesClientOpenSslServerOpenSsl() throws Exception {
    Assume.assumeTrue(PlatformDependent.javaVersion() >= 8);
    Assume.assumeTrue(OpenSsl.isAvailable());
    SniClientJava8TestUtil.testSniClient(SslProvider.OPENSSL, SslProvider.OPENSSL, true);
}
 
Example 13
Source File: TestCometProcessor.java    From Tomcat7.0.67 with Apache License 2.0
@Test
public void testAsyncClose() throws Exception {
    Assume.assumeTrue(
            "This test is skipped, because this connector does not support Comet.",
            isCometSupported());

    // Setup Tomcat instance
    Tomcat tomcat = getTomcatInstance();
    // No file system docBase required
    Context root = tomcat.addContext("", null);
    Tomcat.addServlet(root, "comet", new SimpleCometServlet());
    root.addServletMapping("/comet", "comet");
    Tomcat.addServlet(root, "hello", new HelloWorldServlet());
    root.addServletMapping("/hello", "hello");
    root.getPipeline().addValve(new AsyncCometCloseValve());
    tomcat.getConnector().setProperty("connectionTimeout", "5000");
    tomcat.start();

    // Create connection to Comet servlet
    final Socket socket =
        SocketFactory.getDefault().createSocket("localhost", getPort());
    socket.setSoTimeout(5000);

    final OutputStream os = socket.getOutputStream();
    String requestLine = "POST http://localhost:" + getPort() +
            "/comet HTTP/1.1\r\n";
    os.write(requestLine.getBytes());
    os.write("transfer-encoding: chunked\r\n".getBytes());
    os.write("\r\n".getBytes());

    InputStream is = socket.getInputStream();
    ResponseReaderThread readThread = new ResponseReaderThread(is);
    readThread.start();

    // Wait for the comet request/response to finish
    int count = 0;
    while (count < 10 && !readThread.getResponse().endsWith("0\r\n\r\n")) {
        Thread.sleep(500);
        count++;
    }

    if (count == 10) {
        fail("Comet request did not complete");
    }

    // Send a standard HTTP request on the same connection
    requestLine = "GET http://localhost:" + getPort() +
            "/hello HTTP/1.1\r\n";
    os.write(requestLine.getBytes());
    os.write("\r\n".getBytes());

    // Check for the expected response
    count = 0;
    while (count < 10 && !readThread.getResponse().contains(
            HelloWorldServlet.RESPONSE_TEXT)) {
        Thread.sleep(500);
        count++;
    }

    if (count == 10) {
        fail("Non-comet request did not complete");
    }

    readThread.join();
    os.close();
    is.close();
}
 
Example 14
Source File: HadoopRecoverableWriterTest.java    From Flink-CEPplus with Apache License 2.0
@BeforeClass
public static void verifyOS() {
	Assume.assumeTrue("HDFS cluster cannot be started on Windows without extensions.", !OperatingSystem.isWindows());
}
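Because the assumption lives in a @BeforeClass method, a failure here skips every test in the class, not just one (JUnit 4.12 and later report the whole class as an assumption failure). A minimal sketch of the idiom, with illustrative names:

import org.junit.Assume;
import org.junit.BeforeClass;
import org.junit.Test;

public class PosixOnlyTest {

    @BeforeClass
    public static void verifyOS() {
        // On Windows, every @Test in this class is reported as skipped.
        Assume.assumeTrue("Requires a POSIX host",
                !System.getProperty("os.name").startsWith("Windows"));
    }

    @Test
    public void posixSpecificBehaviour() {
        // ... test body ...
    }
}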
 
Example 15
Source File: HadoopMapredITCase.java    From Flink-CEPplus with Apache License 2.0
@Before
public void checkOperatingSystem() {
	// FLINK-5164 - see https://wiki.apache.org/hadoop/WindowsProblems
	Assume.assumeTrue("This test can't run successfully on Windows.", !OperatingSystem.isWindows());
}
 
Example 16
Source File: OSSTestCredentials.java    From Flink-CEPplus with Apache License 2.0
public static void assumeCredentialsAvailable() {
	Assume.assumeTrue("No OSS credentials available in this test's environment", credentialsAvailable());
}
 
Example 17
Source File: EnDecryptionTest.java    From kafka-end-2-end-encryption with Apache License 2.0
@Test
public void testAes256() throws Exception {
    // Requires the unlimited-strength JCE crypto policy; skip otherwise.
    Assume.assumeTrue(Cipher.getMaxAllowedKeyLength("AES") >= 256);
    testBasic("", 256, -1);
}
 
Example 18
Source File: JaxrsClientFilterTest.java    From opencensus-java with Apache License 2.0
@Before
public void setUp() {
  // Mockito in this test depends on some class that's only available on JDK 1.8:
  // TypeDescription$Generic$AnnotationReader$Dispatcher$ForJava8CapableVm
  Assume.assumeTrue(System.getProperty("java.version").startsWith("1.8"));
}
 
Example 19
Source File: TestSecureNameNodeWithExternalKdc.java    From big-c with Apache License 2.0
@Before
public void testExternalKdcRunning() {
  // Tests are skipped if external KDC is not running.
  Assume.assumeTrue(isExternalKdcRunning());
}
 
Example 20
Source File: LittleEndianUnsafeDirectByteBufTest.java    From netty-4.1.22 with Apache License 2.0
@Before
@Override
public void init() {
    Assume.assumeTrue("sun.misc.Unsafe not found, skip tests", PlatformDependent.hasUnsafe());
    super.init();
}