Java Code Examples for org.apache.hadoop.hdfs.MiniDFSCluster.Builder#build()

The following examples show how to use org.apache.hadoop.hdfs.MiniDFSCluster.Builder#build(). Each example comes from an open-source project; the source file and license are noted above each snippet.
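For orientation, here is a minimal sketch of the build-and-teardown pattern the examples below rely on. The single-DataNode setting and the try/finally shutdown are illustrative choices, not taken from any of the quoted projects:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class MiniDfsClusterSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    // build() formats the NameNode storage and starts the in-process daemons.
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(1)   // one DataNode is enough for most unit tests (illustrative)
        .build();
    try {
      cluster.waitActive();                                   // block until NameNode and DataNode are up
      System.out.println(cluster.getFileSystem().getUri());   // e.g. hdfs://localhost:<port>
    } finally {
      cluster.shutdown();                                     // stop the daemons and release the ports
    }
  }
}

Several of the examples below leave the shutdown to their surrounding test harness; in standalone code, calling shutdown() is what stops the daemons and releases the ports.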
Example 1
Source File: HdfsSortedOplogOrganizerJUnitTest.java    From gemfirexd-oss with Apache License 2.0
private MiniDFSCluster initMiniHACluster(int nn1port, int nn2port)
    throws IOException {
  Configuration confForMiniDFS = new Configuration();
  
  // Two NameNodes under nameservice "ns1" bring the cluster up in HA mode;
  // both start in standby until one is explicitly transitioned to active below.
  Builder builder = new MiniDFSCluster.Builder(confForMiniDFS)
      .nnTopology(new MiniDFSNNTopology()
          .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
              .addNN(new MiniDFSNNTopology.NNConf("nn1").setIpcPort(nn1port))
              .addNN(new MiniDFSNNTopology.NNConf("nn2").setIpcPort(nn2port))))
      .numDataNodes(1);
  
  MiniDFSCluster cluster = builder.build();
  cluster.waitActive();

  NameNode nnode1 = cluster.getNameNode(0);
  assertTrue(nnode1.isStandbyState());
  NameNode nnode2 = cluster.getNameNode(1);
  assertTrue(nnode2.isStandbyState());

  cluster.transitionToActive(0);
  assertFalse(nnode1.isStandbyState());
  return cluster;
}
 
Example 2
Source File: CreateHDFSStoreTest.java    From gemfirexd-oss with Apache License 2.0
public static MiniDFSCluster initMiniCluster(int port, int numDN, HashMap<String, String> map) throws Exception {
  System.setProperty("test.build.data", "hdfs-test-cluster");
  Configuration hconf = new HdfsConfiguration();
  for (Entry<String, String> entry : map.entrySet()) {
    hconf.set(entry.getKey(), entry.getValue());
  }

  Builder builder = new MiniDFSCluster.Builder(hconf);
  builder.numDataNodes(numDN);
  builder.nameNodePort(port);
  MiniDFSCluster cluster = builder.build();
  return cluster;
}
 
Example 3
Source File: BaseHoplogTestCase.java    From gemfirexd-oss with Apache License 2.0
public static MiniDFSCluster initMiniCluster(int port, int numDN, HashMap<String, String> map) throws Exception {
  System.setProperty("test.build.data", "hdfs-test-cluster");
  Configuration hconf = new HdfsConfiguration();
  for (Entry<String, String> entry : map.entrySet()) {
    hconf.set(entry.getKey(), entry.getValue());
  }

  hconf.set("dfs.namenode.fs-limits.min-block-size", "1024");
  
  Builder builder = new MiniDFSCluster.Builder(hconf);
  builder.numDataNodes(numDN);
  builder.nameNodePort(port);
  MiniDFSCluster cluster = builder.build();
  return cluster;
}
 
Example 4
Source File: HdfsErrorHandlingJunitTest.java    From gemfirexd-oss with Apache License 2.0
private void initMiniCluster(Configuration hconf, int numDataNodes)
    throws IOException {
  Builder builder = new MiniDFSCluster.Builder(hconf);
  builder.numDataNodes(numDataNodes);
  builder.nameNodePort(CLUSTER_PORT);
  cluster = builder.build();
}
 
Example 5
Source File: DDLPersistenceHDFSTest.java    From gemfirexd-oss with Apache License 2.0
public void testInsertWithHDFSDown() throws Exception {
    int clusterPort = AvailablePortHelper.getRandomAvailableTCPPort();
    System.setProperty("test.build.data", HDFS_DIR);
    Configuration hconf = new HdfsConfiguration();
    // hconf.set("hadoop.log.dir", "/tmp/hdfs/logs");
    hconf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
    Builder builder = new MiniDFSCluster.Builder(hconf);
    builder.numDataNodes(2);
    builder.nameNodePort(clusterPort);
    MiniDFSCluster cluster = builder.build();
    
    Properties props = new Properties();
    int mcastPort = AvailablePort.getRandomAvailablePort(AvailablePort.JGROUPS);
    props.put("mcast-port", String.valueOf(mcastPort));    
    Connection conn = TestUtil.getConnection(props);
    Statement st = conn.createStatement();
    st.execute("create schema emp");
    st.execute("set schema emp");
    addExpectedException(ConnectException.class);
    st.execute("create hdfsstore myhdfs namenode 'hdfs://localhost:" + clusterPort
        + "' homedir '" + HDFS_DIR + "' BATCHTIMEINTERVAL 1 milliseconds");
    GemFireCacheImpl.getInstance().getLogger().info("<ExpectedException action=add>" + expectedExceptions + "</ExpectedException>");
    st.execute("create table mytab (col1 int primary key) hdfsstore (myhdfs) eviction by criteria (col1 < 1000) evict incoming");
    st.execute("insert into mytab values (5)");

    //Wait for data to be flushed to hdfs
    Thread.sleep(5000);

    //query hdfs, which will open a reader
    st.execute("select * from mytab  -- GEMFIREXD-PROPERTIES queryHDFS=true \n where col1=5");
    cluster.shutdownNameNodes();

//    try {
//      st.execute("insert into mytab values (118)");
//      fail("expected exception in connecting to unavailable HDFS store");
//    } catch (SQLException e) {
//      if (!"X0Z30".equals(e.getSQLState())) {
//        throw e;
//      }
//      if (!HDFSIOException.class.equals(e.getCause().getClass())) {
//        throw e;
//      }
//    }

    cluster.restartNameNode();
    cluster.restartDataNodes();

    //Wait for namenode to leave safe mode
    Thread.sleep(10000);

    st.execute("insert into mytab values (118)");
    
    //query hdfs to trigger scan
    st.execute("select * from mytab  -- GEMFIREXD-PROPERTIES queryHDFS=true \n");

    GemFireCacheImpl.getInstance().getLogger().info("<ExpectedException action=remove>" + expectedExceptions + "</ExpectedException>");
    st.execute("drop table mytab");
    st.execute("drop hdfsstore myhdfs");
    cluster.shutdownDataNodes();
    cluster.shutdownNameNodes();
    TestUtil.shutDown();
  }
 