org.apache.hadoop.hdfs.MiniDFSCluster.Builder Java Examples

The following examples show how to use org.apache.hadoop.hdfs.MiniDFSCluster.Builder. Each example is taken from an open source project; the source file, project, and license are noted above it.
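
All of the examples share one basic pattern: build a cluster from a Configuration, wait for it to come up, use its FileSystem, and shut it down. As a quick orientation, here is a minimal, self-contained sketch of that pattern; it uses only API calls that appear in the examples below, while the class name and the "/demo" path are arbitrary illustrations, not part of the Hadoop API.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class MiniDFSClusterSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(1)   // one DataNode is enough for most unit tests
        .build();
    try {
      cluster.waitActive();                      // block until NameNode and DataNodes are up
      FileSystem fs = cluster.getFileSystem();   // client bound to the in-process cluster
      fs.mkdirs(new Path("/demo"));              // arbitrary path, for illustration only
      System.out.println("Mini cluster running at " + fs.getUri());
    } finally {
      cluster.shutdown();                        // release ports and temp storage
    }
  }
}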
Example #1
Source File: HdfsSortedOplogOrganizerJUnitTest.java    From gemfirexd-oss with Apache License 2.0
private MiniDFSCluster initMiniHACluster(int nn1port, int nn2port)
    throws IOException {
  Configuration confForMiniDFS = new Configuration();
  
  Builder builder = new MiniDFSCluster.Builder(confForMiniDFS)
      .nnTopology(new MiniDFSNNTopology()
          .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
              .addNN(new MiniDFSNNTopology.NNConf("nn1").setIpcPort(nn1port))
              .addNN(new MiniDFSNNTopology.NNConf("nn2").setIpcPort(nn2port))))
      .numDataNodes(1);
  
  MiniDFSCluster cluster = builder.build();
  cluster.waitActive();

  NameNode nnode1 = cluster.getNameNode(0);
  assertTrue(nnode1.isStandbyState());
  NameNode nnode2 = cluster.getNameNode(1);
  assertTrue(nnode2.isStandbyState());

  cluster.transitionToActive(0);
  assertFalse(nnode1.isStandbyState());
  return cluster;
}
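
From within the same test class, a caller would pick two free ports, start the HA pair, and fail over explicitly when a test needs the second NameNode. A hedged sketch (the port numbers are arbitrary; transitionToActive appears in the method above, and MiniDFSCluster provides the matching transitionToStandby):

  MiniDFSCluster cluster = initMiniHACluster(9000, 9001);
  try {
    // fail over from nn1 to nn2
    cluster.transitionToStandby(0);
    cluster.transitionToActive(1);
  } finally {
    cluster.shutdown();
  }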
 
Example #2
Source File: TestCopyFiles.java    From hadoop with Apache License 2.0
/** tests basedir option copying files from dfs file system to dfs file system */
public void testBasedir() throws Exception {
  String namenode = null;
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new Configuration();
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    final FileSystem hdfs = cluster.getFileSystem();
    namenode = FileSystem.getDefaultUri(conf).toString();
    if (namenode.startsWith("hdfs://")) {
      MyFile[] files = createFiles(URI.create(namenode), "/basedir/middle/srcdat");
      ToolRunner.run(new DistCpV1(conf), new String[] {
                                       "-basedir",
                                       "/basedir",
                                       namenode+"/basedir/middle/srcdat",
                                       namenode+"/destdat"});
      assertTrue("Source and destination directories do not match.",
                 checkFiles(hdfs, "/destdat/middle/srcdat", files));
      deldir(hdfs, "/destdat");
      deldir(hdfs, "/basedir");
      deldir(hdfs, "/logs");
    }
  } finally {
    if (cluster != null) { cluster.shutdown(); }
  }
}
 
Example #3
Source File: TestDataNodeRollingUpgrade.java    From big-c with Apache License 2.0
private void startCluster() throws IOException {
  conf = new HdfsConfiguration();
  conf.setInt("dfs.blocksize", 1024*1024);
  cluster = new Builder(conf).numDataNodes(REPL_FACTOR).build();
  cluster.waitActive();
  fs = cluster.getFileSystem();
  nn = cluster.getNameNode(0);
  assertNotNull(nn);
  dn0 = cluster.getDataNodes().get(0);
  assertNotNull(dn0);
  blockPoolId = cluster.getNameNode(0).getNamesystem().getBlockPoolId();
}
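
The literal "dfs.blocksize" key also has a symbolic name in the HDFS code base; assuming org.apache.hadoop.hdfs.DFSConfigKeys is on the test classpath, the same setting can be written as:

  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024 * 1024);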
 
Example #4
Source File: HDFSFileStreamSourceTest.java    From samoa with Apache License 2.0
@Before
public void setUp() throws Exception {
	// Start MiniDFSCluster
	MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(new Configuration()).hosts(HOSTS).numDataNodes(1).format(true);
	hdfsCluster = builder.build();
	hdfsCluster.waitActive();
	hdfsURI = "hdfs://localhost:"+ hdfsCluster.getNameNodePort();
	
	// Construct stream source
	streamSource = new HDFSFileStreamSource();
	
	// General config
	config = new Configuration();
	config.set("fs.defaultFS", hdfsURI);
}
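
A @Before like this normally pairs with an @After that releases the cluster; a minimal sketch, assuming the hdfsCluster field above and JUnit 4's org.junit.After:

	@After
	public void tearDown() throws Exception {
		if (hdfsCluster != null) {
			hdfsCluster.shutdown();
		}
	}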
 
Example #5
Source File: HdfsErrorHandlingJunitTest.java    From gemfirexd-oss with Apache License 2.0
private void initMiniCluster(Configuration hconf, int numDataNodes)
    throws IOException {
  Builder builder = new MiniDFSCluster.Builder(hconf);
  builder.numDataNodes(numDataNodes);
  builder.nameNodePort(CLUSTER_PORT);
  cluster = builder.build();
}
 
Example #6
Source File: BaseHoplogTestCase.java    From gemfirexd-oss with Apache License 2.0
public static MiniDFSCluster initMiniCluster(int port, int numDN, HashMap<String, String> map) throws Exception {
  System.setProperty("test.build.data", "hdfs-test-cluster");
  Configuration hconf = new HdfsConfiguration();
  for (Entry<String, String> entry : map.entrySet()) {
    hconf.set(entry.getKey(), entry.getValue());
  }

  hconf.set("dfs.namenode.fs-limits.min-block-size", "1024");
  
  Builder builder = new MiniDFSCluster.Builder(hconf);
  builder.numDataNodes(numDN);
  builder.nameNodePort(port);
  MiniDFSCluster cluster = builder.build();
  return cluster;
}
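
A test would call this helper with whatever extra HDFS properties it needs; a short usage sketch (the port and the replication override are arbitrary choices for illustration):

  HashMap<String, String> props = new HashMap<String, String>();
  props.put("dfs.replication", "1");
  MiniDFSCluster cluster = initMiniCluster(8020, 1, props);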
 
Example #7
Source File: CreateHDFSStoreTest.java    From gemfirexd-oss with Apache License 2.0
public static MiniDFSCluster initMiniCluster(int port, int numDN, HashMap<String, String> map) throws Exception {
  System.setProperty("test.build.data", "hdfs-test-cluster");
  Configuration hconf = new HdfsConfiguration();
  for (Entry<String, String> entry : map.entrySet()) {
    hconf.set(entry.getKey(), entry.getValue());
  }

  Builder builder = new MiniDFSCluster.Builder(hconf);
  builder.numDataNodes(numDN);
  builder.nameNodePort(port);
  MiniDFSCluster cluster = builder.build();
  return cluster;
}
 
Example #8
Source File: TestCopyFiles.java    From big-c with Apache License 2.0
/** test globbing  */
public void testGlobbing() throws Exception {
  String namenode = null;
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new Configuration();
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    final FileSystem hdfs = cluster.getFileSystem();
    namenode = FileSystem.getDefaultUri(conf).toString();
    if (namenode.startsWith("hdfs://")) {
      MyFile[] files = createFiles(URI.create(namenode), "/srcdat");
      ToolRunner.run(new DistCpV1(conf), new String[] {
                                       "-log",
                                       namenode+"/logs",
                                       namenode+"/srcdat/*",
                                       namenode+"/destdat"});
      assertTrue("Source and destination directories do not match.",
                 checkFiles(hdfs, "/destdat", files));
      FileSystem fs = FileSystem.get(URI.create(namenode+"/logs"), conf);
      assertTrue("Log directory does not exist.",
                 fs.exists(new Path(namenode+"/logs")));
      deldir(hdfs, "/destdat");
      deldir(hdfs, "/srcdat");
      deldir(hdfs, "/logs");
    }
  } finally {
    if (cluster != null) { cluster.shutdown(); }
  }
}
 
Example #9
Source File: TestCopyFiles.java    From big-c with Apache License 2.0
/**
 * verify that -delete option works for other {@link FileSystem}
 * implementations. See MAPREDUCE-1285 */
public void testDeleteLocal() throws Exception {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new Configuration();
    final FileSystem localfs = FileSystem.get(LOCAL_FS, conf);
    cluster = new MiniDFSCluster.Builder(conf).build();
    final FileSystem hdfs = cluster.getFileSystem();
    final String namenode = FileSystem.getDefaultUri(conf).toString();
    if (namenode.startsWith("hdfs://")) {
      MyFile[] files = createFiles(URI.create(namenode), "/srcdat");
      String destdir = TEST_ROOT_DIR + "/destdat";
      MyFile[] localFiles = createFiles(localfs, destdir);
      ToolRunner.run(new DistCpV1(conf), new String[] {
                                       "-delete",
                                       "-update",
                                       "-log",
                                       "/logs",
                                       namenode+"/srcdat",
                                       "file:///"+TEST_ROOT_DIR+"/destdat"});
      assertTrue("Source and destination directories do not match.",
                 checkFiles(localfs, destdir, files));
      assertTrue("Log directory does not exist.",
                  hdfs.exists(new Path("/logs")));
      deldir(localfs, destdir);
      deldir(hdfs, "/logs");
      deldir(hdfs, "/srcdat");
    }
  } finally {
    if (cluster != null) { cluster.shutdown(); }
  }
}
 
Example #10
Source File: TestCopyFiles.java    From big-c with Apache License 2.0
/** copy files from dfs file system to local file system */
public void testCopyFromDfsToLocal() throws Exception {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new Configuration();
    final FileSystem localfs = FileSystem.get(LOCAL_FS, conf);
    cluster = new MiniDFSCluster.Builder(conf).build();
    final FileSystem hdfs = cluster.getFileSystem();
    final String namenode = FileSystem.getDefaultUri(conf).toString();
    if (namenode.startsWith("hdfs://")) {
      MyFile[] files = createFiles(URI.create(namenode), "/srcdat");
      ToolRunner.run(new DistCpV1(conf), new String[] {
                                       "-log",
                                       "/logs",
                                       namenode+"/srcdat",
                                       "file:///"+TEST_ROOT_DIR+"/destdat"});
      assertTrue("Source and destination directories do not match.",
                 checkFiles(localfs, TEST_ROOT_DIR+"/destdat", files));
      assertTrue("Log directory does not exist.",
                  hdfs.exists(new Path("/logs")));
      deldir(localfs, TEST_ROOT_DIR+"/destdat");
      deldir(hdfs, "/logs");
      deldir(hdfs, "/srcdat");
    }
  } finally {
    if (cluster != null) { cluster.shutdown(); }
  }
}
 
Example #11
Source File: TestCopyFiles.java    From big-c with Apache License 2.0
/** copy files from local file system to dfs file system */
public void testCopyFromLocalToDfs() throws Exception {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new Configuration();
    cluster = new MiniDFSCluster.Builder(conf).build();
    final FileSystem hdfs = cluster.getFileSystem();
    final String namenode = hdfs.getUri().toString();
    if (namenode.startsWith("hdfs://")) {
      MyFile[] files = createFiles(LOCAL_FS, TEST_ROOT_DIR+"/srcdat");
      ToolRunner.run(new DistCpV1(conf), new String[] {
                                       "-log",
                                       namenode+"/logs",
                                       "file:///"+TEST_ROOT_DIR+"/srcdat",
                                       namenode+"/destdat"});
      assertTrue("Source and destination directories do not match.",
                 checkFiles(cluster.getFileSystem(), "/destdat", files));
      assertTrue("Log directory does not exist.",
                  hdfs.exists(new Path(namenode+"/logs")));
      deldir(hdfs, "/destdat");
      deldir(hdfs, "/logs");
      deldir(FileSystem.get(LOCAL_FS, conf), TEST_ROOT_DIR+"/srcdat");
    }
  } finally {
    if (cluster != null) { cluster.shutdown(); }
  }
}
 
Example #12
Source File: TestCopyFiles.java    From big-c with Apache License 2.0
/** copy empty directory on dfs file system */
public void testEmptyDir() throws Exception {
  String namenode = null;
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new Configuration();
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    final FileSystem hdfs = cluster.getFileSystem();
    namenode = FileSystem.getDefaultUri(conf).toString();
    if (namenode.startsWith("hdfs://")) {
      
      FileSystem fs = FileSystem.get(URI.create(namenode), new Configuration());
      fs.mkdirs(new Path("/empty"));

      ToolRunner.run(new DistCpV1(conf), new String[] {
                                       "-log",
                                       namenode+"/logs",
                                       namenode+"/empty",
                                       namenode+"/dest"});
      fs = FileSystem.get(URI.create(namenode+"/destdat"), conf);
      assertTrue("Destination directory does not exist.",
                 fs.exists(new Path(namenode+"/dest")));
      deldir(hdfs, "/dest");
      deldir(hdfs, "/empty");
      deldir(hdfs, "/logs");
    }
  } finally {
    if (cluster != null) { cluster.shutdown(); }
  }
}
 
Example #13
Source File: TestCopyFiles.java    From big-c with Apache License 2.0
/** copy files from dfs file system to dfs file system */
public void testCopyFromDfsToDfs() throws Exception {
  String namenode = null;
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new Configuration();
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    final FileSystem hdfs = cluster.getFileSystem();
    namenode = FileSystem.getDefaultUri(conf).toString();
    if (namenode.startsWith("hdfs://")) {
      MyFile[] files = createFiles(URI.create(namenode), "/srcdat");
      ToolRunner.run(new DistCpV1(conf), new String[] {
                                       "-log",
                                       namenode+"/logs",
                                       namenode+"/srcdat",
                                       namenode+"/destdat"});
      assertTrue("Source and destination directories do not match.",
                 checkFiles(hdfs, "/destdat", files));
      FileSystem fs = FileSystem.get(URI.create(namenode+"/logs"), conf);
      assertTrue("Log directory does not exist.",
                 fs.exists(new Path(namenode+"/logs")));
      deldir(hdfs, "/destdat");
      deldir(hdfs, "/srcdat");
      deldir(hdfs, "/logs");
    }
  } finally {
    if (cluster != null) { cluster.shutdown(); }
  }
}
 
Example #14
Source File: TestCopyFiles.java    From hadoop with Apache License 2.0
/** test -delete */
public void testDelete() throws Exception {
  final Configuration conf = new Configuration();
  conf.setInt("fs.trash.interval", 60);
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    final URI nnURI = FileSystem.getDefaultUri(conf);
    final String nnUri = nnURI.toString();
    final FileSystem fs = FileSystem.get(URI.create(nnUri), conf);

    final DistCpV1 distcp = new DistCpV1(conf);
    final FsShell shell = new FsShell(conf);  

    final String srcrootdir = "/src_root";
    final String dstrootdir = "/dst_root";

    {
      //create source files
      createFiles(nnURI, srcrootdir);
      String srcresults = execCmd(shell, "-lsr", srcrootdir);
      srcresults = removePrefix(srcresults, srcrootdir);
      System.out.println("srcresults=" +  srcresults);

      //create some files in dst
      createFiles(nnURI, dstrootdir);
      System.out.println("dstrootdir=" +  dstrootdir);
      shell.run(new String[]{"-lsr", dstrootdir});

      //run distcp
      ToolRunner.run(distcp,
          new String[]{"-delete", "-update", "-log", "/log",
                       nnUri+srcrootdir, nnUri+dstrootdir});

      //make sure src and dst contains the same files
      String dstresults = execCmd(shell, "-lsr", dstrootdir);
      dstresults = removePrefix(dstresults, dstrootdir);
      System.out.println("first dstresults=" +  dstresults);
      assertEquals(srcresults, dstresults);

      //create additional file in dst
      create(fs, new Path(dstrootdir, "foo"));
      create(fs, new Path(dstrootdir, "foobar"));

      //run distcp again
      ToolRunner.run(distcp,
          new String[]{"-delete", "-update", "-log", "/log2",
                       nnUri+srcrootdir, nnUri+dstrootdir});
      
      //make sure src and dst contains the same files
      dstresults = execCmd(shell, "-lsr", dstrootdir);
      dstresults = removePrefix(dstresults, dstrootdir);
      System.out.println("second dstresults=" +  dstresults);
      assertEquals(srcresults, dstresults);
      // verify that files removed in -delete were moved to the trash
      // regrettably, this test will break if Trash changes incompatibly
      assertTrue(fs.exists(new Path(fs.getHomeDirectory(),
              ".Trash/Current" + dstrootdir + "/foo")));
      assertTrue(fs.exists(new Path(fs.getHomeDirectory(),
              ".Trash/Current" + dstrootdir + "/foobar")));

      //cleanup
      deldir(fs, dstrootdir);
      deldir(fs, srcrootdir);
    }
  } finally {
    if (cluster != null) { cluster.shutdown(); }
  }
}
 
Example #15
Source File: TestCopyFiles.java    From big-c with Apache License 2.0
public void testCopyDfsToDfsUpdateOverwrite() throws Exception {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new Configuration();
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    final FileSystem hdfs = cluster.getFileSystem();
    final String namenode = hdfs.getUri().toString();
    if (namenode.startsWith("hdfs://")) {
      MyFile[] files = createFiles(URI.create(namenode), "/srcdat");
      ToolRunner.run(new DistCpV1(conf), new String[] {
                                       "-p",
                                       "-log",
                                       namenode+"/logs",
                                       namenode+"/srcdat",
                                       namenode+"/destdat"});
      assertTrue("Source and destination directories do not match.",
                 checkFiles(hdfs, "/destdat", files));
      FileSystem fs = FileSystem.get(URI.create(namenode+"/logs"), conf);
      assertTrue("Log directory does not exist.",
                  fs.exists(new Path(namenode+"/logs")));

      FileStatus[] dchkpoint = getFileStatus(hdfs, "/destdat", files);
      final int nupdate = NFILES>>2;
      updateFiles(cluster.getFileSystem(), "/srcdat", files, nupdate);
      deldir(hdfs, "/logs");

      ToolRunner.run(new DistCpV1(conf), new String[] {
                                       "-prbugp", // no t to avoid preserving mod. times
                                       "-update",
                                       "-log",
                                       namenode+"/logs",
                                       namenode+"/srcdat",
                                       namenode+"/destdat"});
      assertTrue("Source and destination directories do not match.",
                 checkFiles(hdfs, "/destdat", files));
      assertTrue("Update failed to replicate all changes in src",
               checkUpdate(hdfs, dchkpoint, "/destdat", files, nupdate));

      deldir(hdfs, "/logs");
      ToolRunner.run(new DistCpV1(conf), new String[] {
                                       "-prbugp", // no t to avoid preserving mod. times
                                       "-overwrite",
                                       "-log",
                                       namenode+"/logs",
                                       namenode+"/srcdat",
                                       namenode+"/destdat"});
      assertTrue("Source and destination directories do not match.",
                 checkFiles(hdfs, "/destdat", files));
      assertTrue("-overwrite didn't.",
               checkUpdate(hdfs, dchkpoint, "/destdat", files, NFILES));

      deldir(hdfs, "/destdat");
      deldir(hdfs, "/srcdat");
      deldir(hdfs, "/logs");
    }
  } finally {
    if (cluster != null) { cluster.shutdown(); }
  }
}
 
Example #16
Source File: TestCopyFiles.java    From big-c with Apache License 2.0
public void testCopyDfsToDfsUpdateWithSkipCRC() throws Exception {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new Configuration();
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    final FileSystem hdfs = cluster.getFileSystem();
    final String namenode = hdfs.getUri().toString();
    
    FileSystem fs = FileSystem.get(URI.create(namenode), new Configuration());
    // Create two files of the same name, same length but different
    // contents
    final String testfilename = "test";
    final String srcData = "act act act";
    final String destData = "cat cat cat";
    
    if (namenode.startsWith("hdfs://")) {
      deldir(hdfs,"/logs");
      
      Path srcPath = new Path("/srcdat", testfilename);
      Path destPath = new Path("/destdat", testfilename);
      FSDataOutputStream out = fs.create(srcPath, true);
      out.writeUTF(srcData);
      out.close();

      out = fs.create(destPath, true);
      out.writeUTF(destData);
      out.close();
      
      // Run with -skipcrccheck option
      ToolRunner.run(new DistCpV1(conf), new String[] {
        "-p",
        "-update",
        "-skipcrccheck",
        "-log",
        namenode+"/logs",
        namenode+"/srcdat",
        namenode+"/destdat"});
      
      // File should not be overwritten
      FSDataInputStream in = hdfs.open(destPath);
      String s = in.readUTF();
      System.out.println("Dest had: " + s);
      assertTrue("Dest got over written even with skip crc",
          s.equalsIgnoreCase(destData));
      in.close();
      
      deldir(hdfs, "/logs");

      // Run without the option        
      ToolRunner.run(new DistCpV1(conf), new String[] {
        "-p",
        "-update",
        "-log",
        namenode+"/logs",
        namenode+"/srcdat",
        namenode+"/destdat"});
      
      // File should be overwritten
      in = hdfs.open(destPath);
      s = in.readUTF();
      System.out.println("Dest had: " + s);

      assertTrue("Dest did not get overwritten without skip crc",
          s.equalsIgnoreCase(srcData));
      in.close();

      deldir(hdfs, "/destdat");
      deldir(hdfs, "/srcdat");
      deldir(hdfs, "/logs");
    }
  } finally {
    if (cluster != null) { cluster.shutdown(); }
  }
}
 
Example #17
Source File: TestCopyFiles.java    From hadoop with Apache License 2.0
public void testHftpAccessControl() throws Exception {
  MiniDFSCluster cluster = null;
  try {
    final UserGroupInformation DFS_UGI = createUGI("dfs", true); 
    final UserGroupInformation USER_UGI = createUGI("user", false); 

    //start cluster by DFS_UGI
    final Configuration dfsConf = new Configuration();
    cluster = new MiniDFSCluster.Builder(dfsConf).numDataNodes(2).build();
    cluster.waitActive();

    final String httpAdd = dfsConf.get("dfs.http.address");
    final URI nnURI = FileSystem.getDefaultUri(dfsConf);
    final String nnUri = nnURI.toString();
    FileSystem fs1 = DFS_UGI.doAs(new PrivilegedExceptionAction<FileSystem>() {
      public FileSystem run() throws IOException {
        return FileSystem.get(nnURI, dfsConf);
      }
    });
    final Path home = 
      createHomeDirectory(fs1, USER_UGI);
    
    //now, login as USER_UGI
    final Configuration userConf = new Configuration();
    final FileSystem fs = 
      USER_UGI.doAs(new PrivilegedExceptionAction<FileSystem>() {
      public FileSystem run() throws IOException {
        return FileSystem.get(nnURI, userConf);
      }
    });
    
    final Path srcrootpath = new Path(home, "src_root"); 
    final String srcrootdir =  srcrootpath.toString();
    final Path dstrootpath = new Path(home, "dst_root"); 
    final String dstrootdir =  dstrootpath.toString();
    final DistCpV1 distcp = USER_UGI.doAs(new PrivilegedExceptionAction<DistCpV1>() {
      public DistCpV1 run() {
        return new DistCpV1(userConf);
      }
    });

    FileSystem.mkdirs(fs, srcrootpath, new FsPermission((short)0700));
    final String[] args = {"hftp://"+httpAdd+srcrootdir, nnUri+dstrootdir};

    { //copy with permission 000, should fail
      fs.setPermission(srcrootpath, new FsPermission((short)0));
      USER_UGI.doAs(new PrivilegedExceptionAction<Void>() {
        public Void run() throws Exception {
          assertEquals(-3, ToolRunner.run(distcp, args));
          return null;
        }
      });
    }
  } finally {
    if (cluster != null) { cluster.shutdown(); }
  }
}
 
Example #18
Source File: TestCopyFiles.java    From big-c with Apache License 2.0
public void testMapCount() throws Exception {
  String namenode = null;
  MiniDFSCluster dfs = null;
  try {
    Configuration conf = new Configuration();
    
    dfs = new MiniDFSCluster.Builder(conf).numDataNodes(3).format(true).build();
    
    FileSystem fs = dfs.getFileSystem();
    final FsShell shell = new FsShell(conf);
    namenode = fs.getUri().toString();
    MyFile[] files = createFiles(fs.getUri(), "/srcdat");
    long totsize = 0;
    for (MyFile f : files) {
      totsize += f.getSize();
    }
    
    Configuration job = new JobConf(conf);
    job.setLong("distcp.bytes.per.map", totsize / 3);
    ToolRunner.run(new DistCpV1(job),
        new String[] {"-m", "100",
                      "-log",
                      namenode+"/logs",
                      namenode+"/srcdat",
                      namenode+"/destdat"});
    assertTrue("Source and destination directories do not match.",
               checkFiles(fs, "/destdat", files));

    String logdir = namenode + "/logs";
    System.out.println(execCmd(shell, "-lsr", logdir));
    FileStatus[] logs = fs.listStatus(new Path(logdir));
    // rare case where splits are exact, logs.length can be 4
    assertTrue(logs.length == 2);

    deldir(fs, "/destdat");
    deldir(fs, "/logs");
    ToolRunner.run(new DistCpV1(job),
        new String[] {"-m", "1",
                      "-log",
                      namenode+"/logs",
                      namenode+"/srcdat",
                      namenode+"/destdat"});

    System.out.println(execCmd(shell, "-lsr", logdir));
    logs = fs.globStatus(new Path(namenode+"/logs/part*"));
    assertTrue("Unexpected map count, logs.length=" + logs.length,
        logs.length == 1);
  } finally {
    if (dfs != null) { dfs.shutdown(); }
  }
}