Java Code Examples for org.apache.hadoop.fs.FileSystem.create()

The following are Java code examples showing how to use the create() method of the org.apache.hadoop.fs.FileSystem class.
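Before the examples, here is a minimal sketch of the simplest call pattern; the path and the contents written are illustrative assumptions, not drawn from the examples below. create() overwrites an existing file by default and returns an FSDataOutputStream for writing.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CreateExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path path = new Path("/tmp/example.txt"); // hypothetical path
    // create() returns a stream; try-with-resources guarantees it is closed
    try (FSDataOutputStream out = fs.create(path)) {
      out.writeBytes("hello\n");
    }
  }
}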
Example 1
Project: hadoop   File: TestDecommissioningStatus.java
private static void writeConfigFile(FileSystem fs, Path name,
    ArrayList<String> nodes) throws IOException {

  // delete if it already exists
  if (fs.exists(name)) {
    fs.delete(name, true);
  }

  FSDataOutputStream stm = fs.create(name);

  if (nodes != null) {
    for (String node : nodes) {
      stm.writeBytes(node);
      stm.writeBytes("\n");
    }
  }
  stm.close();
}
 
Example 2
Project: hadoop   File: TestFileAppendRestart.java
private void writeAndAppend(FileSystem fs, Path p,
    int lengthForCreate, int lengthForAppend) throws IOException {
  // Create the file with a 4096-byte block size so the write spans multiple blocks
  FSDataOutputStream stream = fs.create(
      p, true, BLOCK_SIZE, (short) 1, BLOCK_SIZE);
  try {
    AppendTestUtil.write(stream, 0, lengthForCreate);
    stream.close();
    
    stream = fs.append(p);
    AppendTestUtil.write(stream, lengthForCreate, lengthForAppend);
    stream.close();
  } finally {
    IOUtils.closeStream(stream);
  }
  
  int totalLength = lengthForCreate + lengthForAppend; 
  assertEquals(totalLength, fs.getFileStatus(p).getLen());
}
 
Example 3
Project: aliyun-maxcompute-data-collectors   File: NetezzaExportManualTest.java
protected void createExportFile(ColumnGenerator... extraCols)
  throws IOException {
  String ext = ".txt";

  Path tablePath = getTablePath();
  Path filePath = new Path(tablePath, "part0" + ext);

  Configuration conf = new Configuration();
  if (!BaseSqoopTestCase.isOnPhysicalCluster()) {
    conf.set(CommonArgs.FS_DEFAULT_NAME, CommonArgs.LOCAL_FS);
  }
  FileSystem fs = FileSystem.get(conf);
  fs.mkdirs(tablePath);
  OutputStream os = fs.create(filePath);

  BufferedWriter w = new BufferedWriter(new OutputStreamWriter(os));
  for (int i = 0; i < 3; i++) {
    String line = getRecordLine(i, extraCols);
    w.write(line);
    LOG.debug("Create Export file - Writing line : " + line);
  }
  w.close();
  os.close();
}
 
Example 4
Project: hadoop   File: FSOperations.java
/**
 * Executes the filesystem operation.
 *
 * @param fs filesystem instance to use.
 *
 * @return {@code null} (the method is declared to return {@link Void}).
 *
 * @throws IOException thrown if an IO error occurred.
 */
@Override
public Void execute(FileSystem fs) throws IOException {
  if (replication == -1) {
    replication = fs.getDefaultReplication(path);
  }
  if (blockSize == -1) {
    blockSize = fs.getDefaultBlockSize(path);
  }
  FsPermission fsPermission = new FsPermission(permission);
  int bufferSize = fs.getConf().getInt("httpfs.buffer.size", 4096);
  OutputStream os = fs.create(path, fsPermission, override, bufferSize, replication, blockSize, null);
  IOUtils.copyBytes(is, os, bufferSize, true);
  os.close();
  return null;
}
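For reference, the seven-argument overload used above is create(Path f, FsPermission permission, boolean overwrite, int bufferSize, short replication, long blockSize, Progressable progress); the trailing null here is the Progressable callback, which is optional.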
 
Example 5
Project: hadoop   File: TestReporter.java
@Test
public void testStatusLimit() throws IOException, InterruptedException,
    ClassNotFoundException {
  Path test = new Path(testRootTempDir, "testStatusLimit");

  Configuration conf = new Configuration();
  Path inDir = new Path(test, "in");
  Path outDir = new Path(test, "out");
  FileSystem fs = FileSystem.get(conf);
  if (fs.exists(inDir)) {
    fs.delete(inDir, true);
  }
  fs.mkdirs(inDir);
  DataOutputStream file = fs.create(new Path(inDir, "part-" + 0));
  file.writeBytes("testStatusLimit");
  file.close();

  if (fs.exists(outDir)) {
    fs.delete(outDir, true);
  }

  Job job = Job.getInstance(conf, "testStatusLimit");

  job.setMapperClass(StatusLimitMapper.class);
  job.setNumReduceTasks(0);

  FileInputFormat.addInputPath(job, inDir);
  FileOutputFormat.setOutputPath(job, outDir);

  job.waitForCompletion(true);

  assertTrue("Job failed", job.isSuccessful());
}
 
Example 6
Project: ViraPipe   File: Decompress.java
private static void decompress(FileSystem fs, String in, String outpath) throws IOException {
  Configuration conf = new Configuration();
  CompressionCodecFactory factory = new CompressionCodecFactory(conf);
  // The appropriate codec is discovered from the file's extension
  CompressionCodec codec = factory.getCodec(new Path(in));
  // Open a decompressing input stream over the compressed file
  InputStream is = codec.createInputStream(fs.open(new Path(in)));
  OutputStream out = fs.create(new Path(outpath));
  // Copy the decompressed bytes to the output path
  IOUtils.copyBytes(is, out, conf);
  is.close();
  out.close();
}
 
Example 7
Project: hadoop   File: TestFileAppend.java
/** Test two consecutive appends on a file with a full block. */
@Test
public void testAppendTwice() throws Exception {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  final FileSystem fs1 = cluster.getFileSystem();
  final FileSystem fs2 = AppendTestUtil.createHdfsWithDifferentUsername(conf);
  try {

    final Path p = new Path("/testAppendTwice/foo");
    final int len = 1 << 16;
    final byte[] fileContents = AppendTestUtil.initBuffer(len);

    {
      // create a new file with a full block.
      FSDataOutputStream out = fs2.create(p, true, 4096, (short)1, len);
      out.write(fileContents, 0, len);
      out.close();
    }

    //1st append does not add any data so that the last block remains full
    //and the last block in INodeFileUnderConstruction is a BlockInfo
    //but not BlockInfoUnderConstruction. 
    fs2.append(p);
    
    //2nd append should get AlreadyBeingCreatedException
    fs1.append(p);
    Assert.fail();
  } catch(RemoteException re) {
    AppendTestUtil.LOG.info("Got an exception:", re);
    Assert.assertEquals(AlreadyBeingCreatedException.class.getName(),
        re.getClassName());
  } finally {
    fs2.close();
    fs1.close();
    cluster.shutdown();
  }
}
 
Example 8
Project: hadoop   File: TestJMXGet.java
private void writeFile(FileSystem fileSys, Path name, int repl)
throws IOException {
  FSDataOutputStream stm = fileSys.create(name, true,
      fileSys.getConf().getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
      (short)repl, blockSize);
  byte[] buffer = new byte[fileSize];
  Random rand = new Random(seed);
  rand.nextBytes(buffer);
  stm.write(buffer);
  stm.close();
}
 
Example 9
Project: hadoop   File: TestDeleteRace.java
@Test
public void testRenameRace() throws Exception {
  try {
    conf.setClass(DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY,
        SlowBlockPlacementPolicy.class, BlockPlacementPolicy.class);
    cluster = new MiniDFSCluster.Builder(conf).build();
    FileSystem fs = cluster.getFileSystem();
    Path dirPath1 = new Path("/testRenameRace1");
    Path dirPath2 = new Path("/testRenameRace2");
    Path filePath = new Path("/testRenameRace1/file1");
    

    fs.mkdirs(dirPath1);
    FSDataOutputStream out = fs.create(filePath);
    Thread renameThread = new RenameThread(fs, dirPath1, dirPath2);
    renameThread.start();

    // write data and close to make sure a block is allocated.
    out.write(new byte[32], 0, 32);
    out.close();

    // Restart the name node so that it replays the edit log. If the old
    // (pre-rename) path was logged in an edit, the name node will fail to come up.
    cluster.restartNameNode(0);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example 10
Project: WiFiProbeAnalysis   File: KafkaConsumerForHive.java
public synchronized void writeFileToHadoop(List<ConsumerRecord<String, String>> buffer) {
  Configuration configuration = new Configuration();
  StringBuilder stringBuilder = new StringBuilder();
  try {
    FileSystem fileSystem = FileSystem.get(configuration);
    Path path = new Path("/user/hive/output/data.dat");
    FSDataOutputStream fsDataOutputStream = fileSystem.create(path);

    // Concatenate the record values, tab-separated, one record per line
    for (ConsumerRecord<String, String> record : buffer) {
      stringBuilder.append(record.value()).append("\t")
          .append(record.value()).append("\n");
    }
    byte[] bytes = stringBuilder.toString().getBytes();
    fsDataOutputStream.write(bytes, 0, bytes.length);
    fsDataOutputStream.flush();
    fsDataOutputStream.close();
    insertIntoHive(); // load the written file into Hive
  } catch (IOException e) {
    // Swallowing the exception would hide write failures; at least report it
    e.printStackTrace();
  }
}
 
Example 11
Project: ditb   File: FSTableDescriptors.java
private static void writeHTD(final FileSystem fs, final Path p, final HTableDescriptor htd)
throws IOException {
  FSDataOutputStream out = fs.create(p, false);
  try {
    // We used to write this file out as a serialized HTD Writable followed by two '\n's and then
    // the toString version of HTD.  Now we just write out the pb serialization.
    out.write(htd.toByteArray());
  } finally {
    out.close();
  }
}
 
Example 12
Project: hadoop   File: TestHarFileSystemWithHA.java
/**
 * Create an empty Har archive in the FileSystem fs at the Path p.
 * 
 * @param fs the file system to create the Har archive in
 * @param p the path to create the Har archive at
 * @throws IOException in the event of error
 */
private static void createEmptyHarArchive(FileSystem fs, Path p)
    throws IOException {
  fs.mkdirs(p);
  OutputStream out = fs.create(new Path(p, "_masterindex"));
  out.write(Integer.toString(HarFileSystem.VERSION).getBytes());
  out.close();
  fs.create(new Path(p, "_index")).close();
}
 
Example 13
Project: hadoop-oss   File: Client.java
private void addToLocalResources(FileSystem fs, String fileSrcPath,
    String fileDstPath, String appId, Map<String, LocalResource> localResources,
    String resources) throws IOException {
  String suffix = "prkeyrotation" + "/" + appId + "/" + fileDstPath;
  Path dst = new Path(fs.getHomeDirectory(), suffix);
  if (fileSrcPath == null) {
    FSDataOutputStream ostream = null;
    try {
      ostream = FileSystem.create(fs, dst, new FsPermission((short) 0710));
      ostream.writeUTF(resources);
    } finally {
      IOUtils.closeQuietly(ostream);
    }
  } else {
    fs.copyFromLocalFile(new Path(fileSrcPath), dst);
  }
  FileStatus scFileStatus = fs.getFileStatus(dst);
  LocalResource scRsrc = LocalResource.newInstance(
      ConverterUtils.getYarnUrlFromPath(dst),
      LocalResourceType.FILE, LocalResourceVisibility.APPLICATION,
      scFileStatus.getLen(), scFileStatus.getModificationTime());
  localResources.put(fileDstPath, scRsrc);
}
 
Example 14
Project: hadoop   File: TestFileOutputFormat.java
public void configure(JobConf conf) {
  try {
    FileSystem fs = FileSystem.get(conf);
    OutputStream os =
      fs.create(FileOutputFormat.getPathForCustomFile(conf, "test"));
    os.write(1);
    os.close();
  }
  catch (IOException ex) {
    throw new RuntimeException(ex);
  }
}
 
Example 15
Project: hadoop   File: TestClose.java
@Test
public void testWriteAfterClose() throws IOException {
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .build();
  
  try {
    final byte[] data = "foo".getBytes();
    
    FileSystem fs = FileSystem.get(conf);
    OutputStream out = fs.create(new Path("/test"));
    
    out.write(data);
    out.close();
    try {
      // Should fail.
      out.write(data);
      fail("Should not have been able to write more data after file is closed.");
    } catch (ClosedChannelException cce) {
      // We got the correct exception. Ignoring.
    }
    // Should succeed. Double closes are OK.
    out.close();
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
 
Example 16
Project: hadoop   File: TestPipelinesFailover.java
private void doWriteOverFailoverTest(TestScenario scenario,
    MethodToTestIdempotence methodToTest) throws Exception {
  Configuration conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
  // Don't check replication periodically.
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1000);
  
  FSDataOutputStream stm = null;
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
    .nnTopology(MiniDFSNNTopology.simpleHATopology())
    .numDataNodes(3)
    .build();
  try {
    int sizeWritten = 0;
    
    cluster.waitActive();
    cluster.transitionToActive(0);
    Thread.sleep(500);

    LOG.info("Starting with NN 0 active");
    FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
    stm = fs.create(TEST_PATH);
    
    // write a block and a half
    AppendTestUtil.write(stm, 0, BLOCK_AND_A_HALF);
    sizeWritten += BLOCK_AND_A_HALF;
    
    // Make sure all of the blocks are written out before failover.
    stm.hflush();

    LOG.info("Failing over to NN 1");
    scenario.run(cluster);

    // NOTE: explicitly do *not* make any further metadata calls
    // to the NN here. The next IPC call should be to allocate the next
    // block. Any other call would notice the failover and not test
    // idempotence of the operation (HDFS-3031)
    
    FSNamesystem ns1 = cluster.getNameNode(1).getNamesystem();
    BlockManagerTestUtil.updateState(ns1.getBlockManager());
    assertEquals(0, ns1.getPendingReplicationBlocks());
    assertEquals(0, ns1.getCorruptReplicaBlocks());
    assertEquals(0, ns1.getMissingBlocksCount());

    // If we're testing allocateBlock()'s idempotence, write another
    // block and a half, so we have to allocate a new block.
    // Otherwise, don't write anything, so our next RPC will be
    // completeFile() if we're testing idempotence of that operation.
    if (methodToTest == MethodToTestIdempotence.ALLOCATE_BLOCK) {
      // write another block and a half
      AppendTestUtil.write(stm, sizeWritten, BLOCK_AND_A_HALF);
      sizeWritten += BLOCK_AND_A_HALF;
    }
    
    stm.close();
    stm = null;
    
    AppendTestUtil.check(fs, TEST_PATH, sizeWritten);
  } finally {
    IOUtils.closeStream(stm);
    cluster.shutdown();
  }
}
 
Example 17
Project: hadoop   File: TestCombineFileInputFormat.java
/**
 * Test that directories do not get included as part of getSplits()
 */
@Test
public void testGetSplitsWithDirectory() throws Exception {
  MiniDFSCluster dfs = null;
  try {
    Configuration conf = new Configuration();
    dfs = new MiniDFSCluster.Builder(conf).racks(rack1).hosts(hosts1)
        .build();
    dfs.waitActive();

    FileSystem fileSys = dfs.getFileSystem();

    // Set up the following directory structure:
    // /dir1/: directory
    // /dir1/file1: regular file
    // /dir1/dir2/: directory
    Path dir1 = new Path("/dir1");
    Path file = new Path("/dir1/file1");
    Path dir2 = new Path("/dir1/dir2");
    if (!fileSys.mkdirs(dir1)) {
      throw new IOException("Mkdirs failed to create " + dir1.toString());
    }
    FSDataOutputStream out = fileSys.create(file);
    out.write(new byte[0]);
    out.close();
    if (!fileSys.mkdirs(dir2)) {
      throw new IOException("Mkdirs failed to create " + dir2.toString());
    }

    // split it using a CombinedFile input format
    DummyInputFormat inFormat = new DummyInputFormat();
    Job job = Job.getInstance(conf);
    FileInputFormat.setInputPaths(job, "/dir1");
    List<InputSplit> splits = inFormat.getSplits(job);

    // directories should be omitted from getSplits() - we should only see file1 and not dir2
    assertEquals(1, splits.size());
    CombineFileSplit fileSplit = (CombineFileSplit) splits.get(0);
    assertEquals(1, fileSplit.getNumPaths());
    assertEquals(file.getName(), fileSplit.getPath(0).getName());
    assertEquals(0, fileSplit.getOffset(0));
    assertEquals(0, fileSplit.getLength(0));
  } finally {
    if (dfs != null) {
      dfs.shutdown();
    }
  }
}
 
Example 18
Project: circus-train   File: S3MapReduceCpTestUtils.java
/** Creates a new, empty file at filePath and always overwrites */
public static void createFile(FileSystem fs, Path filePath) throws IOException {
  OutputStream out = fs.create(filePath, true);
  IOUtils.closeStream(out);
}
 
Example 19
Project: hadoop   File: TestHAStateTransitions.java
/**
 * Test for HDFS-2812. Since lease renewals go from the client
 * only to the active NN, the SBN will have out-of-date lease
 * info when it becomes active. We need to make sure we don't
 * accidentally mark the leases as expired when the failover
 * proceeds.
 */
@Test(timeout=120000)
public void testLeasesRenewedOnTransition() throws Exception {
  Configuration conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
    .nnTopology(MiniDFSNNTopology.simpleHATopology())
    .numDataNodes(1)
    .build();
  FSDataOutputStream stm = null;
  FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
  NameNode nn0 = cluster.getNameNode(0);
  NameNode nn1 = cluster.getNameNode(1);

  try {
    cluster.waitActive();
    cluster.transitionToActive(0);
    
    LOG.info("Starting with NN 0 active");

    stm = fs.create(TEST_FILE_PATH);
    long nn0t0 = NameNodeAdapter.getLeaseRenewalTime(nn0, TEST_FILE_STR);
    assertTrue(nn0t0 > 0);
    long nn1t0 = NameNodeAdapter.getLeaseRenewalTime(nn1, TEST_FILE_STR);
    assertEquals("Lease should not yet exist on nn1",
        -1, nn1t0);
    
    Thread.sleep(5); // make sure time advances!

    HATestUtil.waitForStandbyToCatchUp(nn0, nn1);
    long nn1t1 = NameNodeAdapter.getLeaseRenewalTime(nn1, TEST_FILE_STR);
    assertTrue("Lease should have been created on standby. Time was: " +
        nn1t1, nn1t1 > nn0t0);
        
    Thread.sleep(5); // make sure time advances!
    
    LOG.info("Failing over to NN 1");
    cluster.transitionToStandby(0);
    cluster.transitionToActive(1);
    long nn1t2 = NameNodeAdapter.getLeaseRenewalTime(nn1, TEST_FILE_STR);
    assertTrue("Lease should have been renewed by failover process",
        nn1t2 > nn1t1);
  } finally {
    IOUtils.closeStream(stm);
    cluster.shutdown();
  }
}
 
Example 20
Project: hadoop-oss   File: JavaKeyStoreProvider.java
@Override
protected OutputStream getOutputStreamForKeystore() throws IOException {
  return FileSystem.create(fs, getPath(), permissions);
}
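Note that Examples 13 and 20 call the static helper FileSystem.create(FileSystem fs, Path file, FsPermission permission) rather than the instance method: the helper creates the file and then applies the supplied permission to it, which is convenient when the file must not be left with the filesystem's default permissions.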