Java Code Examples for org.apache.hadoop.fs.FSDataInputStream#readUTF()

The following examples show how to use org.apache.hadoop.fs.FSDataInputStream#readUTF(). Each example is taken from an open-source project; the source file, project, and license are noted above the code. FSDataInputStream inherits readUTF() from java.io.DataInputStream: it reads a single string encoded in modified UTF-8 with a two-byte length prefix, so it is meant to be paired with a writeUTF() call on the writing side.
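As a quick orientation before the project examples, here is a minimal write-then-read round trip. This is a sketch, not code from any of the projects below: the path /tmp/readutf-demo.txt is hypothetical, and the default FileSystem comes from an empty Configuration (the local filesystem unless HDFS is configured).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ReadUTFDemo {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Path path = new Path("/tmp/readutf-demo.txt"); // hypothetical scratch path
        try (FileSystem fs = FileSystem.get(conf)) {
            // writeUTF() stores a two-byte length followed by modified UTF-8 bytes
            try (FSDataOutputStream out = fs.create(path, true)) {
                out.writeUTF("Hello readUTF!");
            }
            // readUTF() expects exactly that framing and returns the string
            try (FSDataInputStream in = fs.open(path)) {
                System.out.println(in.readUTF()); // Prints: Hello readUTF!
            }
        }
    }
}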
Example 1
Source File: HdfsCreate.java    From BigData-In-Practice with Apache License 2.0
public static void main(String[] args) throws IOException {
    // Get the file system
    FileSystem fileSystem = SysUtil.getFileSystem();

    // If writing fails because of permissions, adjust them first: hadoop dfs -chmod 777 /hadoop
    Path path = new Path("/hadoop/create.txt");
    // Get an output stream
    FSDataOutputStream outputStream = fileSystem.create(path);
    // Write some content
    outputStream.writeUTF("Hello HDFS!");
    outputStream.close();

    // ------ After writing, read the data back -----------
    // Get an input stream for the file
    FSDataInputStream inputStream = fileSystem.open(path);
    String data = inputStream.readUTF();
    System.out.println(data);
    // Prints: Hello HDFS!

    fileSystem.close();
}
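Two caveats about this example: if writeUTF() or readUTF() throws, the matching close() calls are skipped (the sketch above uses try-with-resources for that reason), and readUTF() succeeds here only because the file was produced by writeUTF(). Opening an arbitrary text file this way would typically fail with EOFException or UTFDataFormatException.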
 
Example 2
Source File: HcfsFileSystemTest.java    From cephfs-hadoop with GNU Lesser General Public License v2.1
@org.junit.Test
public void testTextWriteAndRead() throws Exception{

    String testString = "Is there anyone out there?";

    FSDataOutputStream dfsOut = fs.create(new Path("test1.txt"));
    dfsOut.writeUTF(testString);
    dfsOut.close();

    FSDataInputStream dfsin = fs.open(new Path("test1.txt"));
    String readChars = dfsin.readUTF();
    dfsin.close();

    assertEquals(testString, readChars);

    fs.delete(new Path("test1.txt"), true);

    assertFalse(fs.exists(new Path("test1.txt")));
}
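Because each writeUTF() call emits an independently length-prefixed record, several strings can be written to one stream and read back in the same order. A minimal sketch, assuming an already-initialized FileSystem fs as in the test above and a scratch Path of our choosing:

static void multiRecordRoundTrip(FileSystem fs, Path path) throws IOException {
    // Each writeUTF() writes a 2-byte length prefix followed by modified UTF-8 bytes
    try (FSDataOutputStream out = fs.create(path, true)) {
        out.writeUTF("first record");
        out.writeUTF("second record");
    }
    // Records come back in write order; one readUTF() too many throws EOFException
    try (FSDataInputStream in = fs.open(path)) {
        System.out.println(in.readUTF()); // first record
        System.out.println(in.readUTF()); // second record
    }
}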
 
Example 3
Source File: HistoryFileManager.java    From hadoop with Apache License 2.0
private String getJobSummary(FileContext fc, Path path) throws IOException {
  Path qPath = fc.makeQualified(path);
  FSDataInputStream in = fc.open(qPath);
  String jobSummaryString = in.readUTF();
  in.close();
  return jobSummaryString;
}
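One fragility in this helper: if readUTF() throws, for example on a truncated summary file, the stream is never closed. A try-with-resources variant, shown here as a sketch rather than the actual Hadoop code, closes the stream on every path. A single readUTF() is sufficient here because the history writer presumably emits the .summary file with a matching writeUTF() call.

private String getJobSummary(FileContext fc, Path path) throws IOException {
  Path qPath = fc.makeQualified(path);
  // Closes the stream even if readUTF() throws
  try (FSDataInputStream in = fc.open(qPath)) {
    return in.readUTF();
  }
}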
 
Example 4
Source File: TestJobHistoryParsing.java    From hadoop with Apache License 2.0
private static String getJobSummary(FileContext fc, Path path)
    throws IOException {
  Path qPath = fc.makeQualified(path);
  FSDataInputStream in = fc.open(qPath);
  String jobSummaryString = in.readUTF();
  in.close();
  return jobSummaryString;
}
 
Example 5
Source File: HistoryFileManager.java    From big-c with Apache License 2.0
private String getJobSummary(FileContext fc, Path path) throws IOException {
  Path qPath = fc.makeQualified(path);
  FSDataInputStream in = fc.open(qPath);
  String jobSummaryString = in.readUTF();
  in.close();
  return jobSummaryString;
}
 
Example 6
Source File: TestJobHistoryParsing.java    From big-c with Apache License 2.0
private static String getJobSummary(FileContext fc, Path path)
    throws IOException {
  Path qPath = fc.makeQualified(path);
  FSDataInputStream in = fc.open(qPath);
  String jobSummaryString = in.readUTF();
  in.close();
  return jobSummaryString;
}
 
Example 7
Source File: MRLocalClusterIntegrationTest.java    From hadoop-mini-clusters with Apache License 2.0
private String readFileFromHdfs(String filename) throws Exception {
    FileSystem hdfsFsHandle = dfsCluster.getHdfsFileSystemHandle();
    FSDataInputStream reader = hdfsFsHandle.open(new Path(filename));
    String output = reader.readUTF();
    reader.close();
    hdfsFsHandle.close();
    return output;
}
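Note that readUTF() returns only the first writeUTF() record in a file, and it typically fails with EOFException or UTFDataFormatException on files that were not written through writeUTF(). When the goal is to read an arbitrary text file in full, a sketch along these lines is safer (the helper name readWholeFile is ours; IOUtils is org.apache.hadoop.io.IOUtils):

private String readWholeFile(FileSystem fs, String filename) throws IOException {
    java.io.ByteArrayOutputStream buffer = new java.io.ByteArrayOutputStream();
    try (FSDataInputStream in = fs.open(new Path(filename))) {
        // Copy raw bytes instead of assuming writeUTF() framing
        IOUtils.copyBytes(in, buffer, 4096, false);
    }
    return new String(buffer.toByteArray(), java.nio.charset.StandardCharsets.UTF_8);
}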
 
Example 8
Source File: TestCopyFiles.java    From hadoop with Apache License 2.0
public void testCopyDfsToDfsUpdateWithSkipCRC() throws Exception {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new Configuration();
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    final FileSystem hdfs = cluster.getFileSystem();
    final String namenode = hdfs.getUri().toString();
    
    FileSystem fs = FileSystem.get(URI.create(namenode), new Configuration());
    // Create two files of the same name, same length but different
    // contents
    final String testfilename = "test";
    final String srcData = "act act act";
    final String destData = "cat cat cat";
    
    if (namenode.startsWith("hdfs://")) {
      deldir(hdfs,"/logs");
      
      Path srcPath = new Path("/srcdat", testfilename);
      Path destPath = new Path("/destdat", testfilename);
      FSDataOutputStream out = fs.create(srcPath, true);
      out.writeUTF(srcData);
      out.close();

      out = fs.create(destPath, true);
      out.writeUTF(destData);
      out.close();
      
      // Run with -skipcrccheck option
      ToolRunner.run(new DistCpV1(conf), new String[] {
        "-p",
        "-update",
        "-skipcrccheck",
        "-log",
        namenode+"/logs",
        namenode+"/srcdat",
        namenode+"/destdat"});
      
      // File should not be overwritten: with -update and -skipcrccheck,
      // same-length files are treated as up to date
      FSDataInputStream in = hdfs.open(destPath);
      String s = in.readUTF();
      System.out.println("Dest had: " + s);
      assertTrue("Dest got over written even with skip crc",
          s.equalsIgnoreCase(destData));
      in.close();
      
      deldir(hdfs, "/logs");

      // Run without the option        
      ToolRunner.run(new DistCpV1(conf), new String[] {
        "-p",
        "-update",
        "-log",
        namenode+"/logs",
        namenode+"/srcdat",
        namenode+"/destdat"});
      
      // File should be overwritten: without -skipcrccheck the CRC mismatch is detected
      in = hdfs.open(destPath);
      s = in.readUTF();
      System.out.println("Dest had: " + s);

      assertTrue("Dest did not get overwritten without skip crc",
          s.equalsIgnoreCase(srcData));
      in.close();

      deldir(hdfs, "/destdat");
      deldir(hdfs, "/srcdat");
      deldir(hdfs, "/logs");
     }
  } finally {
    if (cluster != null) { cluster.shutdown(); }
  }
}
 
Example 9
Source File: TestCopyFiles.java    From big-c with Apache License 2.0
public void testCopyDfsToDfsUpdateWithSkipCRC() throws Exception {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new Configuration();
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    final FileSystem hdfs = cluster.getFileSystem();
    final String namenode = hdfs.getUri().toString();
    
    FileSystem fs = FileSystem.get(URI.create(namenode), new Configuration());
    // Create two files of the same name, same length but different
    // contents
    final String testfilename = "test";
    final String srcData = "act act act";
    final String destData = "cat cat cat";
    
    if (namenode.startsWith("hdfs://")) {
      deldir(hdfs,"/logs");
      
      Path srcPath = new Path("/srcdat", testfilename);
      Path destPath = new Path("/destdat", testfilename);
      FSDataOutputStream out = fs.create(srcPath, true);
      out.writeUTF(srcData);
      out.close();

      out = fs.create(destPath, true);
      out.writeUTF(destData);
      out.close();
      
      // Run with -skipcrccheck option
      ToolRunner.run(new DistCpV1(conf), new String[] {
        "-p",
        "-update",
        "-skipcrccheck",
        "-log",
        namenode+"/logs",
        namenode+"/srcdat",
        namenode+"/destdat"});
      
      // File should not be overwritten: with -update and -skipcrccheck,
      // same-length files are treated as up to date
      FSDataInputStream in = hdfs.open(destPath);
      String s = in.readUTF();
      System.out.println("Dest had: " + s);
      assertTrue("Dest got over written even with skip crc",
          s.equalsIgnoreCase(destData));
      in.close();
      
      deldir(hdfs, "/logs");

      // Run without the option        
      ToolRunner.run(new DistCpV1(conf), new String[] {
        "-p",
        "-update",
        "-log",
        namenode+"/logs",
        namenode+"/srcdat",
        namenode+"/destdat"});
      
      // File should be overwritten: without -skipcrccheck the CRC mismatch is detected
      in = hdfs.open(destPath);
      s = in.readUTF();
      System.out.println("Dest had: " + s);

      assertTrue("Dest did not get overwritten without skip crc",
          s.equalsIgnoreCase(srcData));
      in.close();

      deldir(hdfs, "/destdat");
      deldir(hdfs, "/srcdat");
      deldir(hdfs, "/logs");
     }
  } finally {
    if (cluster != null) { cluster.shutdown(); }
  }
}