Java Code Examples for org.apache.hadoop.fs.FileSystem#getLength()

The following examples show how to use org.apache.hadoop.fs.FileSystem#getLength(). They are extracted from open source projects; where available, the source project, file, and license are noted above each example.
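Before the project examples, here is a minimal, self-contained sketch of the call itself (the path is hypothetical). Note that FileSystem#getLength(Path) is deprecated in the Hadoop API in favor of getFileStatus(Path).getLen(); the examples below keep the deprecated call because it is the subject of this page.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class GetLengthExample {
  public static void main( final String[] args ) throws Exception {
    Configuration conf = new Configuration();
    Path path = new Path( "/tmp/example.dat" ); // hypothetical path
    FileSystem fs = path.getFileSystem( conf );
    long length = fs.getLength( path );                   // the deprecated call this page documents
    long viaStatus = fs.getFileStatus( path ).getLen();   // the documented replacement
    System.out.println( length + " == " + viaStatus );
  }
}
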
Example 1
@Override
public RecordReader<NullWritable,ColumnAndIndex> getRecordReader( final InputSplit split, final JobConf job, final Reporter reporter ) throws IOException {
  FileSplit fileSplit = (FileSplit)split;
  Path path = fileSplit.getPath();
  FileSystem fs = path.getFileSystem( job );
  long fileLength = fs.getLength( path );
  long start = fileSplit.getStart();
  long length = fileSplit.getLength();
  InputStream in = fs.open( path );
  IJobReporter jobReporter = new HadoopJobReporter( reporter );
  jobReporter.setStatus( String.format( "Read file : %s" , path.toString() ) );
  HiveReaderSetting hiveConfig = new HiveReaderSetting( fileSplit , job );
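  // vector mode: hand the split to the vectorized reader; otherwise fall back to the row-by-row line reader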
  if ( hiveConfig.isVectorMode() ){
    IVectorizedReaderSetting vectorizedSetting = new HiveVectorizedReaderSetting( fileSplit , job , hiveConfig );
    return (RecordReader)new MDSHiveDirectVectorizedReader( in , fileLength , start , length , vectorizedSetting , jobReporter );
  }
  else{
    return new MDSHiveLineReader( in , fileLength , start , length , hiveConfig , jobReporter , spreadCounter );
  }
}
 
Example 2
public MDSCombineSpreadReader( final CombineFileSplit split , final TaskAttemptContext context , final Integer index ) throws IOException{
  Configuration config = context.getConfiguration();
  Path path = split.getPath( index );
  FileSystem fs = path.getFileSystem( config );
  long fileLength = fs.getLength( path );
  InputStream in = fs.open( path );

  innerReader = new MDSSpreadReader();
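  // a combine-split member file is consumed in full: offset 0, length equal to the whole file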
  innerReader.setStream( in , fileLength , 0 , fileLength );
}
 
Example 3
@Override
public void initialize( final InputSplit inputSplit, final TaskAttemptContext context ) throws IOException, InterruptedException {
  FileSplit fileSplit = (FileSplit)inputSplit;
  Configuration config = context.getConfiguration();
  Path path = fileSplit.getPath();
  FileSystem fs = path.getFileSystem( config );
  long fileLength = fs.getLength( path );
  long start = fileSplit.getStart();
  long length = fileSplit.getLength();
  InputStream in = fs.open( path );
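  // the reader needs the full file length plus this split's byte range within it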
  setStream( in , fileLength , start , length );
}
 
Example 4
Source Project: RDFS   File: UtilsForTests.java   License: Apache License 2.0
static String slurpHadoop(Path p, FileSystem fs) throws IOException {
  int len = (int) fs.getLength(p);
  byte[] buf = new byte[len];
  InputStream in = fs.open(p);
  String contents = null;
  try {
    // a single read() may return fewer than len bytes; Hadoop's IOUtils fills the buffer completely
    org.apache.hadoop.io.IOUtils.readFully(in, buf, 0, len);
    contents = new String(buf, "UTF-8");
  } finally {
    in.close();
  }
  return contents;
}
 
Example 5
Source Project: RDFS   File: StreamUtil.java   License: Apache License 2.0
static String slurpHadoop(Path p, FileSystem fs) throws IOException {
  int len = (int) fs.getLength(p);
  byte[] buf = new byte[len];
  FSDataInputStream in = fs.open(p);
  String contents = null;
  try {
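    // positioned readFully: fills the whole buffer starting at the current offset (0 right after open)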
    in.readFully(in.getPos(), buf);
    contents = new String(buf, "UTF-8");
  } finally {
    in.close();
  }
  return contents;
}
 
Example 6
Source Project: hadoop-gpu   File: UtilsForTests.java   License: Apache License 2.0
static String slurpHadoop(Path p, FileSystem fs) throws IOException {
  int len = (int) fs.getLength(p);
  byte[] buf = new byte[len];
  InputStream in = fs.open(p);
  String contents = null;
  try {
    // a single read() may return fewer than len bytes; Hadoop's IOUtils fills the buffer completely
    org.apache.hadoop.io.IOUtils.readFully(in, buf, 0, len);
    contents = new String(buf, "UTF-8");
  } finally {
    in.close();
  }
  return contents;
}
 
Example 7
Source Project: hadoop-gpu   File: StreamUtil.java   License: Apache License 2.0
static String slurpHadoop(Path p, FileSystem fs) throws IOException {
  int len = (int) fs.getLength(p);
  byte[] buf = new byte[len];
  FSDataInputStream in = fs.open(p);
  String contents = null;
  try {
    in.readFully(in.getPos(), buf);
    contents = new String(buf, "UTF-8");
  } finally {
    in.close();
  }
  return contents;
}