org.apache.hadoop.fs.FSDataInputStream Java Examples
The following examples show how to use
org.apache.hadoop.fs.FSDataInputStream.
The original project and source file are noted above each example.
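As a quick orientation before the individual examples, the sketch below shows the basic access pattern most of them build on: get a FileSystem for a Path, call open() to obtain an FSDataInputStream, then use its seek and positional-read methods. This is a minimal, hypothetical snippet rather than code from any of the projects below; the path /tmp/example.txt is a placeholder, and the file is assumed to already exist with at least 16 bytes.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FSDataInputStreamSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    Path path = new Path("/tmp/example.txt"); // hypothetical path, assumed to exist with >= 16 bytes
    FileSystem fs = path.getFileSystem(conf);

    // FSDataInputStream layers Seekable and PositionedReadable on top of a plain InputStream.
    try (FSDataInputStream in = fs.open(path)) {
      byte[] buf = new byte[16];
      in.readFully(0, buf);   // positional read: fills buf from offset 0 without moving the stream pointer
      in.seek(8);             // move the stream pointer for subsequent sequential reads
      int b = in.read();      // ordinary InputStream-style read from the new position
      System.out.println("pos=" + in.getPos() + ", first byte after seek=" + b);
    }
  }
}

The examples that follow exercise these same calls (open, seek, readFully, positional read, getPos, close) in real project code.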
Example #1
Source File: TestDataJoin.java From RDFS with Apache License 2.0
private static void confirmOutput(Path out, JobConf job, int srcs) throws IOException {
  FileSystem fs = out.getFileSystem(job);
  FileStatus[] outlist = fs.listStatus(out);
  assertEquals(1, outlist.length);
  assertTrue(0 < outlist[0].getLen());
  FSDataInputStream in = fs.open(outlist[0].getPath());
  LineRecordReader rr = new LineRecordReader(in, 0, Integer.MAX_VALUE, job);
  LongWritable k = new LongWritable();
  Text v = new Text();
  int count = 0;
  while (rr.next(k, v)) {
    String[] vals = v.toString().split("\t");
    assertEquals(srcs + 1, vals.length);
    int[] ivals = new int[vals.length];
    for (int i = 0; i < vals.length; ++i)
      ivals[i] = Integer.parseInt(vals[i]);
    assertEquals(0, ivals[0] % (srcs * srcs));
    for (int i = 1; i < vals.length; ++i) {
      assertEquals((ivals[i] - (i - 1)) * srcs, 10 * ivals[0]);
    }
    ++count;
  }
  assertEquals(4, count);
}
Example #2
Source File: TestFileLocalRead.java From RDFS with Apache License 2.0
static void checkFullFile(FileSystem fs, Path name) throws IOException {
  FileStatus stat = fs.getFileStatus(name);
  BlockLocation[] locations = fs.getFileBlockLocations(stat, 0, fileSize);
  for (int idx = 0; idx < locations.length; idx++) {
    String[] hosts = locations[idx].getNames();
    for (int i = 0; i < hosts.length; i++) {
      System.out.print(hosts[i] + " ");
    }
    System.out.println(" off " + locations[idx].getOffset() +
        " len " + locations[idx].getLength());
  }
  byte[] expected = AppendTestUtil.randomBytes(seed, fileSize);
  FSDataInputStream stm = fs.open(name);
  byte[] actual = new byte[fileSize];
  stm.readFully(0, actual);
  checkData(actual, 0, expected, "Read 2");
  stm.close();
}
Example #3
Source File: StreamFile.java From big-c with Apache License 2.0
/**
 * Send a partial content response with the given range. If there are
 * no satisfiable ranges, or if multiple ranges are requested, which
 * is unsupported, respond with range not satisfiable.
 *
 * @param in stream to read from
 * @param out stream to write to
 * @param response http response to use
 * @param contentLength for the response header
 * @param ranges to write to respond with
 * @throws IOException on error sending the response
 */
static void sendPartialData(FSDataInputStream in,
                            OutputStream out,
                            HttpServletResponse response,
                            long contentLength,
                            List<InclusiveByteRange> ranges)
    throws IOException {
  if (ranges == null || ranges.size() != 1) {
    response.setContentLength(0);
    response.setStatus(HttpServletResponse.SC_REQUESTED_RANGE_NOT_SATISFIABLE);
    response.setHeader("Content-Range",
        InclusiveByteRange.to416HeaderRangeString(contentLength));
  } else {
    InclusiveByteRange singleSatisfiableRange = ranges.get(0);
    long singleLength = singleSatisfiableRange.getSize(contentLength);
    response.setStatus(HttpServletResponse.SC_PARTIAL_CONTENT);
    response.setHeader("Content-Range",
        singleSatisfiableRange.toHeaderRangeString(contentLength));
    copyFromOffset(in, out, singleSatisfiableRange.getFirst(contentLength), singleLength);
  }
}
Example #4
Source File: TestSmallBlock.java From big-c with Apache License 2.0
private void checkFile(FileSystem fileSys, Path name) throws IOException {
  BlockLocation[] locations = fileSys.getFileBlockLocations(
      fileSys.getFileStatus(name), 0, fileSize);
  assertEquals("Number of blocks", fileSize, locations.length);
  FSDataInputStream stm = fileSys.open(name);
  byte[] expected = new byte[fileSize];
  if (simulatedStorage) {
    for (int i = 0; i < expected.length; ++i) {
      expected[i] = SimulatedFSDataset.DEFAULT_DATABYTE;
    }
  } else {
    Random rand = new Random(seed);
    rand.nextBytes(expected);
  }
  // do a sanity check. Read the file
  byte[] actual = new byte[fileSize];
  stm.readFully(0, actual);
  checkAndEraseData(actual, 0, expected, "Read Sanity Test");
  stm.close();
}
Example #5
Source File: Bzip2TextInputFormat.java From spork with Apache License 2.0
public BZip2LineRecordReader(Configuration job, FileSplit split) throws IOException {
  start = split.getStart();
  end = start + split.getLength();
  final Path file = split.getPath();

  // open the file and seek to the start of the split
  FileSystem fs = file.getFileSystem(job);
  FSDataInputStream fileIn = fs.open(split.getPath());
  fileIn.seek(start);

  in = new CBZip2InputStream(fileIn, 9, end);
  if (start != 0) {
    // skip first line and re-establish "start".
    // LineRecordReader.readLine(this.in, null);
    readLine(this.in, null);
    start = in.getPos();
  }
  pos = in.getPos();
}
Example #6
Source File: TestDataJoin.java From hadoop with Apache License 2.0
private static void confirmOutput(Path out, JobConf job, int srcs) throws IOException {
  FileSystem fs = out.getFileSystem(job);
  FileStatus[] outlist = fs.listStatus(out);
  assertEquals(1, outlist.length);
  assertTrue(0 < outlist[0].getLen());
  FSDataInputStream in = fs.open(outlist[0].getPath());
  LineRecordReader rr = new LineRecordReader(in, 0, Integer.MAX_VALUE, job);
  LongWritable k = new LongWritable();
  Text v = new Text();
  int count = 0;
  while (rr.next(k, v)) {
    String[] vals = v.toString().split("\t");
    assertEquals(srcs + 1, vals.length);
    int[] ivals = new int[vals.length];
    for (int i = 0; i < vals.length; ++i)
      ivals[i] = Integer.parseInt(vals[i]);
    assertEquals(0, ivals[0] % (srcs * srcs));
    for (int i = 1; i < vals.length; ++i) {
      assertEquals((ivals[i] - (i - 1)) * srcs, 10 * ivals[0]);
    }
    ++count;
  }
  assertEquals(4, count);
}
Example #7
Source File: TestHadoop2ByteBufferReads.java From parquet-mr with Apache License 2.0
@Test
public void testDirectReadFullyLargeBuffer() throws Exception {
  final ByteBuffer readBuffer = ByteBuffer.allocateDirect(20);

  FSDataInputStream hadoopStream = new FSDataInputStream(new MockHadoopInputStream());
  final MockBufferReader reader = new MockBufferReader(hadoopStream);

  TestUtils.assertThrows("Should throw EOFException",
      EOFException.class, () -> {
        H2SeekableInputStream.readFully(reader, readBuffer);
        return null;
      });

  // NOTE: This behavior differs from readFullyHeapBuffer because direct uses
  // several read operations that will read up to the end of the input. This
  // is a correct value because the bytes in the buffer are valid. This
  // behavior can't be implemented for the heap buffer without using the read
  // method instead of the readFully method on the underlying
  // FSDataInputStream.
  Assert.assertEquals(10, readBuffer.position());
  Assert.assertEquals(20, readBuffer.limit());
}
Example #8
Source File: DistributedRaidFileSystem.java From RDFS with Apache License 2.0
@Override
public FSDataInputStream open(Path f, int bufferSize) throws IOException {
  // We want to use RAID logic only on instance of DFS.
  if (fs instanceof DistributedFileSystem) {
    DistributedFileSystem underlyingDfs = (DistributedFileSystem) fs;
    LocatedBlocks lbs = underlyingDfs.getLocatedBlocks(f, 0L, Long.MAX_VALUE);
    if (lbs != null) {
      // Use underlying filesystem if the file is under construction.
      if (!lbs.isUnderConstruction()) {
        // Use underlying filesystem if file length is 0.
        final long fileSize = getFileSize(lbs);
        if (fileSize > 0) {
          return new ExtFSDataInputStream(conf, this, f, fileSize,
              getBlockSize(lbs), bufferSize);
        }
      }
    }
  }
  return fs.open(f, bufferSize);
}
Example #9
Source File: BCFile.java From hadoop-gpu with Apache License 2.0
public RBlockState(Algorithm compressionAlgo, FSDataInputStream fsin,
    BlockRegion region, Configuration conf) throws IOException {
  this.compressAlgo = compressionAlgo;
  this.region = region;
  this.decompressor = compressionAlgo.getDecompressor();

  try {
    this.in = compressAlgo.createDecompressionStream(
        new BoundedRangeFileInputStream(fsin, this.region.getOffset(),
            this.region.getCompressedSize()),
        decompressor, TFile.getFSInputBufferSize(conf));
  } catch (IOException e) {
    compressAlgo.returnDecompressor(decompressor);
    throw e;
  }
}
Example #10
Source File: FSSpecStoreTest.java From incubator-gobblin with Apache License 2.0
@Override
protected Spec readSpecFromFile(Path path) throws IOException {
  if (path.getName().contains("fail")) {
    throw new IOException("Mean to fail in the test");
  } else if (path.getName().contains("serDeFail")) {
    // Simulate the way that a SerDe exception is triggered while reading a spec.
    FSDataInputStream fis = fs.open(path);
    SerializationUtils.deserialize(ByteStreams.toByteArray(fis));

    // This line should never be reached since we generate SerDe Exception on purpose.
    Assert.assertTrue(false);
    return null;
  } else {
    return initFlowSpec(Files.createTempDir().getAbsolutePath());
  }
}
Example #11
Source File: AvroEventSerializer.java From mt-flume with Apache License 2.0
private Schema loadFromUrl(String schemaUrl) throws IOException {
  Configuration conf = new Configuration();
  Schema.Parser parser = new Schema.Parser();
  if (schemaUrl.toLowerCase().startsWith("hdfs://")) {
    FileSystem fs = FileSystem.get(conf);
    FSDataInputStream input = null;
    try {
      input = fs.open(new Path(schemaUrl));
      return parser.parse(input);
    } finally {
      if (input != null) {
        input.close();
      }
    }
  } else {
    InputStream is = null;
    try {
      is = new URL(schemaUrl).openStream();
      return parser.parse(is);
    } finally {
      if (is != null) {
        is.close();
      }
    }
  }
}
Example #12
Source File: AppendTestUtil.java From big-c with Apache License 2.0
public static void check(FileSystem fs, Path p, long length) throws IOException {
  int i = -1;
  try {
    final FileStatus status = fs.getFileStatus(p);
    FSDataInputStream in = fs.open(p);
    if (in.getWrappedStream() instanceof DFSInputStream) {
      long len = ((DFSInputStream) in.getWrappedStream()).getFileLength();
      assertEquals(length, len);
    } else {
      assertEquals(length, status.getLen());
    }

    for (i++; i < length; i++) {
      assertEquals((byte) i, (byte) in.read());
    }
    i = -(int) length;
    assertEquals(-1, in.read()); //EOF
    in.close();
  } catch (IOException ioe) {
    throw new IOException("p=" + p + ", length=" + length + ", i=" + i, ioe);
  }
}
Example #13
Source File: BlockIOUtils.java From hbase with Apache License 2.0
/**
 * Read from an input stream at least <code>necessaryLen</code> and if possible,
 * <code>extraLen</code> also if available. Analogous to
 * {@link IOUtils#readFully(InputStream, byte[], int, int)}, but uses positional read and
 * specifies a number of "extra" bytes that would be desirable but not absolutely necessary to
 * read.
 * @param buff ByteBuff to read into.
 * @param dis the input stream to read from
 * @param position the position within the stream from which to start reading
 * @param necessaryLen the number of bytes that are absolutely necessary to read
 * @param extraLen the number of extra bytes that would be nice to read
 * @return true if and only if extraLen is > 0 and reading those extra bytes was successful
 * @throws IOException if failed to read the necessary bytes
 */
public static boolean preadWithExtra(ByteBuff buff, FSDataInputStream dis, long position,
    int necessaryLen, int extraLen) throws IOException {
  int remain = necessaryLen + extraLen;
  byte[] buf = new byte[remain];
  int bytesRead = 0;
  while (bytesRead < necessaryLen) {
    int ret = dis.read(position + bytesRead, buf, bytesRead, remain);
    if (ret < 0) {
      throw new IOException("Premature EOF from inputStream (positional read returned " + ret
          + ", was trying to read " + necessaryLen + " necessary bytes and " + extraLen
          + " extra bytes, successfully read " + bytesRead);
    }
    bytesRead += ret;
    remain -= ret;
  }
  // Copy the bytes from on-heap bytes[] to ByteBuffer[] now, and after resolving HDFS-3246, we
  // will read the bytes to ByteBuffer[] directly without allocating any on-heap byte[].
  // TODO I keep the bytes copy here, because I want to abstract the ByteBuffer[]
  // preadWithExtra method for the upper layer, only need to refactor this method if the
  // ByteBuffer pread is OK.
  copyToByteBuff(buf, 0, bytesRead, buff);
  return (extraLen > 0) && (bytesRead == necessaryLen + extraLen);
}
Example #14
Source File: SplitMetaInfoReaderTez.java From tez with Apache License 2.0
public static TaskSplitMetaInfo[] readSplitMetaInfo(Configuration conf, FileSystem fs)
    throws IOException {
  FSDataInputStream in = null;
  try {
    in = getFSDataIS(conf, fs);
    final String jobSplitFile = MRJobConfig.JOB_SPLIT;
    final String basePath = conf.get(MRFrameworkConfigs.TASK_LOCAL_RESOURCE_DIR, ".");
    int numSplits = WritableUtils.readVInt(in); // TODO: check for insane values
    JobSplit.TaskSplitMetaInfo[] allSplitMetaInfo =
        new JobSplit.TaskSplitMetaInfo[numSplits];
    for (int i = 0; i < numSplits; i++) {
      JobSplit.SplitMetaInfo splitMetaInfo = new JobSplit.SplitMetaInfo();
      splitMetaInfo.readFields(in);
      JobSplit.TaskSplitIndex splitIndex = new JobSplit.TaskSplitIndex(
          new Path(basePath, jobSplitFile).toUri().toString(),
          splitMetaInfo.getStartOffset());
      allSplitMetaInfo[i] = new JobSplit.TaskSplitMetaInfo(splitIndex,
          splitMetaInfo.getLocations(), splitMetaInfo.getInputDataLength());
    }
    return allSplitMetaInfo;
  } finally {
    if (in != null) {
      in.close();
    }
  }
}
Example #15
Source File: TestMerger.java From hadoop with Apache License 2.0
private void readOnDiskMapOutput(Configuration conf, FileSystem fs, Path path,
    List<String> keys, List<String> values) throws IOException {
  FSDataInputStream in = CryptoUtils.wrapIfNecessary(conf, fs.open(path));

  IFile.Reader<Text, Text> reader = new IFile.Reader<Text, Text>(conf, in,
      fs.getFileStatus(path).getLen(), null, null);
  DataInputBuffer keyBuff = new DataInputBuffer();
  DataInputBuffer valueBuff = new DataInputBuffer();
  Text key = new Text();
  Text value = new Text();
  while (reader.nextRawKey(keyBuff)) {
    key.readFields(keyBuff);
    keys.add(key.toString());
    reader.nextRawValue(valueBuff);
    value.readFields(valueBuff);
    values.add(value.toString());
  }
}
Example #16
Source File: TestHftpFileSystem.java From RDFS with Apache License 2.0
/**
 * Tests isUnderConstruction() functionality.
 */
public void testIsUnderConstruction() throws Exception {
  // Open output file stream.
  FSDataOutputStream out = hdfs.create(TEST_FILE, true);
  out.writeBytes("test");

  // Test file under construction.
  FSDataInputStream in1 = hftpFs.open(TEST_FILE);
  assertTrue(in1.isUnderConstruction());
  in1.close();

  // Close output file stream.
  out.close();

  // Test file not under construction.
  FSDataInputStream in2 = hftpFs.open(TEST_FILE);
  assertFalse(in2.isUnderConstruction());
  in2.close();
}
Example #17
Source File: TestWebHDFS.java From hadoop with Apache License 2.0
/** test seek */
static void verifySeek(FileSystem fs, Path p, long offset, long length,
    byte[] buf, byte[] expected) throws IOException {
  long remaining = length - offset;
  long checked = 0;
  LOG.info("XXX SEEK: offset=" + offset + ", remaining=" + remaining);

  final Ticker t = new Ticker("SEEK", "offset=%d, remaining=%d", offset, remaining);
  final FSDataInputStream in = fs.open(p, 64 << 10);
  in.seek(offset);
  for (; remaining > 0; ) {
    t.tick(checked, "offset=%d, remaining=%d", offset, remaining);
    final int n = (int) Math.min(remaining, buf.length);
    in.readFully(buf, 0, n);
    checkData(offset, remaining, n, buf, expected);

    offset += n;
    remaining -= n;
    checked += n;
  }
  in.close();
  t.end(checked);
}
Example #18
Source File: TestHftpFileSystem.java From big-c with Apache License 2.0
@Test
public void testReadClosedStream() throws IOException {
  final Path testFile = new Path("/testfile+2");
  FSDataOutputStream os = hdfs.create(testFile, true);
  os.writeBytes("0123456789");
  os.close();

  // ByteRangeInputStream delays opens until reads. Make sure it doesn't
  // open a closed stream that has never been opened
  FSDataInputStream in = hftpFs.open(testFile);
  in.close();
  checkClosedStream(in);
  checkClosedStream(in.getWrappedStream());

  // force the stream to connect and then close it
  in = hftpFs.open(testFile);
  int ch = in.read();
  assertEquals('0', ch);
  in.close();
  checkClosedStream(in);
  checkClosedStream(in.getWrappedStream());

  // make sure seeking doesn't automagically reopen the stream
  in.seek(4);
  checkClosedStream(in);
  checkClosedStream(in.getWrappedStream());
}
Example #19
Source File: TestIFile.java From tez with Apache License 2.0
@Test(timeout = 20000)
// Test file spill over scenario
public void testFileBackedInMemIFileWriter_withSpill() throws IOException {
  List<KVPair> data = new ArrayList<>();
  List<IntWritable> values = new ArrayList<>();

  Text key = new Text("key");
  IntWritable val = new IntWritable(1);
  for (int i = 0; i < 5; i++) {
    data.add(new KVPair(key, val));
    values.add(val);
  }

  // Setting cache limit to 20. Actual data would be around 43 bytes, so it would spill over.
  TezTaskOutputFiles tezTaskOutput = new TezTaskOutputFiles(defaultConf, "uniqueId", 1);
  IFile.FileBackedInMemIFileWriter writer = new IFile.FileBackedInMemIFileWriter(defaultConf,
      localFs, tezTaskOutput, Text.class, IntWritable.class, codec, null, null, 20);
  writer.setOutputPath(outputPath);

  writer.appendKeyValues(data.get(0).getKey(), values.iterator());
  Text lastKey = new Text("key3");
  IntWritable lastVal = new IntWritable(10);
  data.add(new KVPair(lastKey, lastVal));
  writer.append(lastKey, lastVal);
  writer.close();

  assertTrue("Data should have been flushed to disk", writer.isDataFlushedToDisk());

  // Read output content to memory
  FSDataInputStream inStream = localFs.open(outputPath);
  byte[] bytes = new byte[(int) writer.getRawLength()];
  IFile.Reader.readToMemory(bytes, inStream, (int) writer.getCompressedLength(), codec, false, -1);
  inStream.close();

  readUsingInMemoryReader(bytes, data);
}
Example #20
Source File: HoodieCorruptBlock.java From hudi with Apache License 2.0
public static HoodieLogBlock getBlock(HoodieLogFile logFile, FSDataInputStream inputStream,
    Option<byte[]> corruptedBytes, boolean readBlockLazily, long position, long blockSize,
    long blockEndPos, Map<HeaderMetadataType, String> header,
    Map<HeaderMetadataType, String> footer) {

  return new HoodieCorruptBlock(corruptedBytes, inputStream, readBlockLazily,
      Option.of(new HoodieLogBlockContentLocation(logFile, position, blockSize, blockEndPos)),
      header, footer);
}
Example #21
Source File: PartitionPreservingJoinTests.java From datafu with Apache License 2.0
private HashMap<Long, ImpressionClick> loadOutputCounts(String timestamp) throws IOException {
  HashMap<Long, ImpressionClick> counts = new HashMap<Long, ImpressionClick>();
  FileSystem fs = getFileSystem();
  String nestedPath = getNestedPathFromTimestamp(timestamp);
  Assert.assertTrue(fs.exists(new Path(_outputPath, nestedPath)));
  for (FileStatus stat : fs.globStatus(new Path(_outputPath, nestedPath + "/*.avro"))) {
    _log.info(String.format("found: %s (%d bytes)", stat.getPath(), stat.getLen()));
    FSDataInputStream is = fs.open(stat.getPath());
    DatumReader<GenericRecord> reader = new GenericDatumReader<GenericRecord>();
    DataFileStream<GenericRecord> dataFileStream = new DataFileStream<GenericRecord>(is, reader);
    try {
      while (dataFileStream.hasNext()) {
        GenericRecord r = dataFileStream.next();
        Long memberId = (Long) ((GenericRecord) r.get("key")).get("id");
        Integer impressions = (Integer) ((GenericRecord) r.get("value")).get("impressions");
        Integer clicks = (Integer) ((GenericRecord) r.get("value")).get("clicks");
        Assert.assertFalse(counts.containsKey(memberId));
        ImpressionClick data = new ImpressionClick();
        data.clicks = clicks;
        data.impressions = impressions;
        counts.put(memberId, data);
      }
    } finally {
      dataFileStream.close();
    }
  }
  return counts;
}
Example #22
Source File: TestFileConcurrentReader.java From big-c with Apache License 2.0
private void assertBytesAvailable(FileSystem fileSystem, Path path, int numBytes)
    throws IOException {
  byte[] buffer = new byte[numBytes];
  FSDataInputStream inputStream = fileSystem.open(path);
  IOUtils.readFully(inputStream, buffer, 0, numBytes);
  inputStream.close();

  assertTrue("unable to validate bytes",
      validateSequentialBytes(buffer, 0, numBytes));
}
Example #23
Source File: TestCachingStrategy.java From hadoop with Apache License 2.0
@Test(timeout=120000)
public void testSeekAfterSetDropBehind() throws Exception {
  // start a cluster
  LOG.info("testSeekAfterSetDropBehind");
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;
  String TEST_PATH = "/test";
  int TEST_PATH_LEN = MAX_TEST_FILE_LEN;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    FileSystem fs = cluster.getFileSystem();
    createHdfsFile(fs, new Path(TEST_PATH), TEST_PATH_LEN, false);
    // verify that we can seek after setDropBehind
    FSDataInputStream fis = fs.open(new Path(TEST_PATH));
    try {
      Assert.assertTrue(fis.read() != -1); // create BlockReader
      fis.setDropBehind(false);            // clear BlockReader
      fis.seek(2);                         // seek
    } finally {
      fis.close();
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Example #24
Source File: TestBlockTokenWithDFS.java From hadoop with Apache License 2.0
private boolean checkFile2(FSDataInputStream in) {
  byte[] toRead = new byte[FILE_SIZE];
  try {
    assertEquals("Cannot read file", toRead.length,
        in.read(0, toRead, 0, toRead.length));
  } catch (IOException e) {
    return false;
  }
  return checkFile(toRead);
}
Example #25
Source File: ColumnarStoreMetricsDesc.java From kylin-on-parquet-v2 with Apache License 2.0
public ColumnDataReader getMetricsReaderFromFSInput(FSDataInputStream inputStream,
    int columnDataStartOffset, int columnDataLength, int rowCount) throws IOException {
  if (Compression.LZ4 == compression && fixLen != -1) {
    return new FSInputLZ4CompressedColumnReader(inputStream, columnDataStartOffset,
        columnDataLength, rowCount);
  }
  if (fixLen != -1) {
    return new FSInputNoCompressedColumnReader(inputStream, columnDataStartOffset,
        columnDataLength / rowCount, rowCount);
  }
  return new FSInputGeneralColumnDataReader(inputStream, columnDataStartOffset, columnDataLength);
}
Example #26
Source File: HDFSFileSystemUnitTest.java From Scribengin with GNU Affero General Public License v3.0
@Test
public void testCreateReadWrite() throws Exception {
  String TEXT = "hello";
  Path testPath = new Path("./build/hdfs/test.txt");
  FSDataOutputStream os = fs.create(testPath);
  os.write(TEXT.getBytes());
  os.close();

  FSDataInputStream is = fs.open(testPath);
  String text = IOUtil.getStreamContentAsString(is, "UTF-8");
  Assert.assertEquals(TEXT, text);
}
Example #27
Source File: TestWebHDFSForHA.java From hadoop with Apache License 2.0
@Test
public void testFailoverAfterOpen() throws IOException {
  Configuration conf = DFSTestUtil.newHAConfiguration(LOGICAL_NAME);
  conf.set(FS_DEFAULT_NAME_KEY, HdfsConstants.HDFS_URI_SCHEME + "://" + LOGICAL_NAME);
  MiniDFSCluster cluster = null;
  FileSystem fs = null;
  final Path p = new Path("/test");
  final byte[] data = "Hello".getBytes();

  try {
    cluster = new MiniDFSCluster.Builder(conf).nnTopology(topo)
        .numDataNodes(1).build();

    HATestUtil.setFailoverConfigurations(cluster, conf, LOGICAL_NAME);
    cluster.waitActive();

    fs = FileSystem.get(WEBHDFS_URI, conf);
    cluster.transitionToActive(1);

    FSDataOutputStream out = fs.create(p);
    cluster.shutdownNameNode(1);
    cluster.transitionToActive(0);
    out.write(data);
    out.close();

    FSDataInputStream in = fs.open(p);
    byte[] buf = new byte[data.length];
    IOUtils.readFully(in, buf, 0, buf.length);
    Assert.assertArrayEquals(data, buf);
  } finally {
    IOUtils.cleanup(null, fs);
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Example #28
Source File: NetFlowReaderSuite.java From spark-netflow with Apache License 2.0
@Test
public void testFilterScan7() throws IOException {
  // test filter scan with statistics on time
  String file = getClass().getResource("/correct/ftv5.2016-01-13.compress.9.sample").getPath();
  FSDataInputStream stm = getTestStream(file);
  NetFlowReader nr = NetFlowReader.prepareReader(stm, 30000);

  Column[] cols = new Column[]{NetFlowV5.FIELD_SRCADDR, NetFlowV5.FIELD_SRCPORT,
      NetFlowV5.FIELD_DSTPORT};
  FilterPredicate filter = FilterApi.or(
      FilterApi.and(
          FilterApi.ge(NetFlowV5.FIELD_UNIX_SECS, -1L),
          FilterApi.lt(NetFlowV5.FIELD_SRCPORT, 5)
      ),
      FilterApi.eq(NetFlowV5.FIELD_DSTPORT, 200)
  );

  RecordBuffer rb = nr.prepareRecordBuffer(cols, filter);
  assertEquals(rb.getClass(), FilterRecordBuffer.class);

  Iterator<Object[]> iter = rb.iterator();
  Object[][] records = new Object[6][3];
  int i = 0;
  while (iter.hasNext()) {
    records[i++] = iter.next();
  }

  Object[][] expected = new Object[][] {
      {0L, 0, 65280},
      {1L, 1, 65281},
      {2L, 2, 65282},
      {3L, 3, 65283},
      {4L, 4, 65284},
      {456L, 456, 200}
  };

  assertArrayEquals(records, expected);
}
Example #29
Source File: TestBlockReaderLocalLegacy.java From big-c with Apache License 2.0
@Test
public void testBothOldAndNewShortCircuitConfigured() throws Exception {
  final short REPL_FACTOR = 1;
  final int FILE_LENGTH = 512;
  Assume.assumeTrue(null == DomainSocket.getLoadingFailureReason());
  TemporarySocketDirectory socketDir = new TemporarySocketDirectory();
  HdfsConfiguration conf = getConfiguration(socketDir);
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  cluster.waitActive();
  socketDir.close();
  FileSystem fs = cluster.getFileSystem();

  Path path = new Path("/foo");
  byte orig[] = new byte[FILE_LENGTH];
  for (int i = 0; i < orig.length; i++) {
    orig[i] = (byte) (i % 10);
  }
  FSDataOutputStream fos = fs.create(path, (short) 1);
  fos.write(orig);
  fos.close();

  DFSTestUtil.waitReplication(fs, path, REPL_FACTOR);
  FSDataInputStream fis = cluster.getFileSystem().open(path);
  byte buf[] = new byte[FILE_LENGTH];
  IOUtils.readFully(fis, buf, 0, FILE_LENGTH);
  fis.close();
  Assert.assertArrayEquals(orig, buf);
  Arrays.equals(orig, buf);
  cluster.shutdown();
}
Example #30
Source File: RandomAccessByteStream.java From succinct with Apache License 2.0
public RandomAccessByteStream(FSDataInputStream stream, long startPos, long limit)
    throws IOException {
  this.stream = stream;
  this.startPos = startPos;
  this.limit = limit;
  stream.seek(startPos);
}