org.apache.hadoop.io.DataInputBuffer Java Examples
The following examples show how to use org.apache.hadoop.io.DataInputBuffer.
The source file, originating project, and license are noted above each example.
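Most of these examples follow the same pattern: serialize into a DataOutputBuffer, then call DataInputBuffer.reset() over that buffer's backing array so the same bytes can be read back without copying. The sketch below is a minimal, self-contained illustration of that round trip; the class name DataInputBufferRoundTrip and the use of Text are illustrative choices, not taken from any project below.

import java.io.IOException;

import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.Text;

public class DataInputBufferRoundTrip {
  public static void main(String[] args) throws IOException {
    // Serialize a Writable into an in-memory, growable output buffer.
    DataOutputBuffer out = new DataOutputBuffer();
    new Text("hello").write(out);

    // Point a DataInputBuffer at the output buffer's backing array.
    // reset() wraps the array directly; no bytes are copied.
    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), 0, out.getLength());

    // Deserialize from the wrapped bytes.
    Text roundTripped = new Text();
    roundTripped.readFields(in);
    System.out.println(roundTripped); // prints: hello
  }
}

Note that out.getData() exposes the internal array, which may be longer than the valid data, so reads must always be bounded by out.getLength(), as the examples below do.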
Example #1
Source File: InMemoryReader.java From incubator-tez with Apache License 2.0
public void nextRawValue(DataInputBuffer value) throws IOException {
  try {
    int pos = memDataIn.getPosition();
    byte[] data = memDataIn.getData();
    value.reset(data, pos, currentValueLength);

    // Position for the next record
    long skipped = memDataIn.skip(currentValueLength);
    if (skipped != currentValueLength) {
      throw new IOException("Rec# " + recNo +
          ": Failed to skip past value of length: " + currentValueLength);
    }

    // Record the bytes read
    bytesRead += currentValueLength;
    ++recNo;
  } catch (IOException ioe) {
    dumpOnError();
    throw ioe;
  }
}
Example #2
Source File: TestIFile.java From tez with Apache License 2.0
@Test(timeout = 5000)
// Test appendValue with DataInputBuffer
public void testAppendValueWithDataInputBuffer() throws IOException {
  List<KVPair> data = KVDataGen.generateTestData(false, rnd.nextInt(100));
  IFile.Writer writer = new IFile.Writer(defaultConf, localFs, outputPath,
      Text.class, IntWritable.class, codec, null, null);

  final DataInputBuffer previousKey = new DataInputBuffer();
  DataInputBuffer key = new DataInputBuffer();
  DataInputBuffer value = new DataInputBuffer();
  for (KVPair kvp : data) {
    populateData(kvp, key, value);

    if (previousKey != null && BufferUtils.compare(key, previousKey) == 0) {
      writer.appendValue(value);
    } else {
      writer.append(key, value);
    }
    previousKey.reset(key.getData(), 0, key.getLength());
  }
  writer.close();

  readAndVerifyData(writer.getRawLength(), writer.getCompressedLength(), data, codec);
}
Example #3
Source File: TestJspHelper.java From hadoop with Apache License 2.0
@Test
public void testReadWriteReplicaState() {
  try {
    DataOutputBuffer out = new DataOutputBuffer();
    DataInputBuffer in = new DataInputBuffer();
    for (HdfsServerConstants.ReplicaState repState :
        HdfsServerConstants.ReplicaState.values()) {
      repState.write(out);
      in.reset(out.getData(), out.getLength());
      HdfsServerConstants.ReplicaState result =
          HdfsServerConstants.ReplicaState.read(in);
      assertTrue("testReadWrite error !!!", repState == result);
      out.reset();
      in.reset();
    }
  } catch (Exception ex) {
    fail("testReadWrite ex error ReplicaState");
  }
}
Example #4
Source File: TestWritableSerialization.java From big-c with Apache License 2.0
@Test
@SuppressWarnings({"rawtypes", "unchecked"})
public void testWritableComparatorJavaSerialization() throws Exception {
  Serialization ser = new JavaSerialization();
  Serializer<TestWC> serializer = ser.getSerializer(TestWC.class);
  DataOutputBuffer dob = new DataOutputBuffer();
  serializer.open(dob);
  TestWC orig = new TestWC(0);
  serializer.serialize(orig);
  serializer.close();

  Deserializer<TestWC> deserializer = ser.getDeserializer(TestWC.class);
  DataInputBuffer dib = new DataInputBuffer();
  dib.reset(dob.getData(), 0, dob.getLength());
  deserializer.open(dib);
  TestWC deser = deserializer.deserialize(null);
  deserializer.close();
  assertEquals(orig, deser);
}
Example #5
Source File: ReducePartition.java From RDFS with Apache License 2.0
public boolean next(DataInputBuffer key, DataInputBuffer value)
    throws IOException {
  MemoryBlockIndex memBlkIdx = keyValueIterator.next();
  if (memBlkIdx != null) {
    int pos = memBlkIdx.getIndex();
    MemoryBlock memBlk = memBlkIdx.getMemoryBlock();
    int offset = memBlk.offsets[pos];
    int keyLen = memBlk.keyLenArray[pos];
    int valLen = memBlk.valueLenArray[pos];
    dataOutputBuffer.reset();
    dataOutputBuffer.writeInt(keyLen);
    dataOutputBuffer.write(kvbuffer, offset, keyLen);
    dataOutputBuffer.writeInt(valLen);
    dataOutputBuffer.write(kvbuffer, offset + keyLen, valLen);
    key.reset(dataOutputBuffer.getData(), 0,
        keyLen + WritableUtils.INT_LENGTH_BYTES);
    value.reset(dataOutputBuffer.getData(),
        keyLen + WritableUtils.INT_LENGTH_BYTES,
        valLen + WritableUtils.INT_LENGTH_BYTES);
    return true;
  }
  return false;
}
Example #6
Source File: ShuffledUnorderedKVReader.java From incubator-tez with Apache License 2.0
public ShuffledUnorderedKVReader(ShuffleManager shuffleManager,
    Configuration conf, CompressionCodec codec, boolean ifileReadAhead,
    int ifileReadAheadLength, int ifileBufferSize,
    TezCounter inputRecordCounter) throws IOException {
  this.shuffleManager = shuffleManager;
  this.codec = codec;
  this.ifileReadAhead = ifileReadAhead;
  this.ifileReadAheadLength = ifileReadAheadLength;
  this.ifileBufferSize = ifileBufferSize;
  this.inputRecordCounter = inputRecordCounter;

  this.keyClass = ConfigUtils.getIntermediateInputKeyClass(conf);
  this.valClass = ConfigUtils.getIntermediateInputValueClass(conf);

  this.keyIn = new DataInputBuffer();
  this.valIn = new DataInputBuffer();

  SerializationFactory serializationFactory = new SerializationFactory(conf);

  this.keyDeserializer = serializationFactory.getDeserializer(keyClass);
  this.keyDeserializer.open(keyIn);
  this.valDeserializer = serializationFactory.getDeserializer(valClass);
  this.valDeserializer.open(valIn);
}
Example #7
Source File: TestWritableJobConf.java From hadoop with Apache License 2.0
private <K> K serDeser(K conf) throws Exception {
  SerializationFactory factory = new SerializationFactory(CONF);
  Serializer<K> serializer =
      factory.getSerializer(GenericsUtil.getClass(conf));
  Deserializer<K> deserializer =
      factory.getDeserializer(GenericsUtil.getClass(conf));

  DataOutputBuffer out = new DataOutputBuffer();
  serializer.open(out);
  serializer.serialize(conf);
  serializer.close();

  DataInputBuffer in = new DataInputBuffer();
  in.reset(out.getData(), out.getLength());
  deserializer.open(in);
  K after = deserializer.deserialize(null);
  deserializer.close();
  return after;
}
Example #8
Source File: TestWritableJobConf.java From hadoop-gpu with Apache License 2.0
private <K> K serDeser(K conf) throws Exception {
  SerializationFactory factory = new SerializationFactory(CONF);
  Serializer<K> serializer =
      factory.getSerializer(GenericsUtil.getClass(conf));
  Deserializer<K> deserializer =
      factory.getDeserializer(GenericsUtil.getClass(conf));

  DataOutputBuffer out = new DataOutputBuffer();
  serializer.open(out);
  serializer.serialize(conf);
  serializer.close();

  DataInputBuffer in = new DataInputBuffer();
  in.reset(out.getData(), out.getLength());
  deserializer.open(in);
  K after = deserializer.deserialize(null);
  deserializer.close();
  return after;
}
Example #9
Source File: QueryWritableTest.java From incubator-retired-blur with Apache License 2.0
@Test
public void testTermQuery() throws IOException {
  TermQuery query = new TermQuery(new Term("field", "value"));
  QueryWritable queryWritable = new QueryWritable();
  queryWritable.setQuery(query);
  DataOutputBuffer out = new DataOutputBuffer();
  queryWritable.write(out);
  byte[] data = out.getData();
  int length = out.getLength();

  DataInputBuffer in = new DataInputBuffer();
  in.reset(data, length);
  QueryWritable newQueryWritable = new QueryWritable();
  newQueryWritable.readFields(in);
  Query termQuery = newQueryWritable.getQuery();
  assertEquals(query, termQuery);
}
Example #10
Source File: TestPBRecordImpl.java From hadoop with Apache License 2.0
@Test(timeout=10000)
public void testLocalizerStatusSerDe() throws Exception {
  LocalizerStatus rsrcS = createLocalizerStatus();
  assertTrue(rsrcS instanceof LocalizerStatusPBImpl);
  LocalizerStatusPBImpl rsrcPb = (LocalizerStatusPBImpl) rsrcS;
  DataOutputBuffer out = new DataOutputBuffer();
  rsrcPb.getProto().writeDelimitedTo(out);
  DataInputBuffer in = new DataInputBuffer();
  in.reset(out.getData(), 0, out.getLength());
  LocalizerStatusProto rsrcPbD = LocalizerStatusProto.parseDelimitedFrom(in);
  assertNotNull(rsrcPbD);
  LocalizerStatus rsrcD = new LocalizerStatusPBImpl(rsrcPbD);

  assertEquals(rsrcS, rsrcD);
  assertEquals("localizer0", rsrcS.getLocalizerId());
  assertEquals("localizer0", rsrcD.getLocalizerId());
  assertEquals(createLocalResourceStatus(), rsrcS.getResourceStatus(0));
  assertEquals(createLocalResourceStatus(), rsrcD.getResourceStatus(0));
}
Example #11
Source File: TestPBRecordImpl.java From hadoop with Apache License 2.0
@Test(timeout=10000)
public void testLocalResourceStatusSerDe() throws Exception {
  LocalResourceStatus rsrcS = createLocalResourceStatus();
  assertTrue(rsrcS instanceof LocalResourceStatusPBImpl);
  LocalResourceStatusPBImpl rsrcPb = (LocalResourceStatusPBImpl) rsrcS;
  DataOutputBuffer out = new DataOutputBuffer();
  rsrcPb.getProto().writeDelimitedTo(out);
  DataInputBuffer in = new DataInputBuffer();
  in.reset(out.getData(), 0, out.getLength());
  LocalResourceStatusProto rsrcPbD =
      LocalResourceStatusProto.parseDelimitedFrom(in);
  assertNotNull(rsrcPbD);
  LocalResourceStatus rsrcD = new LocalResourceStatusPBImpl(rsrcPbD);

  assertEquals(rsrcS, rsrcD);
  assertEquals(createResource(), rsrcS.getResource());
  assertEquals(createResource(), rsrcD.getResource());
}
Example #12
Source File: SerializationTestUtil.java From big-c with Apache License 2.0
/**
 * A utility that tests serialization/deserialization.
 * @param conf configuration to use, "io.serializations" is read to
 * determine the serialization
 * @param <K> the class of the item
 * @param before item to (de)serialize
 * @return deserialized item
 */
public static <K> K testSerialization(Configuration conf, K before)
    throws Exception {
  SerializationFactory factory = new SerializationFactory(conf);
  Serializer<K> serializer =
      factory.getSerializer(GenericsUtil.getClass(before));
  Deserializer<K> deserializer =
      factory.getDeserializer(GenericsUtil.getClass(before));

  DataOutputBuffer out = new DataOutputBuffer();
  serializer.open(out);
  serializer.serialize(before);
  serializer.close();

  DataInputBuffer in = new DataInputBuffer();
  in.reset(out.getData(), out.getLength());
  deserializer.open(in);
  K after = deserializer.deserialize(null);
  deserializer.close();
  return after;
}
Example #13
Source File: TFile.java From hadoop with Apache License 2.0
/**
 * Constructor
 *
 * @param reader
 *          The TFile reader object.
 * @param begin
 *          Begin location of the scan.
 * @param end
 *          End location of the scan.
 * @throws IOException
 */
Scanner(Reader reader, Location begin, Location end) throws IOException {
  this.reader = reader;
  // ensure the TFile index is loaded throughout the life of scanner.
  reader.checkTFileDataIndex();
  beginLocation = begin;
  endLocation = end;

  valTransferBuffer = new BytesWritable();
  // TODO: remember the longest key in a TFile, and use it to replace
  // MAX_KEY_SIZE.
  keyBuffer = new byte[MAX_KEY_SIZE];
  keyDataInputStream = new DataInputBuffer();
  valueBufferInputStream = new ChunkDecoder();
  valueDataInputStream = new DataInputStream(valueBufferInputStream);

  if (beginLocation.compareTo(endLocation) >= 0) {
    currentLocation = new Location(endLocation);
  } else {
    currentLocation = new Location(0, 0);
    initBlock(beginLocation.getBlockIndex());
    inBlockAdvance(beginLocation.getRecordIndex());
  }
}
Example #14
Source File: BinInterSedes.java From spork with Apache License 2.0
@SuppressWarnings("unchecked") private int compareBinInterSedesBag(ByteBuffer bb1, ByteBuffer bb2, byte dt1, byte dt2) throws IOException { int s1 = bb1.position(); int s2 = bb2.position(); int l1 = bb1.remaining(); int l2 = bb2.remaining(); // first compare sizes int bsz1 = readSize(bb1, dt1); int bsz2 = readSize(bb2, dt2); if (bsz1 > bsz2) return 1; else if (bsz1 < bsz2) return -1; else { DataInputBuffer buffer1 = new DataInputBuffer(); DataInputBuffer buffer2 = new DataInputBuffer(); buffer1.reset(bb1.array(), s1, l1); buffer2.reset(bb2.array(), s2, l2); DataBag bag1 = (DataBag) mSedes.readDatum(buffer1, dt1); DataBag bag2 = (DataBag) mSedes.readDatum(buffer2, dt2); bb1.position(buffer1.getPosition()); bb2.position(buffer2.getPosition()); return bag1.compareTo(bag2); } }
Example #15
Source File: TestDelegationToken.java From hadoop with Apache License 2.0
private boolean testDelegationTokenIdentiferSerializationRoundTrip(
    Text owner, Text renewer, Text realUser) throws IOException {
  TestDelegationTokenIdentifier dtid =
      new TestDelegationTokenIdentifier(owner, renewer, realUser);
  DataOutputBuffer out = new DataOutputBuffer();
  dtid.writeImpl(out);
  DataInputBuffer in = new DataInputBuffer();
  in.reset(out.getData(), out.getLength());
  try {
    TestDelegationTokenIdentifier dtid2 = new TestDelegationTokenIdentifier();
    dtid2.readFields(in);
    assertTrue(dtid.equals(dtid2));
    return true;
  } catch (IOException e) {
    return false;
  }
}
Example #16
Source File: IFile.java From big-c with Apache License 2.0
public void nextRawValue(DataInputBuffer value) throws IOException {
  final byte[] valBytes =
      (value.getData().length < currentValueLength)
          ? new byte[currentValueLength << 1]
          : value.getData();
  int i = readData(valBytes, 0, currentValueLength);
  if (i != currentValueLength) {
    throw new IOException("Asked for " + currentValueLength + " Got: " + i);
  }
  value.reset(valBytes, currentValueLength);

  // Record the bytes read
  bytesRead += currentValueLength;
  ++recNo;
  ++numRecordsRead;
}
Example #17
Source File: IFile.java From hadoop with Apache License 2.0
public void append(DataInputBuffer key, DataInputBuffer value)
    throws IOException {
  int keyLength = key.getLength() - key.getPosition();
  if (keyLength < 0) {
    throw new IOException("Negative key-length not allowed: " + keyLength +
        " for " + key);
  }

  int valueLength = value.getLength() - value.getPosition();
  if (valueLength < 0) {
    throw new IOException("Negative value-length not allowed: " +
        valueLength + " for " + value);
  }

  WritableUtils.writeVInt(out, keyLength);
  WritableUtils.writeVInt(out, valueLength);
  out.write(key.getData(), key.getPosition(), keyLength);
  out.write(value.getData(), value.getPosition(), valueLength);

  // Update bytes written
  decompressedBytesWritten += keyLength + valueLength +
      WritableUtils.getVIntSize(keyLength) +
      WritableUtils.getVIntSize(valueLength);
  ++numRecordsWritten;
}
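As the writer above shows, each record is encoded as two vints (the key length and the value length) followed by the raw key and value bytes. For illustration, here is a minimal reading-side sketch of that layout; it assumes a single uncompressed record in a byte array, the class and method names are hypothetical, and the real readers in Examples #16, #26, and #27 additionally handle buffer reuse, RLE markers, and end-of-stream.

import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;

import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.WritableUtils;

public class RawRecordReadSketch {
  // Reads one record laid out as: vint keyLen, vint valLen, key bytes, value bytes.
  static void readRecord(byte[] record, DataInputBuffer key, DataInputBuffer value)
      throws IOException {
    DataInputStream in = new DataInputStream(new ByteArrayInputStream(record));
    int keyLength = WritableUtils.readVInt(in);
    int valueLength = WritableUtils.readVInt(in);

    byte[] keyBytes = new byte[keyLength];
    in.readFully(keyBytes);
    byte[] valBytes = new byte[valueLength];
    in.readFully(valBytes);

    // Hand the raw bytes to the caller's buffers without further copying.
    key.reset(keyBytes, 0, keyLength);
    value.reset(valBytes, 0, valueLength);
  }
}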
Example #18
Source File: TestPBRecordImpl.java From big-c with Apache License 2.0
@Test(timeout=10000)
public void testLocalResourceStatusSerDe() throws Exception {
  LocalResourceStatus rsrcS = createLocalResourceStatus();
  assertTrue(rsrcS instanceof LocalResourceStatusPBImpl);
  LocalResourceStatusPBImpl rsrcPb = (LocalResourceStatusPBImpl) rsrcS;
  DataOutputBuffer out = new DataOutputBuffer();
  rsrcPb.getProto().writeDelimitedTo(out);
  DataInputBuffer in = new DataInputBuffer();
  in.reset(out.getData(), 0, out.getLength());
  LocalResourceStatusProto rsrcPbD =
      LocalResourceStatusProto.parseDelimitedFrom(in);
  assertNotNull(rsrcPbD);
  LocalResourceStatus rsrcD = new LocalResourceStatusPBImpl(rsrcPbD);

  assertEquals(rsrcS, rsrcD);
  assertEquals(createResource(), rsrcS.getResource());
  assertEquals(createResource(), rsrcD.getResource());
}
Example #19
Source File: ValuesIterator.java From tez with Apache License 2.0
/**
 * Read the next key - which may be the same as the current key.
 */
private void readNextKey() throws IOException {
  more = in.next();
  if (more) {
    DataInputBuffer nextKeyBytes = in.getKey();
    if (!in.isSameKey()) {
      keyIn.reset(nextKeyBytes.getData(), nextKeyBytes.getPosition(),
          nextKeyBytes.getLength() - nextKeyBytes.getPosition());
      nextKey = keyDeserializer.deserialize(nextKey);
      // hasMoreValues = is it first key or is key the same?
      hasMoreValues = (key == null) || (comparator.compare(key, nextKey) == 0);
      if (key == null || false == hasMoreValues) {
        // invariant: more=true & there are no more values in an existing key group
        // so this indicates start of new key group
        if (inputKeyCounter != null) {
          inputKeyCounter.increment(1);
        }
        ++keyCtr;
      }
    } else {
      hasMoreValues = in.isSameKey();
    }
  } else {
    hasMoreValues = false;
  }
}
Example #20
Source File: TestWritableSerialization.java From hadoop-gpu with Apache License 2.0
/**
 * A utility that tests serialization/deserialization.
 * @param <K> the class of the item
 * @param conf configuration to use, "io.serializations" is read to
 * determine the serialization
 * @param before item to (de)serialize
 * @return deserialized item
 */
public static <K> K testSerialization(Configuration conf, K before)
    throws Exception {
  SerializationFactory factory = new SerializationFactory(conf);
  Serializer<K> serializer =
      factory.getSerializer(GenericsUtil.getClass(before));
  Deserializer<K> deserializer =
      factory.getDeserializer(GenericsUtil.getClass(before));

  DataOutputBuffer out = new DataOutputBuffer();
  serializer.open(out);
  serializer.serialize(before);
  serializer.close();

  DataInputBuffer in = new DataInputBuffer();
  in.reset(out.getData(), out.getLength());
  deserializer.open(in);
  K after = deserializer.deserialize(null);
  deserializer.close();

  assertEquals(before, after);
  return after;
}
Example #21
Source File: SerializationTestUtil.java From hadoop with Apache License 2.0
/**
 * A utility that tests serialization/deserialization.
 * @param conf configuration to use, "io.serializations" is read to
 * determine the serialization
 * @param <K> the class of the item
 * @param before item to (de)serialize
 * @return deserialized item
 */
public static <K> K testSerialization(Configuration conf, K before)
    throws Exception {
  SerializationFactory factory = new SerializationFactory(conf);
  Serializer<K> serializer =
      factory.getSerializer(GenericsUtil.getClass(before));
  Deserializer<K> deserializer =
      factory.getDeserializer(GenericsUtil.getClass(before));

  DataOutputBuffer out = new DataOutputBuffer();
  serializer.open(out);
  serializer.serialize(before);
  serializer.close();

  DataInputBuffer in = new DataInputBuffer();
  in.reset(out.getData(), out.getLength());
  deserializer.open(in);
  K after = deserializer.deserialize(null);
  deserializer.close();
  return after;
}
Example #22
Source File: SequenceFile.java From gemfirexd-oss with Apache License 2.0
@Override
public void writeUncompressedBytes(DataOutputStream outStream)
    throws IOException {
  if (decompressedStream == null) {
    rawData = new DataInputBuffer();
    decompressedStream = codec.createInputStream(rawData);
  } else {
    decompressedStream.resetState();
  }
  rawData.reset(data, 0, dataSize);

  byte[] buffer = new byte[8192];
  int bytesRead = 0;
  while ((bytesRead = decompressedStream.read(buffer, 0, 8192)) != -1) {
    outStream.write(buffer, 0, bytesRead);
  }
}
Example #23
Source File: TestSerialization.java From hbase with Apache License 2.0
/**
 * Test RegionInfo serialization
 * @throws Exception
 */
@Test
public void testRegionInfo() throws Exception {
  RegionInfo hri = createRandomRegion("testRegionInfo");

  // test toByteArray()
  byte[] hrib = RegionInfo.toByteArray(hri);
  RegionInfo deserializedHri = RegionInfo.parseFrom(hrib);
  assertEquals(hri.getEncodedName(), deserializedHri.getEncodedName());
  assertEquals(hri, deserializedHri);

  // test toDelimitedByteArray()
  hrib = RegionInfo.toDelimitedByteArray(hri);
  DataInputBuffer buf = new DataInputBuffer();
  try {
    buf.reset(hrib, hrib.length);
    deserializedHri = RegionInfo.parseFrom(buf);
    assertEquals(hri.getEncodedName(), deserializedHri.getEncodedName());
    assertEquals(hri, deserializedHri);
  } finally {
    buf.close();
  }
}
Example #24
Source File: TestClientKeyValueLocal.java From phoenix with BSD 3-Clause "New" or "Revised" License
private void validate(KeyValue kv, byte[] row, byte[] family, byte[] qualifier,
    long ts, Type type, byte[] value) throws IOException {
  DataOutputBuffer out = new DataOutputBuffer();
  kv.write(out);
  out.close();
  byte[] data = out.getData();

  // read it back in
  KeyValue read = new KeyValue();
  DataInputBuffer in = new DataInputBuffer();
  in.reset(data, data.length);
  read.readFields(in);
  in.close();

  // validate that it's the same
  assertTrue("Row didn't match!", Bytes.equals(row, read.getRow()));
  assertTrue("Family didn't match!", Bytes.equals(family, read.getFamily()));
  assertTrue("Qualifier didn't match!", Bytes.equals(qualifier, read.getQualifier()));
  assertTrue("Value didn't match!", Bytes.equals(value, read.getValue()));
  assertEquals("Timestamp didn't match", ts, read.getTimestamp());
  assertEquals("Type didn't match", type.getCode(), read.getType());
}
Example #25
Source File: TFile.java From hadoop-gpu with Apache License 2.0
/**
 * Constructor
 *
 * @param reader
 *          The TFile reader object.
 * @param begin
 *          Begin location of the scan.
 * @param end
 *          End location of the scan.
 * @throws IOException
 */
Scanner(Reader reader, Location begin, Location end) throws IOException {
  this.reader = reader;
  // ensure the TFile index is loaded throughout the life of scanner.
  reader.checkTFileDataIndex();
  beginLocation = begin;
  endLocation = end;

  valTransferBuffer = new BytesWritable();
  // TODO: remember the longest key in a TFile, and use it to replace
  // MAX_KEY_SIZE.
  keyBuffer = new byte[MAX_KEY_SIZE];
  keyDataInputStream = new DataInputBuffer();
  valueBufferInputStream = new ChunkDecoder();
  valueDataInputStream = new DataInputStream(valueBufferInputStream);

  if (beginLocation.compareTo(endLocation) >= 0) {
    currentLocation = new Location(endLocation);
  } else {
    currentLocation = new Location(0, 0);
    initBlock(beginLocation.getBlockIndex());
    inBlockAdvance(beginLocation.getRecordIndex());
  }
}
Example #26
Source File: IFile.java From big-c with Apache License 2.0
public boolean nextRawKey(DataInputBuffer key) throws IOException {
  if (!positionToNextRecord(dataIn)) {
    return false;
  }
  if (keyBytes.length < currentKeyLength) {
    keyBytes = new byte[currentKeyLength << 1];
  }
  int i = readData(keyBytes, 0, currentKeyLength);
  if (i != currentKeyLength) {
    throw new IOException("Asked for " + currentKeyLength + " Got: " + i);
  }
  key.reset(keyBytes, currentKeyLength);
  bytesRead += currentKeyLength;
  return true;
}
Example #27
Source File: InMemoryReader.java From incubator-tez with Apache License 2.0
public KeyState readRawKey(DataInputBuffer key) throws IOException {
  try {
    if (!positionToNextRecord(memDataIn)) {
      return KeyState.NO_KEY;
    }
    // Setup the key
    int pos = memDataIn.getPosition();
    byte[] data = memDataIn.getData();
    if (currentKeyLength == IFile.RLE_MARKER) {
      // get key length from original key
      key.reset(data, originalKeyPos, originalKeyLength);
      return KeyState.SAME_KEY;
    }
    key.reset(data, pos, currentKeyLength);

    // Position for the next value
    long skipped = memDataIn.skip(currentKeyLength);
    if (skipped != currentKeyLength) {
      throw new IOException("Rec# " + recNo +
          ": Failed to skip past key of length: " + currentKeyLength);
    }
    bytesRead += currentKeyLength;
    return KeyState.NEW_KEY;
  } catch (IOException ioe) {
    dumpOnError();
    throw ioe;
  }
}
Example #28
Source File: TestCodec.java From big-c with Apache License 2.0
void GzipConcatTest(Configuration conf,
    Class<? extends Decompressor> decomClass) throws IOException {
  Random r = new Random();
  long seed = r.nextLong();
  r.setSeed(seed);
  LOG.info(decomClass + " seed: " + seed);

  final int CONCAT = r.nextInt(4) + 3;
  final int BUFLEN = 128 * 1024;
  DataOutputBuffer dflbuf = new DataOutputBuffer();
  DataOutputBuffer chkbuf = new DataOutputBuffer();
  byte[] b = new byte[BUFLEN];
  for (int i = 0; i < CONCAT; ++i) {
    GZIPOutputStream gzout = new GZIPOutputStream(dflbuf);
    r.nextBytes(b);
    int len = r.nextInt(BUFLEN);
    int off = r.nextInt(BUFLEN - len);
    chkbuf.write(b, off, len);
    gzout.write(b, off, len);
    gzout.close();
  }
  final byte[] chk = Arrays.copyOf(chkbuf.getData(), chkbuf.getLength());

  CompressionCodec codec = ReflectionUtils.newInstance(GzipCodec.class, conf);
  Decompressor decom = codec.createDecompressor();
  assertNotNull(decom);
  assertEquals(decomClass, decom.getClass());
  DataInputBuffer gzbuf = new DataInputBuffer();
  gzbuf.reset(dflbuf.getData(), dflbuf.getLength());
  InputStream gzin = codec.createInputStream(gzbuf, decom);

  dflbuf.reset();
  IOUtils.copyBytes(gzin, dflbuf, 4096);
  final byte[] dflchk = Arrays.copyOf(dflbuf.getData(), dflbuf.getLength());
  assertArrayEquals(chk, dflchk);
}
Example #29
Source File: TestGridmixRecord.java From RDFS with Apache License 2.0
static void lengthTest(GridmixRecord x, GridmixRecord y, int min, int max)
    throws Exception {
  final Random r = new Random();
  final long seed = r.nextLong();
  r.setSeed(seed);
  LOG.info("length: " + seed);
  final DataInputBuffer in = new DataInputBuffer();
  final DataOutputBuffer out1 = new DataOutputBuffer();
  final DataOutputBuffer out2 = new DataOutputBuffer();
  for (int i = min; i < max; ++i) {
    setSerialize(x, r.nextLong(), i, out1);
    // check write
    assertEquals(i, out1.getLength());
    // write to stream
    x.write(out2);
    // check read
    in.reset(out1.getData(), 0, out1.getLength());
    y.readFields(in);
    assertEquals(i, x.getSize());
    assertEquals(i, y.getSize());
  }
  // check stream read
  in.reset(out2.getData(), 0, out2.getLength());
  for (int i = min; i < max; ++i) {
    y.readFields(in);
    assertEquals(i, y.getSize());
  }
}
Example #30
Source File: FreightStreamer.java From RDFS with Apache License 2.0
public TextRecordInputStream(FileStatus f) throws IOException {
  r = new SequenceFile.Reader(fs, f.getPath(), getConf());
  key = ReflectionUtils.newInstance(
      r.getKeyClass().asSubclass(WritableComparable.class), getConf());
  val = ReflectionUtils.newInstance(
      r.getValueClass().asSubclass(Writable.class), getConf());
  inbuf = new DataInputBuffer();
  outbuf = new DataOutputBuffer();
}