Java Code Examples for org.apache.flink.core.memory.MemorySegment#get()

The following examples show how to use org.apache.flink.core.memory.MemorySegment#get(). Each example is taken from an open-source project; the originating project, source file, and license are noted above the code.
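Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below; the class name is invented) of the two get() overloads the examples rely on: the single-byte read get(int) and the bulk read get(int, byte[], int, int). It backs a segment with a plain heap byte array via MemorySegmentFactory.wrap.

import org.apache.flink.core.memory.MemorySegment;
import org.apache.flink.core.memory.MemorySegmentFactory;

public class MemorySegmentGetSketch {
	public static void main(String[] args) {
		// Back a segment with a plain heap byte array.
		MemorySegment segment = MemorySegmentFactory.wrap(new byte[] {10, 20, 30, 40});

		// get(int index): read the single byte at an absolute position.
		byte single = segment.get(2); // 30

		// get(int index, byte[] dst, int offset, int length): bulk-copy into a byte[].
		byte[] dst = new byte[2];
		segment.get(1, dst, 0, 2); // dst == {20, 30}

		System.out.println(single + " / " + dst[0] + ", " + dst[1]);
	}
}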
Example 1
Source File: MutableHashTable.java    From flink with Apache License 2.0
protected final void buildBloomFilterForBucketsInPartition(int partNum, HashPartition<BT, PT> partition) {
	// Find all the buckets that belong to this partition, and build a bloom filter for each bucket (including its overflow buckets).
	final int bucketsPerSegment = this.bucketsPerSegmentMask + 1;

	int numSegs = this.buckets.length;
	// go over all segments that are part of the table
	for (int i = 0, bucket = 0; i < numSegs && bucket < numBuckets; i++) {
		final MemorySegment segment = this.buckets[i];
		// go over all buckets in the segment
		for (int k = 0; k < bucketsPerSegment && bucket < numBuckets; k++, bucket++) {
			final int bucketInSegmentOffset = k * HASH_BUCKET_SIZE;
			byte partitionNumber = segment.get(bucketInSegmentOffset + HEADER_PARTITION_OFFSET);
			if (partitionNumber == partNum) {
				byte status = segment.get(bucketInSegmentOffset + HEADER_STATUS_OFFSET);
				if (status == BUCKET_STATUS_IN_MEMORY) {
					buildBloomFilterForBucket(bucketInSegmentOffset, segment, partition);
				}
			}
		}
	}
}
 
Example 2
Source File: BinarySegmentUtils.java    From flink with Apache License 2.0
public static void copyMultiSegmentsToBytes(
		MemorySegment[] segments,
		int offset,
		byte[] bytes,
		int bytesOffset,
		int numBytes) {
	int remainSize = numBytes;
	for (MemorySegment segment : segments) {
		int remain = segment.size() - offset;
		if (remain > 0) {
			int nCopy = Math.min(remain, remainSize);
			segment.get(offset, bytes, numBytes - remainSize + bytesOffset, nCopy);
			remainSize -= nCopy;
			// subsequent segments are read from offset 0.
			offset = 0;
			if (remainSize == 0) {
				return;
			}
		} else {
			// remain <= 0: this segment lies entirely before the requested range;
			// the offset into the next segment is offset - segmentSize, i.e. -remain.
			offset = -remain;
		}
	}
}
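A brief usage sketch (the segment contents, offsets, and the wrapping via MemorySegmentFactory are illustrative, not part of the original example): copying a range that starts partway into the first segment and spills into the second.

MemorySegment seg1 = MemorySegmentFactory.wrap(new byte[] {1, 2, 3, 4});
MemorySegment seg2 = MemorySegmentFactory.wrap(new byte[] {5, 6, 7, 8});
byte[] out = new byte[6];
// Copy 6 bytes starting at offset 2 of seg1: {3, 4} from seg1, then {5, 6, 7, 8} from seg2.
BinarySegmentUtils.copyMultiSegmentsToBytes(new MemorySegment[] {seg1, seg2}, 2, out, 0, 6);
// out == {3, 4, 5, 6, 7, 8}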
 
Example 3
Source File: CompressedBlockChannelWriter.java    From flink with Apache License 2.0
@Override
public void writeBlock(MemorySegment block) throws IOException {
	if (copyCompress) {
		int offset = 0;
		int len = block.size();

		while (len > 0) {
			int copy = Math.min(len, buf.length - count);
			if (copy == 0) {
				flushBuffer();
			} else {
				block.get(offset, buf, count, copy);
				count += copy;
				offset += copy;
				len -= copy;
			}
		}
	} else {
		compressBuffer(block.wrap(0, block.size()), block.size());
	}

	boolean add = blockQueue.add(block);
	Preconditions.checkState(add); // LinkedBlockingQueue#add never fails on an unbounded queue.
}
 
Example 4
Source File: SegmentsUtil.java    From flink with Apache License 2.0
// Reads a two-byte value one byte at a time, crossing a segment boundary if needed.
private static int getTwoByteSlowly(
		MemorySegment[] segments, int segSize, int segNum, int segOffset) {
	MemorySegment segment = segments[segNum];
	int ret = 0;
	for (int i = 0; i < 2; i++) {
		if (segOffset == segSize) {
			segment = segments[++segNum];
			segOffset = 0;
		}
		int unsignedByte = segment.get(segOffset) & 0xff;
		if (LITTLE_ENDIAN) {
			ret |= (unsignedByte << (i * 8));
		} else {
			ret |= (unsignedByte << ((1 - i) * 8));
		}
		segOffset++;
	}
	return ret;
}
 
Example 5
Source File: BinarySegmentUtils.java    From flink with Apache License 2.0
// Reads an eight-byte value one byte at a time, crossing segment boundaries as needed.
private static long getLongSlowly(
	MemorySegment[] segments, int segSize, int segNum, int segOffset) {
	MemorySegment segment = segments[segNum];
	long ret = 0;
	for (int i = 0; i < 8; i++) {
		if (segOffset == segSize) {
			segment = segments[++segNum];
			segOffset = 0;
		}
		long unsignedByte = segment.get(segOffset) & 0xff;
		if (LITTLE_ENDIAN) {
			ret |= (unsignedByte << (i * 8));
		} else {
			ret |= (unsignedByte << ((7 - i) * 8));
		}
		segOffset++;
	}
	return ret;
}
 
Example 6
Source File: CompactingHashTable.java    From Flink-CEPplus with Apache License 2.0
@Override
public final void insert(T record) throws IOException {
	if (this.closed) {
		return;
	}

	final int hashCode = MathUtils.jenkinsHash(this.buildSideComparator.hash(record));
	final int posHashCode = hashCode % this.numBuckets;
	
	// get the bucket for the given hash code
	final int bucketArrayPos = posHashCode >>> this.bucketsPerSegmentBits;
	final int bucketInSegmentPos = (posHashCode & this.bucketsPerSegmentMask) << NUM_INTRA_BUCKET_BITS;
	final MemorySegment bucket = this.buckets[bucketArrayPos];
	
	// get the basic characteristics of the bucket
	final int partitionNumber = bucket.get(bucketInSegmentPos + HEADER_PARTITION_OFFSET);
	InMemoryPartition<T> partition = this.partitions.get(partitionNumber);
	
	long pointer = insertRecordIntoPartition(record, partition, false);
	insertBucketEntryFromStart(bucket, bucketInSegmentPos, hashCode, pointer, partitionNumber);
}
 
Example 7
Source File: MutableHashTable.java    From flink with Apache License 2.0
/**
 * Moves to the next bucket. Returns true if the new bucket is in memory,
 * false if it is spilled or there are no more buckets.
 */
private boolean moveToNextBucket() {
	scanCount++;
	if (scanCount > totalBucketNumber - 1) {
		return false;
	}
	// move to next bucket, update all the current bucket status with new bucket information.
	final int bucketArrayPos = scanCount >> this.bucketsPerSegmentBits;
	final int currentBucketInSegmentOffset = (scanCount & this.bucketsPerSegmentMask) << NUM_INTRA_BUCKET_BITS;
	MemorySegment currentBucket = this.buckets[bucketArrayPos];
	final int partitionNumber = currentBucket.get(currentBucketInSegmentOffset + HEADER_PARTITION_OFFSET);
	final HashPartition<BT, PT> p = this.partitionsBeingBuilt.get(partitionNumber);
	if (p.isInMemory()) {
		setBucket(currentBucket, p.overflowSegments, p, currentBucketInSegmentOffset);
		return true;
	} else {
		return false;
	}
}
 
Example 8
Source File: MutableHashTable.java    From Flink-CEPplus with Apache License 2.0
protected final void insertIntoTable(final BT record, final int hashCode) throws IOException {
	final int posHashCode = hashCode % this.numBuckets;
	
	// get the bucket for the given hash code
	final int bucketArrayPos = posHashCode >> this.bucketsPerSegmentBits;
	final int bucketInSegmentPos = (posHashCode & this.bucketsPerSegmentMask) << NUM_INTRA_BUCKET_BITS;
	final MemorySegment bucket = this.buckets[bucketArrayPos];
	
	// get the basic characteristics of the bucket
	final int partitionNumber = bucket.get(bucketInSegmentPos + HEADER_PARTITION_OFFSET);
	
	// get the partition descriptor for the bucket
	if (partitionNumber < 0 || partitionNumber >= this.partitionsBeingBuilt.size()) {
		throw new RuntimeException("Error: Hash structures in Hash-Join are corrupt. Invalid partition number for bucket.");
	}
	final HashPartition<BT, PT> p = this.partitionsBeingBuilt.get(partitionNumber);
	
	// --------- Step 1: Get the partition for this pair and put the pair into the buffer ---------
	
	long pointer = p.insertIntoBuildBuffer(record);
	if (pointer != -1) {
		// record was inserted into an in-memory partition. a pointer must be inserted into the buckets
		insertBucketEntry(p, bucket, bucketInSegmentPos, hashCode, pointer, true);
	} else {
		byte status = bucket.get(bucketInSegmentPos + HEADER_STATUS_OFFSET);
		if (status == BUCKET_STATUS_IN_FILTER) {
			// The partition has been spilled: point the bloom filter at this bucket's
			// filter bits and add the hash code to it.
			this.bloomFilter.setBitsLocation(bucket, bucketInSegmentPos + BUCKET_HEADER_LENGTH);
			this.bloomFilter.addHash(hashCode);
		}
	}
}
 
Example 9
Source File: BinarySegmentUtils.java    From flink with Apache License 2.0
/**
 * Unsets the bit at the given index in the segments.
 *
 * @param segments target segments.
 * @param baseOffset base offset of the bit region, in bytes.
 * @param index bit index relative to the base offset.
 */
public static void bitUnSet(MemorySegment[] segments, int baseOffset, int index) {
	if (segments.length == 1) {
		MemorySegment segment = segments[0];
		int offset = baseOffset + byteIndex(index);
		byte current = segment.get(offset);
		current &= ~(1 << (index & BIT_BYTE_INDEX_MASK));
		segment.put(offset, current);
	} else {
		bitUnSetMultiSegments(segments, baseOffset, index);
	}
}
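A short illustrative sketch (the byte value and index are invented, and it assumes byteIndex follows the usual index >>> 3 convention used elsewhere in this class): clearing bit 3 of a single-segment array turns 0xFF into 0xF7.

MemorySegment[] segs = {MemorySegmentFactory.wrap(new byte[] {(byte) 0xFF})};
BinarySegmentUtils.bitUnSet(segs, 0, 3); // clear bit 3 of byte 0
byte result = segs[0].get(0);            // (byte) 0xF7; all other bits untouched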
 
Example 10
Source File: BinaryString.java    From flink with Apache License 2.0
/**
 * Compares two strings lexicographically.
 * UTF-8 encodes code points so that their ordering is preserved under unsigned
 * byte-wise comparison, so it suffices to compare the raw binary.
 */
@Override
public int compareTo(@Nonnull BinaryString other) {
	if (javaObject != null && other.javaObject != null) {
		return javaObject.compareTo(other.javaObject);
	}

	ensureMaterialized();
	other.ensureMaterialized();
	if (segments.length == 1 && other.segments.length == 1) {

		int len = Math.min(sizeInBytes, other.sizeInBytes);
		MemorySegment seg1 = segments[0];
		MemorySegment seg2 = other.segments[0];

		for (int i = 0; i < len; i++) {
			int res = (seg1.get(offset + i) & 0xFF) - (seg2.get(other.offset + i) & 0xFF);
			if (res != 0) {
				return res;
			}
		}
		return sizeInBytes - other.sizeInBytes;
	}

	// if there are multi segments.
	return compareMultiSegments(other);
}
 
Example 11
Source File: IntNormalizedKeyComputer.java    From flink with Apache License 2.0
@Override
public void swapKey(MemorySegment segI, int offsetI, MemorySegment segJ, int offsetJ) {

	int temp0 = segI.getInt(offsetI);
	segI.putInt(offsetI, segJ.getInt(offsetJ));
	segJ.putInt(offsetJ, temp0);

	byte temp1 = segI.get(offsetI + 4);
	segI.put(offsetI + 4, segJ.get(offsetJ + 4));
	segJ.put(offsetJ + 4, temp1);

}
 
Example 12
Source File: StringUtf8Utils.java    From flink with Apache License 2.0
public static String decodeUTF8(MemorySegment input, int offset, int byteLen) {
	char[] chars = allocateReuseChars(byteLen);
	int len = decodeUTF8Strict(input, offset, byteLen, chars);
	if (len < 0) {
		byte[] bytes = allocateReuseBytes(byteLen);
		input.get(offset, bytes, 0, byteLen);
		return defaultDecodeUTF8(bytes, 0, byteLen);
	}
	return new String(chars, 0, len);
}
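A short usage sketch (the sample text and the wrapping via MemorySegmentFactory are illustrative): round-trip a UTF-8 string through a segment.

byte[] utf8 = "héllo".getBytes(java.nio.charset.StandardCharsets.UTF_8);
MemorySegment seg = MemorySegmentFactory.wrap(utf8);
String decoded = StringUtf8Utils.decodeUTF8(seg, 0, utf8.length); // "héllo"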
 
Example 13
Source File: CompactingHashTable.java    From Flink-CEPplus with Apache License 2.0
public T getMatchFor(PT probeSideRecord, T reuse) {
	if (closed) {
		return null;
	}
	final int searchHashCode = MathUtils.jenkinsHash(this.probeTypeComparator.hash(probeSideRecord));
	
	final int posHashCode = searchHashCode % numBuckets;
	
	// get the bucket for the given hash code
	MemorySegment bucket = buckets[posHashCode >> bucketsPerSegmentBits];
	int bucketInSegmentOffset = (posHashCode & bucketsPerSegmentMask) << NUM_INTRA_BUCKET_BITS;
	
	// get the basic characteristics of the bucket
	final int partitionNumber = bucket.get(bucketInSegmentOffset + HEADER_PARTITION_OFFSET);
	final InMemoryPartition<T> p = partitions.get(partitionNumber);
	final MemorySegment[] overflowSegments = p.overflowSegments;
	
	this.pairComparator.setReference(probeSideRecord);
	
	int countInSegment = bucket.getInt(bucketInSegmentOffset + HEADER_COUNT_OFFSET);
	int numInSegment = 0;
	int posInSegment = bucketInSegmentOffset + BUCKET_HEADER_LENGTH;

	// loop over all segments that are involved in the bucket (original bucket plus overflow buckets)
	while (true) {
		
		while (numInSegment < countInSegment) {
			
			final int thisCode = bucket.getInt(posInSegment);
			posInSegment += HASH_CODE_LEN;
				
			// check if the hash code matches
			if (thisCode == searchHashCode) {
				// get the pointer to the pair
				final int pointerOffset = bucketInSegmentOffset + BUCKET_POINTER_START_OFFSET + (numInSegment * POINTER_LEN);
				final long pointer = bucket.getLong(pointerOffset);
				numInSegment++;
				
				// deserialize the key to check whether it is really equal, or whether we had only a hash collision
				try {
					reuse = p.readRecordAt(pointer, reuse);
					
					if (this.pairComparator.equalToReference(reuse)) {
						this.partition = p;
						this.bucket = bucket;
						this.pointerOffsetInBucket = pointerOffset;
						return reuse;
					}
				}
				catch (IOException e) {
					throw new RuntimeException("Error deserializing record from the hashtable: " + e.getMessage(), e);
				}
			}
			else {
				numInSegment++;
			}
		}
		
		// this segment is done. check if there is another chained bucket
		final long forwardPointer = bucket.getLong(bucketInSegmentOffset + HEADER_FORWARD_OFFSET);
		if (forwardPointer == BUCKET_FORWARD_POINTER_NOT_SET) {
			return null;
		}
		
		final int overflowSegNum = (int) (forwardPointer >>> 32);
		bucket = overflowSegments[overflowSegNum];
		bucketInSegmentOffset = (int) forwardPointer;
		countInSegment = bucket.getInt(bucketInSegmentOffset + HEADER_COUNT_OFFSET);
		posInSegment = bucketInSegmentOffset + BUCKET_HEADER_LENGTH;
		numInSegment = 0;
	}
}
 
Example 14
Source File: CompactingHashTable.java    From Flink-CEPplus with Apache License 2.0
/**
 * Compacts (garbage collects) a partition with the copy-compact strategy, using the dedicated compaction partition.
 *
 * @param partitionNumber partition to compact
 * @throws IOException if reading or writing a record fails
 */
private void compactPartition(final int partitionNumber) throws IOException {
	// do nothing if table was closed, parameter is invalid or no garbage exists
	if (this.closed || partitionNumber >= this.partitions.size() || this.partitions.get(partitionNumber).isCompacted()) {
		return;
	}
	// release all segments owned by compaction partition
	this.compactionMemory.clearAllMemory(availableMemory);
	this.compactionMemory.allocateSegments(1);
	this.compactionMemory.pushDownPages();
	T tempHolder = this.buildSideSerializer.createInstance();
	final int numPartitions = this.partitions.size();
	InMemoryPartition<T> partition = this.partitions.remove(partitionNumber);
	MemorySegment[] overflowSegments = partition.overflowSegments;
	long pointer;
	int pointerOffset;
	int bucketOffset;
	final int bucketsPerSegment = this.bucketsPerSegmentMask + 1;
	for (int i = 0, bucket = partitionNumber; i < this.buckets.length && bucket < this.numBuckets; i++) {
		MemorySegment segment = this.buckets[i];
		// go over all buckets in the segment belonging to the partition
		for (int k = bucket % bucketsPerSegment; k < bucketsPerSegment && bucket < this.numBuckets; k += numPartitions, bucket += numPartitions) {
			bucketOffset = k * HASH_BUCKET_SIZE;
			if ((int) segment.get(bucketOffset + HEADER_PARTITION_OFFSET) != partitionNumber) {
				throw new IOException("Accessed wrong bucket! wanted: " + partitionNumber + " got: " + segment.get(bucketOffset + HEADER_PARTITION_OFFSET));
			}
			// loop over all segments that are involved in the bucket (original bucket plus overflow buckets)
			int countInSegment = segment.getInt(bucketOffset + HEADER_COUNT_OFFSET);
			int numInSegment = 0;
			pointerOffset = bucketOffset + BUCKET_POINTER_START_OFFSET;
			while (true) {
				while (numInSegment < countInSegment) {
					pointer = segment.getLong(pointerOffset);
					tempHolder = partition.readRecordAt(pointer, tempHolder);
					pointer = this.compactionMemory.appendRecord(tempHolder);
					segment.putLong(pointerOffset, pointer);
					pointerOffset += POINTER_LEN;
					numInSegment++;
				}
				// this segment is done. check if there is another chained bucket
				final long forwardPointer = segment.getLong(bucketOffset + HEADER_FORWARD_OFFSET);
				if (forwardPointer == BUCKET_FORWARD_POINTER_NOT_SET) {
					break;
				}
				final int overflowSegNum = (int) (forwardPointer >>> 32);
				segment = overflowSegments[overflowSegNum];
				bucketOffset = (int) forwardPointer;
				countInSegment = segment.getInt(bucketOffset + HEADER_COUNT_OFFSET);
				pointerOffset = bucketOffset + BUCKET_POINTER_START_OFFSET;
				numInSegment = 0;
			}
			segment = this.buckets[i];
		}
	}
	// swap partition with compaction partition
	this.compactionMemory.setPartitionNumber(partitionNumber);
	this.partitions.add(partitionNumber, compactionMemory);
	this.partitions.get(partitionNumber).overflowSegments = partition.overflowSegments;
	this.partitions.get(partitionNumber).numOverflowSegments = partition.numOverflowSegments;
	this.partitions.get(partitionNumber).nextOverflowBucket = partition.nextOverflowBucket;
	this.partitions.get(partitionNumber).setIsCompacted(true);
	//this.partitions.get(partitionNumber).pushDownPages();
	this.compactionMemory = partition;
	this.compactionMemory.resetRecordCounter();
	this.compactionMemory.setPartitionNumber(-1);
	this.compactionMemory.overflowSegments = null;
	this.compactionMemory.numOverflowSegments = 0;
	this.compactionMemory.nextOverflowBucket = 0;
	// try to allocate maximum segment count
	this.compactionMemory.clearAllMemory(this.availableMemory);
	int maxSegmentNumber = this.getMaxPartition();
	this.compactionMemory.allocateSegments(maxSegmentNumber);
	this.compactionMemory.resetRWViews();
	this.compactionMemory.pushDownPages();
}
 
Example 15
Source File: SegmentsUtil.java    From flink with Apache License 2.0
/**
 * Unsets the bit at the given index in the segment.
 *
 * @param segment target segment.
 * @param baseOffset base offset of the bit region, in bytes.
 * @param index bit index relative to the base offset.
 */
public static void bitUnSet(MemorySegment segment, int baseOffset, int index) {
	int offset = baseOffset + byteIndex(index);
	byte current = segment.get(offset);
	current &= ~(1 << (index & BIT_BYTE_INDEX_MASK));
	segment.put(offset, current);
}
 
Example 16
Source File: SegmentsUtil.java    From flink with Apache License 2.0
/**
 * Reads the bit at the given index from the segment.
 *
 * @param segment target segment.
 * @param baseOffset base offset of the bit region, in bytes.
 * @param index bit index relative to the base offset.
 */
public static boolean bitGet(MemorySegment segment, int baseOffset, int index) {
	int offset = baseOffset + byteIndex(index);
	byte current = segment.get(offset);
	return (current & (1 << (index & BIT_BYTE_INDEX_MASK))) != 0;
}
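Finally, a small illustrative sketch (the initial byte value is invented) combining bitGet with the single-segment bitUnSet shown above:

MemorySegment seg = MemorySegmentFactory.wrap(new byte[] {(byte) 0b0000_0100});
boolean before = SegmentsUtil.bitGet(seg, 0, 2); // true: bit 2 is set
SegmentsUtil.bitUnSet(seg, 0, 2);                // clear bit 2
boolean after = SegmentsUtil.bitGet(seg, 0, 2);  // false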