Java Code Examples for org.apache.flink.api.common.typeutils.TypeSerializer#getLength()

The following examples show how to use org.apache.flink.api.common.typeutils.TypeSerializer#getLength(). Each example notes the project and source file it was taken from.
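For orientation: getLength() returns the fixed serialized size of the type in bytes, or -1 for variable-length types, and most of the examples below branch on exactly that distinction. The snippet below is a minimal sketch of that contract using Flink's built-in serializer singletons (the class name GetLengthDemo is purely illustrative):

import org.apache.flink.api.common.typeutils.TypeSerializer;
import org.apache.flink.api.common.typeutils.base.IntSerializer;
import org.apache.flink.api.common.typeutils.base.LongSerializer;
import org.apache.flink.api.common.typeutils.base.StringSerializer;

public class GetLengthDemo {

	public static void main(String[] args) {
		// Fixed-length types report their serialized size in bytes.
		TypeSerializer<Integer> intSerializer = IntSerializer.INSTANCE;
		TypeSerializer<Long> longSerializer = LongSerializer.INSTANCE;
		System.out.println(intSerializer.getLength());    // prints 4
		System.out.println(longSerializer.getLength());   // prints 8

		// Variable-length types report -1.
		TypeSerializer<String> stringSerializer = StringSerializer.INSTANCE;
		System.out.println(stringSerializer.getLength()); // prints -1
	}
}
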
Example 1
Source File: TupleSerializerBase.java    From Flink-CEPplus with Apache License 2.0
@Override
public int getLength() {
	if (length == -2) {
		// length == -2 means the length has not been computed yet; compute and cache it
		int sum = 0;
		for (TypeSerializer<Object> serializer : fieldSerializers) {
			if (serializer.getLength() > 0) {
				sum += serializer.getLength();
			} else {
				// any variable-length field makes the whole tuple variable-length
				length = -1;
				return length;
			}
		}
		length = sum;
	}
	return length;
}
 
Example 2
Source File: TupleSerializerBase.java    From flink with Apache License 2.0
@Override
public int getLength() {
	if (length == -2) {
		int sum = 0;
		for (TypeSerializer<Object> serializer : fieldSerializers) {
			if (serializer.getLength() > 0) {
				sum += serializer.getLength();
			} else {
				length = -1;
				return length;
			}
		}
		length = sum;
	}
	return length;
}
 
Example 3
Source File: RocksDbTtlCompactFiltersManager.java    From Flink-CEPplus with Apache License 2.0
public void configCompactFilter(
		@Nonnull StateDescriptor<?, ?> stateDesc,
		TypeSerializer<?> stateSerializer) {
	StateTtlConfig ttlConfig = stateDesc.getTtlConfig();
	if (ttlConfig.isEnabled() && ttlConfig.getCleanupStrategies().inRocksdbCompactFilter()) {
		if (!enableTtlCompactionFilter) {
			LOG.warn("Cannot configure RocksDB TTL compaction filter for state <{}>: " +
				"feature is disabled for the state backend.", stateDesc.getName());
			return;
		}
		FlinkCompactionFilterFactory compactionFilterFactory = compactionFilterFactories.get(stateDesc.getName());
		Preconditions.checkNotNull(compactionFilterFactory);
		long ttl = ttlConfig.getTtl().toMilliseconds();

		StateTtlConfig.RocksdbCompactFilterCleanupStrategy rocksdbCompactFilterCleanupStrategy =
			ttlConfig.getCleanupStrategies().getRocksdbCompactFilterCleanupStrategy();
		Preconditions.checkNotNull(rocksdbCompactFilterCleanupStrategy);
		long queryTimeAfterNumEntries =
			rocksdbCompactFilterCleanupStrategy.getQueryTimeAfterNumEntries();

		FlinkCompactionFilter.Config config;
		if (stateDesc instanceof ListStateDescriptor) {
			TypeSerializer<?> elemSerializer = ((ListSerializer<?>) stateSerializer).getElementSerializer();
			int len = elemSerializer.getLength();
			if (len > 0) {
				config = FlinkCompactionFilter.Config.createForFixedElementList(
					ttl, queryTimeAfterNumEntries, len + 1); // plus one byte for list element delimiter
			} else {
				config = FlinkCompactionFilter.Config.createForList(
					ttl, queryTimeAfterNumEntries,
					new ListElementFilterFactory<>(elemSerializer.duplicate()));
			}
		} else if (stateDesc instanceof MapStateDescriptor) {
			config = FlinkCompactionFilter.Config.createForMap(ttl, queryTimeAfterNumEntries);
		} else {
			config = FlinkCompactionFilter.Config.createForValue(ttl, queryTimeAfterNumEntries);
		}
		compactionFilterFactory.configure(config);
	}
}
 
Example 4
Source File: RocksDbTtlCompactFiltersManager.java    From flink with Apache License 2.0
public void configCompactFilter(
		@Nonnull StateDescriptor<?, ?> stateDesc,
		TypeSerializer<?> stateSerializer) {
	StateTtlConfig ttlConfig = stateDesc.getTtlConfig();
	if (ttlConfig.isEnabled() && ttlConfig.getCleanupStrategies().inRocksdbCompactFilter()) {
		FlinkCompactionFilterFactory compactionFilterFactory = compactionFilterFactories.get(stateDesc.getName());
		Preconditions.checkNotNull(compactionFilterFactory);
		long ttl = ttlConfig.getTtl().toMilliseconds();

		StateTtlConfig.RocksdbCompactFilterCleanupStrategy rocksdbCompactFilterCleanupStrategy =
			ttlConfig.getCleanupStrategies().getRocksdbCompactFilterCleanupStrategy();
		Preconditions.checkNotNull(rocksdbCompactFilterCleanupStrategy);
		long queryTimeAfterNumEntries =
			rocksdbCompactFilterCleanupStrategy.getQueryTimeAfterNumEntries();

		FlinkCompactionFilter.Config config;
		if (stateDesc instanceof ListStateDescriptor) {
			TypeSerializer<?> elemSerializer = ((ListSerializer<?>) stateSerializer).getElementSerializer();
			int len = elemSerializer.getLength();
			if (len > 0) {
				config = FlinkCompactionFilter.Config.createForFixedElementList(
					ttl, queryTimeAfterNumEntries, len + 1); // plus one byte for list element delimiter
			} else {
				config = FlinkCompactionFilter.Config.createForList(
					ttl, queryTimeAfterNumEntries,
					new ListElementFilterFactory<>(elemSerializer.duplicate()));
			}
		} else if (stateDesc instanceof MapStateDescriptor) {
			config = FlinkCompactionFilter.Config.createForMap(ttl, queryTimeAfterNumEntries);
		} else {
			config = FlinkCompactionFilter.Config.createForValue(ttl, queryTimeAfterNumEntries);
		}
		compactionFilterFactory.configure(config);
	}
}
 
Example 5
Source File: DefaultInMemorySorterFactory.java    From Flink-CEPplus with Apache License 2.0
DefaultInMemorySorterFactory(
		@Nonnull TypeSerializerFactory<T> typeSerializerFactory,
		@Nonnull TypeComparator<T> typeComparator,
		int thresholdForInPlaceSorting) {
	this.typeSerializerFactory = typeSerializerFactory;
	this.typeComparator = typeComparator;

	TypeSerializer<T> typeSerializer = typeSerializerFactory.getSerializer();

	this.useFixedLengthRecordSorter = typeComparator.supportsSerializationWithKeyNormalization() &&
		typeSerializer.getLength() > 0 && typeSerializer.getLength() <= thresholdForInPlaceSorting;
}
 
Example 6
Source File: DefaultInMemorySorterFactory.java    From flink with Apache License 2.0
DefaultInMemorySorterFactory(
		@Nonnull TypeSerializerFactory<T> typeSerializerFactory,
		@Nonnull TypeComparator<T> typeComparator,
		int thresholdForInPlaceSorting) {
	this.typeSerializerFactory = typeSerializerFactory;
	this.typeComparator = typeComparator;

	TypeSerializer<T> typeSerializer = typeSerializerFactory.getSerializer();

	this.useFixedLengthRecordSorter = typeComparator.supportsSerializationWithKeyNormalization() &&
		typeSerializer.getLength() > 0 && typeSerializer.getLength() <= thresholdForInPlaceSorting;
}
 
Example 7
Source File: TimerSerializer.java    From flink with Apache License 2.0
private static int computeTotalByteLength(
	TypeSerializer<?> keySerializer,
	TypeSerializer<?> namespaceSerializer) {
	if (keySerializer.getLength() >= 0 && namespaceSerializer.getLength() >= 0) {
		// timestamp + key + namespace
		return Long.BYTES + keySerializer.getLength() + namespaceSerializer.getLength();
	} else {
		return -1;
	}
}
 
Example 8
Source File: RocksDbTtlCompactFiltersManager.java    From flink with Apache License 2.0
public void configCompactFilter(
		@Nonnull StateDescriptor<?, ?> stateDesc,
		TypeSerializer<?> stateSerializer) {
	StateTtlConfig ttlConfig = stateDesc.getTtlConfig();
	if (ttlConfig.isEnabled() && ttlConfig.getCleanupStrategies().inRocksdbCompactFilter()) {
		if (!enableTtlCompactionFilter) {
			LOG.warn("Cannot configure RocksDB TTL compaction filter for state <{}>: " +
				"feature is disabled for the state backend.", stateDesc.getName());
			return;
		}
		FlinkCompactionFilterFactory compactionFilterFactory = compactionFilterFactories.get(stateDesc.getName());
		Preconditions.checkNotNull(compactionFilterFactory);
		long ttl = ttlConfig.getTtl().toMilliseconds();

		StateTtlConfig.RocksdbCompactFilterCleanupStrategy rocksdbCompactFilterCleanupStrategy =
			ttlConfig.getCleanupStrategies().getRocksdbCompactFilterCleanupStrategy();
		Preconditions.checkNotNull(rocksdbCompactFilterCleanupStrategy);
		long queryTimeAfterNumEntries =
			rocksdbCompactFilterCleanupStrategy.getQueryTimeAfterNumEntries();

		FlinkCompactionFilter.Config config;
		if (stateDesc instanceof ListStateDescriptor) {
			TypeSerializer<?> elemSerializer = ((ListSerializer<?>) stateSerializer).getElementSerializer();
			int len = elemSerializer.getLength();
			if (len > 0) {
				config = FlinkCompactionFilter.Config.createForFixedElementList(
					ttl, queryTimeAfterNumEntries, len + 1); // plus one byte for list element delimiter
			} else {
				config = FlinkCompactionFilter.Config.createForList(
					ttl, queryTimeAfterNumEntries,
					new ListElementFilterFactory<>(elemSerializer.duplicate()));
			}
		} else if (stateDesc instanceof MapStateDescriptor) {
			config = FlinkCompactionFilter.Config.createForMap(ttl, queryTimeAfterNumEntries);
		} else {
			config = FlinkCompactionFilter.Config.createForValue(ttl, queryTimeAfterNumEntries);
		}
		compactionFilterFactory.configure(config);
	}
}
 
Example 9
Source File: RocksDBKeySerializationUtils.java    From flink with Apache License 2.0
public static boolean isSerializerTypeVariableSized(@Nonnull TypeSerializer<?> serializer) {
	return serializer.getLength() < 0;
}
 
Example 10
Source File: RocksDBUtils.java    From bravo with Apache License 2.0
public static boolean isAmbiguousKeyPossible(TypeSerializer<?> keySerializer,
		TypeSerializer<?> namespaceSerializer) {
	return (keySerializer.getLength() < 0) && (namespaceSerializer.getLength() < 0);
}
 
Example 11
Source File: FixedLengthRecordSorter.java    From flink with Apache License 2.0
public FixedLengthRecordSorter(TypeSerializer<T> serializer, TypeComparator<T> comparator, 
		List<MemorySegment> memory)
{
	if (serializer == null || comparator == null || memory == null) {
		throw new NullPointerException();
	}
	
	this.serializer = serializer;
	this.comparator = comparator;
	this.useNormKeyUninverted = !comparator.invertNormalizedKey();
	
	// check the size of the first buffer and record it. all further buffers must have the same size.
	// the size must also be a power of 2
	this.totalNumBuffers = memory.size();
	if (this.totalNumBuffers < MIN_REQUIRED_BUFFERS) {
		throw new IllegalArgumentException("Normalized-Key sorter requires at least " + MIN_REQUIRED_BUFFERS + " memory buffers.");
	}
	this.segmentSize = memory.get(0).size();
	this.recordSize = serializer.getLength();
	this.numKeyBytes = this.comparator.getNormalizeKeyLen();
	
	// check that the serializer and comparator allow our operations
	if (this.recordSize <= 0) {
		throw new IllegalArgumentException("This sorter works only for fixed-length data types.");
	} else if (this.recordSize > this.segmentSize) {
		throw new IllegalArgumentException("This sorter works only for record lengths below the memory segment size.");
	} else if (!comparator.supportsSerializationWithKeyNormalization()) {
		throw new IllegalArgumentException("This sorter requires a comparator that supports serialization with key normalization.");
	}
	
	// compute the entry size and limits
	this.recordsPerSegment = segmentSize / this.recordSize;
	this.lastEntryOffset = (this.recordsPerSegment - 1) * this.recordSize;
	this.swapBuffer = new byte[this.recordSize];
	
	this.freeMemory = new ArrayList<MemorySegment>(memory);
	
	// create the buffer collections
	this.sortBuffer = new ArrayList<MemorySegment>(16);
	this.outView = new SingleSegmentOutputView(this.segmentSize);
	this.inView = new SingleSegmentInputView(this.lastEntryOffset + this.recordSize);
	this.currentSortBufferSegment = nextMemorySegment();
	this.sortBuffer.add(this.currentSortBufferSegment);
	this.outView.set(this.currentSortBufferSegment);
	
	this.recordInstance = this.serializer.createInstance();
}
 
Example 12
Source File: MutableHashTable.java    From flink with Apache License 2.0
public MutableHashTable(TypeSerializer<BT> buildSideSerializer, TypeSerializer<PT> probeSideSerializer,
		TypeComparator<BT> buildSideComparator, TypeComparator<PT> probeSideComparator,
		TypePairComparator<PT, BT> comparator, List<MemorySegment> memorySegments,
		IOManager ioManager, int avgRecordLen, boolean useBloomFilters)
{
	// some sanity checks first
	if (memorySegments == null) {
		throw new NullPointerException();
	}
	if (memorySegments.size() < MIN_NUM_MEMORY_SEGMENTS) {
		throw new IllegalArgumentException("Too few memory segments provided. Hash Join needs at least " + 
			MIN_NUM_MEMORY_SEGMENTS + " memory segments.");
	}
	
	// assign the members
	this.buildSideSerializer = buildSideSerializer;
	this.probeSideSerializer = probeSideSerializer;
	this.buildSideComparator = buildSideComparator;
	this.probeSideComparator = probeSideComparator;
	this.recordComparator = comparator;
	this.availableMemory = memorySegments;
	this.ioManager = ioManager;
	this.useBloomFilters = useBloomFilters;
	
	this.avgRecordLen = avgRecordLen > 0 ? avgRecordLen : 
			buildSideSerializer.getLength() == -1 ? DEFAULT_RECORD_LEN : buildSideSerializer.getLength();
	
	// check the size of the first buffer and record it. all further buffers must have the same size.
	// the size must also be a power of 2
	this.totalNumBuffers = memorySegments.size();
	this.segmentSize = memorySegments.get(0).size();
	if ( (this.segmentSize & this.segmentSize - 1) != 0) {
		throw new IllegalArgumentException("Hash Table requires buffers whose size is a power of 2.");
	}
	int bucketsPerSegment = this.segmentSize >> NUM_INTRA_BUCKET_BITS;
	if (bucketsPerSegment == 0) {
		throw new IllegalArgumentException("Hash Table requires buffers of at least " + HASH_BUCKET_SIZE + " bytes.");
	}
	this.bucketsPerSegmentMask = bucketsPerSegment - 1;
	this.bucketsPerSegmentBits = MathUtils.log2strict(bucketsPerSegment);
	
	// take away the write behind buffers
	this.writeBehindBuffers = new LinkedBlockingQueue<MemorySegment>();
	this.numWriteBehindBuffers = getNumWriteBehindBuffers(memorySegments.size());
	
	this.partitionsBeingBuilt = new ArrayList<HashPartition<BT, PT>>();
	this.partitionsPending = new ArrayList<HashPartition<BT, PT>>();
	
	// because we allow to open and close multiple times, the state is initially closed
	this.closed.set(true);
}
 
Example 13
Source File: CompactingHashTable.java    From flink with Apache License 2.0
public CompactingHashTable(TypeSerializer<T> buildSideSerializer,
							TypeComparator<T> buildSideComparator,
							List<MemorySegment> memorySegments,
							int avgRecordLen) {
	
	super(buildSideSerializer, buildSideComparator);
	
	// some sanity checks first
	if (memorySegments == null) {
		throw new NullPointerException();
	}
	if (memorySegments.size() < MIN_NUM_MEMORY_SEGMENTS) {
		throw new IllegalArgumentException("Too few memory segments provided. Hash Table needs at least " + 
			MIN_NUM_MEMORY_SEGMENTS + " memory segments.");
	}
	
	this.availableMemory = (memorySegments instanceof ArrayList) ? 
			(ArrayList<MemorySegment>) memorySegments :
			new ArrayList<MemorySegment>(memorySegments);

	
	this.avgRecordLen = buildSideSerializer.getLength() > 0 ? buildSideSerializer.getLength() : avgRecordLen;
	
	// check the size of the first buffer and record it. all further buffers must have the same size.
	// the size must also be a power of 2
	this.segmentSize = memorySegments.get(0).size();
	if ( (this.segmentSize & this.segmentSize - 1) != 0) {
		throw new IllegalArgumentException("Hash Table requires buffers whose size is a power of 2.");
	}
	
	this.pageSizeInBits = MathUtils.log2strict(this.segmentSize);
	
	int bucketsPerSegment = this.segmentSize >> NUM_INTRA_BUCKET_BITS;
	if (bucketsPerSegment == 0) {
		throw new IllegalArgumentException("Hash Table requires buffers of at least " + HASH_BUCKET_SIZE + " bytes.");
	}
	this.bucketsPerSegmentMask = bucketsPerSegment - 1;
	this.bucketsPerSegmentBits = MathUtils.log2strict(bucketsPerSegment);
	
	this.partitions = new ArrayList<InMemoryPartition<T>>();
	
	// so far no partition has any MemorySegments
}
 
Example 14
Source File: FixedLengthRecordSorter.java    From Flink-CEPplus with Apache License 2.0
public FixedLengthRecordSorter(TypeSerializer<T> serializer, TypeComparator<T> comparator, 
		List<MemorySegment> memory)
{
	if (serializer == null || comparator == null || memory == null) {
		throw new NullPointerException();
	}
	
	this.serializer = serializer;
	this.comparator = comparator;
	this.useNormKeyUninverted = !comparator.invertNormalizedKey();
	
	// check the size of the first buffer and record it. all further buffers must have the same size.
	// the size must also be a power of 2
	this.totalNumBuffers = memory.size();
	if (this.totalNumBuffers < MIN_REQUIRED_BUFFERS) {
		throw new IllegalArgumentException("Normalized-Key sorter requires at least " + MIN_REQUIRED_BUFFERS + " memory buffers.");
	}
	this.segmentSize = memory.get(0).size();
	this.recordSize = serializer.getLength();
	this.numKeyBytes = this.comparator.getNormalizeKeyLen();
	
	// check that the serializer and comparator allow our operations
	if (this.recordSize <= 0) {
		throw new IllegalArgumentException("This sorter works only for fixed-length data types.");
	} else if (this.recordSize > this.segmentSize) {
		throw new IllegalArgumentException("This sorter works only for record lengths below the memory segment size.");
	} else if (!comparator.supportsSerializationWithKeyNormalization()) {
		throw new IllegalArgumentException("This sorter requires a comparator that supports serialization with key normalization.");
	}
	
	// compute the entry size and limits
	this.recordsPerSegment = segmentSize / this.recordSize;
	this.lastEntryOffset = (this.recordsPerSegment - 1) * this.recordSize;
	this.swapBuffer = new byte[this.recordSize];
	
	this.freeMemory = new ArrayList<MemorySegment>(memory);
	
	// create the buffer collections
	this.sortBuffer = new ArrayList<MemorySegment>(16);
	this.outView = new SingleSegmentOutputView(this.segmentSize);
	this.inView = new SingleSegmentInputView(this.lastEntryOffset + this.recordSize);
	this.currentSortBufferSegment = nextMemorySegment();
	this.sortBuffer.add(this.currentSortBufferSegment);
	this.outView.set(this.currentSortBufferSegment);
	
	this.recordInstance = this.serializer.createInstance();
}
 
Example 15
Source File: MutableHashTable.java    From Flink-CEPplus with Apache License 2.0
public MutableHashTable(TypeSerializer<BT> buildSideSerializer, TypeSerializer<PT> probeSideSerializer,
		TypeComparator<BT> buildSideComparator, TypeComparator<PT> probeSideComparator,
		TypePairComparator<PT, BT> comparator, List<MemorySegment> memorySegments,
		IOManager ioManager, int avgRecordLen, boolean useBloomFilters)
{
	// some sanity checks first
	if (memorySegments == null) {
		throw new NullPointerException();
	}
	if (memorySegments.size() < MIN_NUM_MEMORY_SEGMENTS) {
		throw new IllegalArgumentException("Too few memory segments provided. Hash Join needs at least " + 
			MIN_NUM_MEMORY_SEGMENTS + " memory segments.");
	}
	
	// assign the members
	this.buildSideSerializer = buildSideSerializer;
	this.probeSideSerializer = probeSideSerializer;
	this.buildSideComparator = buildSideComparator;
	this.probeSideComparator = probeSideComparator;
	this.recordComparator = comparator;
	this.availableMemory = memorySegments;
	this.ioManager = ioManager;
	this.useBloomFilters = useBloomFilters;
	
	this.avgRecordLen = avgRecordLen > 0 ? avgRecordLen : 
			buildSideSerializer.getLength() == -1 ? DEFAULT_RECORD_LEN : buildSideSerializer.getLength();
	
	// check the size of the first buffer and record it. all further buffers must have the same size.
	// the size must also be a power of 2
	this.totalNumBuffers = memorySegments.size();
	this.segmentSize = memorySegments.get(0).size();
	if ( (this.segmentSize & this.segmentSize - 1) != 0) {
		throw new IllegalArgumentException("Hash Table requires buffers whose size is a power of 2.");
	}
	int bucketsPerSegment = this.segmentSize >> NUM_INTRA_BUCKET_BITS;
	if (bucketsPerSegment == 0) {
		throw new IllegalArgumentException("Hash Table requires buffers of at least " + HASH_BUCKET_SIZE + " bytes.");
	}
	this.bucketsPerSegmentMask = bucketsPerSegment - 1;
	this.bucketsPerSegmentBits = MathUtils.log2strict(bucketsPerSegment);
	
	// take away the write behind buffers
	this.writeBehindBuffers = new LinkedBlockingQueue<MemorySegment>();
	this.numWriteBehindBuffers = getNumWriteBehindBuffers(memorySegments.size());
	
	this.partitionsBeingBuilt = new ArrayList<HashPartition<BT, PT>>();
	this.partitionsPending = new ArrayList<HashPartition<BT, PT>>();
	
	// because we allow to open and close multiple times, the state is initially closed
	this.closed.set(true);
}
 
Example 16
Source File: RocksDBKeySerializationUtils.java    From Flink-CEPplus with Apache License 2.0
public static boolean isSerializerTypeVariableSized(@Nonnull TypeSerializer<?> serializer) {
	return serializer.getLength() < 0;
}