Java Code Examples for org.apache.flink.util.ExceptionUtils#rethrowIOException()

The following examples show how to use org.apache.flink.util.ExceptionUtils#rethrowIOException(). You can go to the original project or source file by following the links above each example.
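Before the examples, a note on semantics: rethrowIOException rethrows the given Throwable as an IOException. The sketch below reflects the typical behavior of the method in recent Flink versions (an illustration of the contract, not necessarily the exact source): IOException, RuntimeException, and Error are rethrown unchanged, and any other throwable is wrapped in a new IOException with the original preserved as the cause.

public static void rethrowIOException(Throwable t) throws IOException {
	if (t instanceof IOException) {
		throw (IOException) t;      // checked IOException: rethrow unchanged
	}
	else if (t instanceof RuntimeException) {
		throw (RuntimeException) t; // unchecked exception: rethrow unchanged
	}
	else if (t instanceof Error) {
		throw (Error) t;            // errors: rethrow unchanged
	}
	else {
		throw new IOException(t.getMessage(), t); // otherwise: wrap, keep cause
	}
}

This is why the examples below can funnel an arbitrary Throwable into a method declared with throws IOException.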
Example 1
Source File: HadoopRecoverableFsDataOutputStream.java    From flink with Apache License 2.0
private static boolean truncate(final FileSystem hadoopFs, final Path file, final long length) throws IOException {
	if (truncateHandle != null) {
		try {
			return (Boolean) truncateHandle.invoke(hadoopFs, file, length);
		}
		catch (InvocationTargetException e) {
			ExceptionUtils.rethrowIOException(e.getTargetException());
		}
		catch (Throwable t) {
			throw new IOException(
					"Truncation of file failed because of access/linking problems with Hadoop's truncate call. " +
							"This is most likely a dependency conflict or class loading problem.");
		}
	}
	else {
		throw new IllegalStateException("Truncation handle has not been initialized");
	}
	return false;
}
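Note that the catch block passes e.getTargetException() rather than the InvocationTargetException itself: reflection wraps whatever the target method threw, so unwrapping first lets an IOException from Hadoop's truncate reach the caller unchanged. A minimal sketch of the same unwrap-then-rethrow pattern, with hypothetical names:

// Hypothetical helper: reflective calls wrap the target method's exception
// in InvocationTargetException, so unwrap it before rethrowing.
private static void invokeThrowingIO(Method method, Object target, Object... args) throws IOException {
	try {
		method.invoke(target, args);
	}
	catch (InvocationTargetException e) {
		// rethrow what the invoked method itself threw, not the wrapper
		ExceptionUtils.rethrowIOException(e.getTargetException());
	}
	catch (IllegalAccessException e) {
		throw new IOException("Reflective access failed", e);
	}
}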
 
Example 2
Source File: HadoopRecoverableFsDataOutputStream.java    From Flink-CEPplus with Apache License 2.0
private static boolean truncate(final FileSystem hadoopFs, final Path file, final long length) throws IOException {
	if (truncateHandle != null) {
		try {
			return (Boolean) truncateHandle.invoke(hadoopFs, file, length);
		}
		catch (InvocationTargetException e) {
			ExceptionUtils.rethrowIOException(e.getTargetException());
		}
		catch (Throwable t) {
			throw new IOException(
					"Truncation of file failed because of access/linking problems with Hadoop's truncate call. " +
							"This is most likely a dependency conflict or class loading problem.");
		}
	}
	else {
		throw new IllegalStateException("Truncation handle has not been initialized");
	}
	return false;
}
 
Example 3
Source File: HadoopRecoverableFsDataOutputStream.java    From flink with Apache License 2.0
private static boolean truncate(final FileSystem hadoopFs, final Path file, final long length) throws IOException {
	if (!HadoopUtils.isMinHadoopVersion(2, 7)) {
		throw new IllegalStateException("Truncation is not available in hadoop version < 2.7 , You are on Hadoop " + VersionInfo.getVersion());
	}

	if (truncateHandle != null) {
		try {
			return (Boolean) truncateHandle.invoke(hadoopFs, file, length);
		}
		catch (InvocationTargetException e) {
			ExceptionUtils.rethrowIOException(e.getTargetException());
		}
		catch (Throwable t) {
			throw new IOException(
					"Truncation of file failed because of access/linking problems with Hadoop's truncate call. " +
							"This is most likely a dependency conflict or class loading problem.");
		}
	}
	else {
		throw new IllegalStateException("Truncation handle has not been initialized");
	}
	return false;
}
 
Example 4
Source File: AsynchronousBufferFileWriter.java    From flink with Apache License 2.0
/**
 * Writes the given block asynchronously.
 *
 * @param buffer
 * 		the buffer to be written (will be recycled when done)
 *
 * @throws IOException
 * 		thrown if adding the write operation fails
 */
@Override
public void writeBlock(Buffer buffer) throws IOException {
	try {
		// if successfully added, the buffer will be recycled after the write operation
		addRequest(new BufferWriteRequest(this, buffer));
	} catch (Throwable e) {
		// if not added, we need to recycle here
		buffer.recycleBuffer();
		ExceptionUtils.rethrowIOException(e);
	}

}
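The pattern here is single ownership of the buffer: if addRequest succeeds, the asynchronous write path recycles the buffer once the I/O completes; if it fails, writeBlock recycles the buffer itself before rethrowing. A caller must therefore not recycle again after catching the IOException. A hedged usage sketch (bufferProvider and writer are hypothetical):

Buffer buffer = bufferProvider.requestBuffer(); // hypothetical acquisition
writer.writeBlock(buffer);
// on success: the write path recycles the buffer when the I/O completes
// on IOException: writeBlock has already recycled it; do not recycle here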
 
Example 5
Source File: NetworkEnvironment.java    From Flink-CEPplus with Apache License 2.0
@VisibleForTesting
public void setupInputGate(SingleInputGate gate) throws IOException {
	BufferPool bufferPool = null;
	int maxNumberOfMemorySegments;
	try {
		if (enableCreditBased) {
			maxNumberOfMemorySegments = gate.getConsumedPartitionType().isBounded() ?
				extraNetworkBuffersPerGate : Integer.MAX_VALUE;

			// assign exclusive buffers to input channels directly and use the rest for floating buffers
			gate.assignExclusiveSegments(networkBufferPool, networkBuffersPerChannel);
			bufferPool = networkBufferPool.createBufferPool(0, maxNumberOfMemorySegments);
		} else {
			maxNumberOfMemorySegments = gate.getConsumedPartitionType().isBounded() ?
				gate.getNumberOfInputChannels() * networkBuffersPerChannel +
					extraNetworkBuffersPerGate : Integer.MAX_VALUE;

			bufferPool = networkBufferPool.createBufferPool(gate.getNumberOfInputChannels(),
				maxNumberOfMemorySegments);
		}
		gate.setBufferPool(bufferPool);
	} catch (Throwable t) {
		if (bufferPool != null) {
			bufferPool.lazyDestroy();
		}

		ExceptionUtils.rethrowIOException(t);
	}
}
 
Example 6
Source File: NettyMessage.java    From Flink-CEPplus with Apache License 2.0
@Override
ByteBuf write(ByteBufAllocator allocator) throws IOException {
	// receiver ID (16), sequence number (4), backlog (4), isBuffer (1), buffer size (4)
	final int messageHeaderLength = 16 + 4 + 4 + 1 + 4;

	ByteBuf headerBuf = null;
	try {
		if (buffer instanceof Buffer) {
			// in order to forward the buffer to netty, it needs an allocator set
			((Buffer) buffer).setAllocator(allocator);
		}

		// only allocate header buffer - we will combine it with the data buffer below
		headerBuf = allocateBuffer(allocator, ID, messageHeaderLength, buffer.readableBytes(), false);

		receiverId.writeTo(headerBuf);
		headerBuf.writeInt(sequenceNumber);
		headerBuf.writeInt(backlog);
		headerBuf.writeBoolean(isBuffer);
		headerBuf.writeInt(buffer.readableBytes());

		CompositeByteBuf composityBuf = allocator.compositeDirectBuffer();
		composityBuf.addComponent(headerBuf);
		composityBuf.addComponent(buffer);
		// update writer index since we have data written to the components:
		composityBuf.writerIndex(headerBuf.writerIndex() + buffer.writerIndex());
		return composityBuf;
	}
	catch (Throwable t) {
		if (headerBuf != null) {
			headerBuf.release();
		}
		buffer.release();

		ExceptionUtils.rethrowIOException(t);
		return null; // silence the compiler
	}
}
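The trailing return null is needed because rethrowIOException is declared void: it always throws at runtime, but the compiler cannot prove that, so the method still needs a return statement on that path. A common alternative (a sketch, not part of Flink's API) is a helper with a nominal return type, letting the call site use a throw statement the compiler does recognize as terminating:

// Hypothetical helper, not Flink API: nominally returns an exception so that
// call sites can write "throw rethrowAsIOException(t);" instead of the
// unreachable "return null" placeholder.
static IOException rethrowAsIOException(Throwable t) throws IOException {
	if (t instanceof IOException) {
		throw (IOException) t;
	}
	throw new IOException(t.getMessage(), t);
}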
 
Example 7
Source File: AsynchronousBufferFileWriter.java    From Flink-CEPplus with Apache License 2.0
/**
 * Writes the given block asynchronously.
 *
 * @param buffer
 * 		the buffer to be written (will be recycled when done)
 *
 * @throws IOException
 * 		thrown if adding the write operation fails
 */
@Override
public void writeBlock(Buffer buffer) throws IOException {
	try {
		// if successfully added, the buffer will be recycled after the write operation
		addRequest(new BufferWriteRequest(this, buffer));
	} catch (Throwable e) {
		// if not added, we need to recycle here
		buffer.recycleBuffer();
		ExceptionUtils.rethrowIOException(e);
	}

}
 
Example 8
Source File: NettyMessage.java    From flink with Apache License 2.0
@Override
ByteBuf write(ByteBufAllocator allocator) throws IOException {
	// receiver ID (16), sequence number (4), backlog (4), isBuffer (1), buffer size (4)
	final int messageHeaderLength = 16 + 4 + 4 + 1 + 4;

	ByteBuf headerBuf = null;
	try {
		if (buffer instanceof Buffer) {
			// in order to forward the buffer to netty, it needs an allocator set
			((Buffer) buffer).setAllocator(allocator);
		}

		// only allocate header buffer - we will combine it with the data buffer below
		headerBuf = allocateBuffer(allocator, ID, messageHeaderLength, buffer.readableBytes(), false);

		receiverId.writeTo(headerBuf);
		headerBuf.writeInt(sequenceNumber);
		headerBuf.writeInt(backlog);
		headerBuf.writeBoolean(isBuffer);
		headerBuf.writeInt(buffer.readableBytes());

		CompositeByteBuf composityBuf = allocator.compositeDirectBuffer();
		composityBuf.addComponent(headerBuf);
		composityBuf.addComponent(buffer);
		// update writer index since we have data written to the components:
		composityBuf.writerIndex(headerBuf.writerIndex() + buffer.writerIndex());
		return composityBuf;
	}
	catch (Throwable t) {
		if (headerBuf != null) {
			headerBuf.release();
		}
		buffer.release();

		ExceptionUtils.rethrowIOException(t);
		return null; // silence the compiler
	}
}
 
Example 9
Source File: NettyMessage.java    From flink with Apache License 2.0
@Override
ByteBuf write(ByteBufAllocator allocator) throws IOException {
	ByteBuf headerBuf = null;
	try {
		// in order to forward the buffer to netty, it needs an allocator set
		buffer.setAllocator(allocator);

		// only allocate header buffer - we will combine it with the data buffer below
		headerBuf = allocateBuffer(allocator, ID, MESSAGE_HEADER_LENGTH, bufferSize, false);

		receiverId.writeTo(headerBuf);
		headerBuf.writeInt(sequenceNumber);
		headerBuf.writeInt(backlog);
		headerBuf.writeByte(dataType.ordinal());
		headerBuf.writeBoolean(isCompressed);
		headerBuf.writeInt(buffer.readableBytes());

		CompositeByteBuf composityBuf = allocator.compositeDirectBuffer();
		composityBuf.addComponent(headerBuf);
		composityBuf.addComponent(buffer.asByteBuf());
		// update writer index since we have data written to the components:
		composityBuf.writerIndex(headerBuf.writerIndex() + buffer.asByteBuf().writerIndex());
		return composityBuf;
	}
	catch (Throwable t) {
		if (headerBuf != null) {
			headerBuf.release();
		}
		buffer.recycleBuffer();

		ExceptionUtils.rethrowIOException(t);
		return null; // silence the compiler
	}
}
 
Example 10
Source File: NetworkBufferPool.java    From flink with Apache License 2.0
@Override
public List<MemorySegment> requestMemorySegments() throws IOException {
	synchronized (factoryLock) {
		if (isDestroyed) {
			throw new IllegalStateException("Network buffer pool has already been destroyed.");
		}

		tryRedistributeBuffers();
	}

	final List<MemorySegment> segments = new ArrayList<>(numberOfSegmentsToRequest);
	try {
		final Deadline deadline = Deadline.fromNow(requestSegmentsTimeout);
		while (true) {
			if (isDestroyed) {
				throw new IllegalStateException("Buffer pool is destroyed.");
			}

			final MemorySegment segment = availableMemorySegments.poll(2, TimeUnit.SECONDS);
			if (segment != null) {
				segments.add(segment);
			}

			if (segments.size() >= numberOfSegmentsToRequest) {
				break;
			}

			if (!deadline.hasTimeLeft()) {
				throw new IOException(String.format("Timeout triggered when requesting exclusive buffers: %s, " +
								" or you may increase the timeout which is %dms by setting the key '%s'.",
						getConfigDescription(),
						requestSegmentsTimeout.toMillis(),
						NettyShuffleEnvironmentOptions.NETWORK_EXCLUSIVE_BUFFERS_REQUEST_TIMEOUT_MILLISECONDS.key()));
			}
		}
	} catch (Throwable e) {
		try {
			recycleMemorySegments(segments, numberOfSegmentsToRequest);
		} catch (IOException inner) {
			e.addSuppressed(inner);
		}
		ExceptionUtils.rethrowIOException(e);
	}

	return segments;
}
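The catch block keeps the original failure as the primary exception and attaches any cleanup failure via addSuppressed, so neither error is lost. The generic shape of that pattern, with hypothetical doWork and cleanUp methods:

try {
	doWork(); // hypothetical operation that may fail midway
}
catch (Throwable primary) {
	try {
		cleanUp(); // hypothetical best-effort rollback of partial work
	}
	catch (IOException secondary) {
		primary.addSuppressed(secondary); // keep the original failure primary
	}
	ExceptionUtils.rethrowIOException(primary);
}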
 
Example 11
Source File: NetworkBufferPool.java    From flink with Apache License 2.0
private BufferPool internalCreateBufferPool(
		int numRequiredBuffers,
		int maxUsedBuffers,
		@Nullable BufferPoolOwner bufferPoolOwner,
		int numSubpartitions,
		int maxBuffersPerChannel) throws IOException {

	// It is necessary to use a separate lock from the one used for buffer
	// requests to ensure deadlock freedom for failure cases.
	synchronized (factoryLock) {
		if (isDestroyed) {
			throw new IllegalStateException("Network buffer pool has already been destroyed.");
		}

		// Ensure that the number of required buffers can be satisfied.
		// With dynamic memory management this should become obsolete.
		if (numTotalRequiredBuffers + numRequiredBuffers > totalNumberOfMemorySegments) {
			throw new IOException(String.format("Insufficient number of network buffers: " +
							"required %d, but only %d available. %s.",
					numRequiredBuffers,
					totalNumberOfMemorySegments - numTotalRequiredBuffers,
					getConfigDescription()));
		}

		this.numTotalRequiredBuffers += numRequiredBuffers;

		// We are good to go, create a new buffer pool and redistribute
		// non-fixed size buffers.
		LocalBufferPool localBufferPool =
			new LocalBufferPool(
				this,
				numRequiredBuffers,
				maxUsedBuffers,
				bufferPoolOwner,
				numSubpartitions,
				maxBuffersPerChannel);

		allBufferPools.add(localBufferPool);

		try {
			redistributeBuffers();
		} catch (IOException e) {
			try {
				destroyBufferPool(localBufferPool);
			} catch (IOException inner) {
				e.addSuppressed(inner);
			}
			ExceptionUtils.rethrowIOException(e);
		}

		return localBufferPool;
	}
}
 
Example 12
Source File: NetworkBufferPool.java    From flink with Apache License 2.0
@Override
public List<MemorySegment> requestMemorySegments() throws IOException {
	synchronized (factoryLock) {
		if (isDestroyed) {
			throw new IllegalStateException("Network buffer pool has already been destroyed.");
		}

		tryRedistributeBuffers();
	}

	final List<MemorySegment> segments = new ArrayList<>(numberOfSegmentsToRequest);
	try {
		final Deadline deadline = Deadline.fromNow(requestSegmentsTimeout);
		while (true) {
			if (isDestroyed) {
				throw new IllegalStateException("Buffer pool is destroyed.");
			}

			MemorySegment segment;
			synchronized (availableMemorySegments) {
				if ((segment = internalRequestMemorySegment()) == null) {
					availableMemorySegments.wait(2000);
				}
			}
			if (segment != null) {
				segments.add(segment);
			}

			if (segments.size() >= numberOfSegmentsToRequest) {
				break;
			}

			if (!deadline.hasTimeLeft()) {
				throw new IOException(String.format("Timeout triggered when requesting exclusive buffers: %s, " +
								" or you may increase the timeout which is %dms by setting the key '%s'.",
						getConfigDescription(),
						requestSegmentsTimeout.toMillis(),
						NettyShuffleEnvironmentOptions.NETWORK_EXCLUSIVE_BUFFERS_REQUEST_TIMEOUT_MILLISECONDS.key()));
			}
		}
	} catch (Throwable e) {
		try {
			recycleMemorySegments(segments, numberOfSegmentsToRequest);
		} catch (IOException inner) {
			e.addSuppressed(inner);
		}
		ExceptionUtils.rethrowIOException(e);
	}

	return segments;
}
 
Example 13
Source File: PendingCheckpoint.java    From flink with Apache License 2.0
public CompletedCheckpoint finalizeCheckpoint() throws IOException {

		synchronized (lock) {
			checkState(!isDiscarded(), "checkpoint is discarded");
			checkState(isFullyAcknowledged(), "Pending checkpoint has not been fully acknowledged yet");

			// make sure we fulfill the promise with an exception if something fails
			try {
				// write out the metadata
				final CheckpointMetadata savepoint = new CheckpointMetadata(checkpointId, operatorStates.values(), masterStates);
				final CompletedCheckpointStorageLocation finalizedLocation;

				try (CheckpointMetadataOutputStream out = targetLocation.createMetadataOutputStream()) {
					Checkpoints.storeCheckpointMetadata(savepoint, out);
					finalizedLocation = out.closeAndFinalizeCheckpoint();
				}

				CompletedCheckpoint completed = new CompletedCheckpoint(
						jobId,
						checkpointId,
						checkpointTimestamp,
						System.currentTimeMillis(),
						operatorStates,
						masterStates,
						props,
						finalizedLocation);

				onCompletionPromise.complete(completed);

				// to prevent null-pointers from concurrent modification, copy reference onto stack
				PendingCheckpointStats statsCallback = this.statsCallback;
				if (statsCallback != null) {
					// Finalize the statsCallback and give the completed checkpoint a
					// callback for discards.
					CompletedCheckpointStats.DiscardCallback discardCallback =
							statsCallback.reportCompletedCheckpoint(finalizedLocation.getExternalPointer());
					completed.setDiscardCallback(discardCallback);
				}

				// mark this pending checkpoint as disposed, but do NOT drop the state
				dispose(false);

				return completed;
			}
			catch (Throwable t) {
				onCompletionPromise.completeExceptionally(t);
				ExceptionUtils.rethrowIOException(t);
				return null; // silence the compiler
			}
		}
	}
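Here the failure is deliberately reported twice: completeExceptionally fails onCompletionPromise so asynchronous consumers of the future observe the error, while rethrowIOException surfaces the same throwable to the synchronous caller. A reduced sketch of that shape (Result and computeResult are hypothetical):

CompletableFuture<Result> promise = new CompletableFuture<>();
try {
	promise.complete(computeResult()); // hypothetical work producing the result
}
catch (Throwable t) {
	promise.completeExceptionally(t);     // asynchronous waiters see the failure
	ExceptionUtils.rethrowIOException(t); // the synchronous caller sees it too
}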
 
Example 14
Source File: NetworkBufferPool.java    From flink with Apache License 2.0
@Override
public BufferPool createBufferPool(int numRequiredBuffers, int maxUsedBuffers, Optional<BufferPoolOwner> owner) throws IOException {
	// It is necessary to use a separate lock from the one used for buffer
	// requests to ensure deadlock freedom for failure cases.
	synchronized (factoryLock) {
		if (isDestroyed) {
			throw new IllegalStateException("Network buffer pool has already been destroyed.");
		}

		// Ensure that the number of required buffers can be satisfied.
		// With dynamic memory management this should become obsolete.
		if (numTotalRequiredBuffers + numRequiredBuffers > totalNumberOfMemorySegments) {
			throw new IOException(String.format("Insufficient number of network buffers: " +
							"required %d, but only %d available. %s.",
					numRequiredBuffers,
					totalNumberOfMemorySegments - numTotalRequiredBuffers,
					getConfigDescription()));
		}

		this.numTotalRequiredBuffers += numRequiredBuffers;

		// We are good to go, create a new buffer pool and redistribute
		// non-fixed size buffers.
		LocalBufferPool localBufferPool =
			new LocalBufferPool(this, numRequiredBuffers, maxUsedBuffers, owner);

		allBufferPools.add(localBufferPool);

		try {
			redistributeBuffers();
		} catch (IOException e) {
			try {
				destroyBufferPool(localBufferPool);
			} catch (IOException inner) {
				e.addSuppressed(inner);
			}
			ExceptionUtils.rethrowIOException(e);
		}

		return localBufferPool;
	}
}
 
Example 15
Source File: PendingCheckpoint.java    From flink with Apache License 2.0
public CompletedCheckpoint finalizeCheckpoint() throws IOException {

		synchronized (lock) {
			checkState(isFullyAcknowledged(), "Pending checkpoint has not been fully acknowledged yet.");

			// make sure we fulfill the promise with an exception if something fails
			try {
				// write out the metadata
				final Savepoint savepoint = new SavepointV2(checkpointId, operatorStates.values(), masterState);
				final CompletedCheckpointStorageLocation finalizedLocation;

				try (CheckpointMetadataOutputStream out = targetLocation.createMetadataOutputStream()) {
					Checkpoints.storeCheckpointMetadata(savepoint, out);
					finalizedLocation = out.closeAndFinalizeCheckpoint();
				}

				CompletedCheckpoint completed = new CompletedCheckpoint(
						jobId,
						checkpointId,
						checkpointTimestamp,
						System.currentTimeMillis(),
						operatorStates,
						masterState,
						props,
						finalizedLocation);

				onCompletionPromise.complete(completed);

				// to prevent null-pointers from concurrent modification, copy reference onto stack
				PendingCheckpointStats statsCallback = this.statsCallback;
				if (statsCallback != null) {
					// Finalize the statsCallback and give the completed checkpoint a
					// callback for discards.
					CompletedCheckpointStats.DiscardCallback discardCallback =
							statsCallback.reportCompletedCheckpoint(finalizedLocation.getExternalPointer());
					completed.setDiscardCallback(discardCallback);
				}

				// mark this pending checkpoint as disposed, but do NOT drop the state
				dispose(false);

				return completed;
			}
			catch (Throwable t) {
				onCompletionPromise.completeExceptionally(t);
				ExceptionUtils.rethrowIOException(t);
				return null; // silence the compiler
			}
		}
	}
 
Example 16
Source File: NetworkBufferPool.java    From Flink-CEPplus with Apache License 2.0
@Override
public BufferPool createBufferPool(int numRequiredBuffers, int maxUsedBuffers, Optional<BufferPoolOwner> owner) throws IOException {
	// It is necessary to use a separate lock from the one used for buffer
	// requests to ensure deadlock freedom for failure cases.
	synchronized (factoryLock) {
		if (isDestroyed) {
			throw new IllegalStateException("Network buffer pool has already been destroyed.");
		}

		// Ensure that the number of required buffers can be satisfied.
		// With dynamic memory management this should become obsolete.
		if (numTotalRequiredBuffers + numRequiredBuffers > totalNumberOfMemorySegments) {
			throw new IOException(String.format("Insufficient number of network buffers: " +
							"required %d, but only %d available. The total number of network " +
							"buffers is currently set to %d of %d bytes each. You can increase this " +
							"number by setting the configuration keys '%s', '%s', and '%s'.",
					numRequiredBuffers,
					totalNumberOfMemorySegments - numTotalRequiredBuffers,
					totalNumberOfMemorySegments,
					memorySegmentSize,
					TaskManagerOptions.NETWORK_BUFFERS_MEMORY_FRACTION.key(),
					TaskManagerOptions.NETWORK_BUFFERS_MEMORY_MIN.key(),
					TaskManagerOptions.NETWORK_BUFFERS_MEMORY_MAX.key()));
		}

		this.numTotalRequiredBuffers += numRequiredBuffers;

		// We are good to go, create a new buffer pool and redistribute
		// non-fixed size buffers.
		LocalBufferPool localBufferPool =
			new LocalBufferPool(this, numRequiredBuffers, maxUsedBuffers, owner);

		allBufferPools.add(localBufferPool);

		try {
			redistributeBuffers();
		} catch (IOException e) {
			try {
				destroyBufferPool(localBufferPool);
			} catch (IOException inner) {
				e.addSuppressed(inner);
			}
			ExceptionUtils.rethrowIOException(e);
		}

		return localBufferPool;
	}
}
 
Example 17
Source File: PendingCheckpoint.java    From Flink-CEPplus with Apache License 2.0
public CompletedCheckpoint finalizeCheckpoint() throws IOException {

		synchronized (lock) {
			checkState(isFullyAcknowledged(), "Pending checkpoint has not been fully acknowledged yet.");

			// make sure we fulfill the promise with an exception if something fails
			try {
				// write out the metadata
				final Savepoint savepoint = new SavepointV2(checkpointId, operatorStates.values(), masterState);
				final CompletedCheckpointStorageLocation finalizedLocation;

				try (CheckpointMetadataOutputStream out = targetLocation.createMetadataOutputStream()) {
					Checkpoints.storeCheckpointMetadata(savepoint, out);
					finalizedLocation = out.closeAndFinalizeCheckpoint();
				}

				CompletedCheckpoint completed = new CompletedCheckpoint(
						jobId,
						checkpointId,
						checkpointTimestamp,
						System.currentTimeMillis(),
						operatorStates,
						masterState,
						props,
						finalizedLocation);

				onCompletionPromise.complete(completed);

				// to prevent null-pointers from concurrent modification, copy reference onto stack
				PendingCheckpointStats statsCallback = this.statsCallback;
				if (statsCallback != null) {
					// Finalize the statsCallback and give the completed checkpoint a
					// callback for discards.
					CompletedCheckpointStats.DiscardCallback discardCallback =
							statsCallback.reportCompletedCheckpoint(finalizedLocation.getExternalPointer());
					completed.setDiscardCallback(discardCallback);
				}

				// mark this pending checkpoint as disposed, but do NOT drop the state
				dispose(false);

				return completed;
			}
			catch (Throwable t) {
				onCompletionPromise.completeExceptionally(t);
				ExceptionUtils.rethrowIOException(t);
				return null; // silence the compiler
			}
		}
	}