org.apache.kafka.connect.data.Struct Java Examples
The following examples show how to use org.apache.kafka.connect.data.Struct. Each example is taken from an open-source project; the source file, originating project, and license are noted above each snippet.
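Before diving into the project code, here is a minimal self-contained sketch of the core Struct workflow: define a Schema with SchemaBuilder, populate a Struct against it, validate it, and read fields back with the typed getters. The class and field names are illustrative only, not taken from any project below.

import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaBuilder;
import org.apache.kafka.connect.data.Struct;

public class StructBasics {
  public static void main(String[] args) {
    // Define the shape of the record once...
    Schema schema = SchemaBuilder.struct()
        .name("com.example.User")  // illustrative schema name
        .field("name", Schema.STRING_SCHEMA)
        .field("age", Schema.OPTIONAL_INT32_SCHEMA)
        .build();

    // ...then populate a record against it; put() is chainable.
    Struct user = new Struct(schema)
        .put("name", "Ada")
        .put("age", 36);
    user.validate();  // throws DataException if a required field is missing

    System.out.println(user.getString("name") + " / " + user.getInt32("age"));
  }
}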
Example #1
Source File: StructHelper.java From connect-utils with Apache License 2.0
public static Struct struct(
    String name,
    String f1, Schema.Type t1, boolean o1, Object v1,
    String f2, Schema.Type t2, boolean o2, Object v2) {
  return struct(
      name,
      Arrays.asList(
          FieldState.of(f1, t1, o1, v1),
          FieldState.of(f2, t2, o2, v2)
      )
  );
}
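A call site for this helper might look like the following sketch; only the signature above comes from connect-utils, while the struct name, field names, and values here are hypothetical:

Struct user = StructHelper.struct(
    "User",
    "firstName", Schema.Type.STRING, false, "Ada",  // required string field
    "age", Schema.Type.INT32, true, 36);            // optional int32 field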
Example #2
Source File: SinkConverter.java From mongo-kafka with Apache License 2.0
private RecordConverter getRecordConverter(final Object data, final Schema schema) {
  // AVRO or JSON with schema
  if (schema != null && data instanceof Struct) {
    LOGGER.debug("using schemaful converter");
    return schemafulConverter;
  }
  // structured JSON without schema
  if (data instanceof Map) {
    LOGGER.debug("using schemaless converter");
    return schemalessConverter;
  }
  // raw JSON string
  if (data instanceof String) {
    LOGGER.debug("using raw converter");
    return rawConverter;
  }
  throw new DataException(
      "Error: no converter present due to unexpected object type " + data.getClass().getName());
}
Example #3
Source File: PlainPayloadFormatterTest.java From kafka-connect-lambda with Apache License 2.0
@Before
public void setup() {
  keySchema = SchemaBuilder.struct()
      .name(TEST_KEY_CLASS)
      .field("key_name", Schema.STRING_SCHEMA)
      .build();
  valueSchema = SchemaBuilder.struct()
      .name(TEST_VALUE_CLASS)
      .field(TEST_VALUE_FIELD, Schema.STRING_SCHEMA)
      .build();
  keyStruct = new Struct(keySchema).put("key_name", TEST_KEY);
  valueStruct = new Struct(valueSchema).put(TEST_VALUE_FIELD, TEST_VALUE);

  keyList = new ArrayList<>();
  keyList.add(TEST_KEY);
  keyMap = new HashMap<>();
  keyMap.put(TEST_KEY, TEST_VALUE);
  valueList = new ArrayList<>();
  valueList.add(TEST_VALUE);
  valueMap = new HashMap<>();
  valueMap.put(TEST_VALUE_KEY, TEST_VALUE);

  formatter = new PlainPayloadFormatter();
  mapper = new ObjectMapper();
}
Example #4
Source File: NormalizeSchema.java From kafka-connect-transform-common with Apache License 2.0
void copy(Struct input, Struct output) {
  for (Field outputField : output.schema().fields()) {
    Field inputField = input.schema().field(outputField.name());
    if (null != inputField) {
      if (Schema.Type.STRUCT == outputField.schema().type()) {
        Struct inputStruct = input.getStruct(inputField.name());
        if (null == inputStruct) {
          output.put(outputField, null);
        } else {
          Struct outputStruct = new Struct(outputField.schema());
          copy(inputStruct, outputStruct);
          // Attach the recursively copied struct; without this the nested value is dropped.
          output.put(outputField, outputStruct);
        }
      } else {
        output.put(outputField, input.get(outputField.name()));
      }
    } else {
      log.trace("copy() - Skipping '{}' because input does not have field.", outputField.name());
    }
  }
}
Example #5
Source File: JsonFileReaderTest.java From kafka-connect-fs with Apache License 2.0
@ParameterizedTest @MethodSource("fileSystemConfigProvider") public void readDataWithRecordPerLineDisabled(ReaderFsTestConfig fsConfig) throws IOException { Path file = createDataFile(fsConfig, 1, false); Map<String, Object> readerConfig = getReaderConfig(); readerConfig.put(JsonFileReader.FILE_READER_JSON_RECORD_PER_LINE, "false"); FileReader reader = getReader(fsConfig.getFs(), file, readerConfig); assertTrue(reader.hasNext()); int recordCount = 0; while (reader.hasNext()) { Struct record = reader.next(); checkData(record, recordCount); recordCount++; } reader.close(); assertEquals(1, recordCount, "The number of records in the file does not match"); }
Example #6
Source File: StatusConverterTest.java From kafka-connect-twitter with Apache License 2.0
void assertStatus(Status status, Struct struct) {
  assertEquals(status.getCreatedAt(), struct.get("CreatedAt"), "CreatedAt does not match.");
  assertEquals(status.getId(), struct.get("Id"), "Id does not match.");
  assertEquals(status.getText(), struct.get("Text"), "Text does not match.");
  assertEquals(status.getSource(), struct.get("Source"), "Source does not match.");
  assertEquals(status.isTruncated(), struct.get("Truncated"), "Truncated does not match.");
  assertEquals(status.getInReplyToStatusId(), struct.get("InReplyToStatusId"), "InReplyToStatusId does not match.");
  assertEquals(status.getInReplyToUserId(), struct.get("InReplyToUserId"), "InReplyToUserId does not match.");
  assertEquals(status.getInReplyToScreenName(), struct.get("InReplyToScreenName"), "InReplyToScreenName does not match.");
  assertEquals(status.isFavorited(), struct.get("Favorited"), "Favorited does not match.");
  assertEquals(status.isRetweeted(), struct.get("Retweeted"), "Retweeted does not match.");
  assertEquals(status.getFavoriteCount(), struct.get("FavoriteCount"), "FavoriteCount does not match.");
  assertEquals(status.isRetweet(), struct.get("Retweet"), "Retweet does not match.");
  assertEquals(status.getRetweetCount(), struct.get("RetweetCount"), "RetweetCount does not match.");
  assertEquals(status.isRetweetedByMe(), struct.get("RetweetedByMe"), "RetweetedByMe does not match.");
  assertEquals(status.getCurrentUserRetweetId(), struct.get("CurrentUserRetweetId"), "CurrentUserRetweetId does not match.");
  assertEquals(status.isPossiblySensitive(), struct.get("PossiblySensitive"), "PossiblySensitive does not match.");
  assertEquals(status.getLang(), struct.get("Lang"), "Lang does not match.");
  assertUser(status.getUser(), struct.getStruct("User"));
  assertPlace(status.getPlace(), struct.getStruct("Place"));
  assertGeoLocation(status.getGeoLocation(), struct.getStruct("GeoLocation"));
  assertEquals(convert(status.getContributors()), struct.getArray("Contributors"), "Contributors does not match.");
  assertEquals(convert(status.getWithheldInCountries()), struct.get("WithheldInCountries"), "WithheldInCountries does not match.");
}
Example #7
Source File: EventConverter.java From kafka-connect-splunk with Apache License 2.0
static <T> void setFieldValue(JsonNode messageNode, Struct struct, String fieldName, Class<T> cls) {
  T structValue = null;
  if (messageNode.has(fieldName)) {
    JsonNode valueNode = messageNode.get(fieldName);
    if (String.class.equals(cls) && valueNode.isObject()) {
      try {
        structValue = (T) ObjectMapperFactory.INSTANCE.writeValueAsString(valueNode);
      } catch (JsonProcessingException e) {
        throw new IllegalStateException(e);
      }
    } else if (!valueNode.isNull()) {
      structValue = ObjectMapperFactory.INSTANCE.convertValue(valueNode, cls);
    }
  }
  struct.put(fieldName, structValue);
}
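A plausible way to drive this helper is sketched below. The schema and field names are hypothetical, and the target fields should be optional since the helper may put null; note that nested JSON objects can be captured as raw JSON strings by requesting String.class.

ObjectMapper mapper = new ObjectMapper();
JsonNode messageNode = mapper.readTree("{\"host\":\"web-1\",\"fields\":{\"status\":200}}");

Schema schema = SchemaBuilder.struct()
    .field("host", Schema.OPTIONAL_STRING_SCHEMA)
    .field("fields", Schema.OPTIONAL_STRING_SCHEMA)  // nested object kept as a JSON string
    .build();
Struct struct = new Struct(schema);

setFieldValue(messageNode, struct, "host", String.class);    // plain value, coerced by Jackson
setFieldValue(messageNode, struct, "fields", String.class);  // object value, re-serialized to JSON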
Example #8
Source File: ParquetFileReaderTest.java From kafka-connect-fs with Apache License 2.0
@ParameterizedTest @MethodSource("fileSystemConfigProvider") public void readerWithProjection(ReaderFsTestConfig fsConfig) throws IOException { Map<String, Object> readerConfig = getReaderConfig(); readerConfig.put(ParquetFileReader.FILE_READER_PARQUET_PROJECTION, projectionSchema.toString()); readerConfig.put(AgnosticFileReader.FILE_READER_AGNOSTIC_EXTENSIONS_PARQUET, getFileExtension()); fsConfig.setReader(getReader(fsConfig.getFs(), fsConfig.getDataFile(), readerConfig)); while (fsConfig.getReader().hasNext()) { Struct record = fsConfig.getReader().next(); assertNotNull(record.schema().field(FIELD_INDEX)); assertNotNull(record.schema().field(FIELD_NAME)); assertNull(record.schema().field(FIELD_SURNAME)); } FileSystem testFs = FileSystem.newInstance(fsConfig.getFsUri(), new Configuration()); fsConfig.setReader(getReader(testFs, fsConfig.getDataFile(), readerConfig)); assertThrows(DataException.class, () -> readAllData(fsConfig)); }
Example #9
Source File: UserTypeDeserializer.java From debezium-incubator with Apache License 2.0
public Object deserialize(AbstractType<?> abstractType, ByteBuffer bb) {
  ByteBuffer userTypeByteBuffer = (ByteBuffer) super.deserialize(abstractType, bb);
  UserType userType = (UserType) abstractType;
  UserTypes.Value value = UserTypes.Value.fromSerialized(userTypeByteBuffer, userType);
  List<ByteBuffer> elements = value.getElements();

  Struct struct = new Struct(getSchemaBuilder(abstractType).build());
  for (int i = 0; i < userType.fieldNames().size(); i++) {
    String fieldName = userType.fieldNameAsString(i);
    AbstractType<?> fieldType = userType.type(i);
    struct.put(fieldName, CassandraTypeDeserializer.deserialize(fieldType, elements.get(i)));
  }
  return struct;
}
Example #10
Source File: ParquetHiveUtilTest.java From streamx with Apache License 2.0
private void prepareData(String topic, int partition) throws Exception {
  TopicPartition tp = new TopicPartition(topic, partition);
  DataWriter hdfsWriter = createWriter(context, avroData);
  hdfsWriter.recover(tp);

  String key = "key";
  Schema schema = createSchema();
  Struct record = createRecord(schema);

  Collection<SinkRecord> sinkRecords = new ArrayList<>();
  for (long offset = 0; offset < 7; offset++) {
    SinkRecord sinkRecord =
        new SinkRecord(topic, partition, Schema.STRING_SCHEMA, key, schema, record, offset);
    sinkRecords.add(sinkRecord);
  }
  hdfsWriter.write(sinkRecords);
  hdfsWriter.close(assignment);
  hdfsWriter.stop();
}
Example #11
Source File: StructSerializationModule.java From connect-utils with Apache License 2.0
@Override
public void serialize(Struct struct, JsonGenerator jsonGenerator, SerializerProvider serializerProvider)
    throws IOException, JsonProcessingException {
  struct.validate();
  Storage result = new Storage();
  result.schema = struct.schema();
  result.fieldValues = new ArrayList<>();
  for (Field field : struct.schema().fields()) {
    log.trace("serialize() - Processing field '{}'", field.name());
    KeyValue keyValue = new KeyValue();
    keyValue.name = field.name();
    keyValue.schema = field.schema();
    keyValue.value(struct.get(field));
    result.fieldValues.add(keyValue);
  }
  jsonGenerator.writeObject(result);
}
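Like any Jackson module, this serializer only takes effect once the module is registered on an ObjectMapper. A minimal wiring sketch follows; the surrounding setup (schema, no-arg construction of the module) is assumed rather than taken from the project:

ObjectMapper mapper = new ObjectMapper();
mapper.registerModule(new StructSerializationModule());

Schema schema = SchemaBuilder.struct()
    .field("name", Schema.STRING_SCHEMA)
    .build();
Struct struct = new Struct(schema).put("name", "Ada");
String json = mapper.writeValueAsString(struct);  // routed through serialize() above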
Example #12
Source File: TopicPartitionWriterTest.java From streamx with Apache License 2.0
private void verify(Set<Path> expectedFiles, Struct[] records, Schema schema) throws IOException {
  Path path = new Path(FileUtils.topicDirectory(url, topicsDir, TOPIC));
  FileStatus[] statuses = FileUtils.traverse(storage, path, new CommittedFileFilter());
  assertEquals(expectedFiles.size(), statuses.length);
  int index = 0;
  for (FileStatus status : statuses) {
    Path filePath = status.getPath();
    assertTrue(expectedFiles.contains(status.getPath()));
    Collection<Object> avroRecords = schemaFileReader.readData(conf, filePath);
    assertEquals(3, avroRecords.size());
    for (Object avroRecord : avroRecords) {
      assertEquals(avroData.fromConnectData(schema, records[index]), avroRecord);
    }
    index++;
  }
}
Example #13
Source File: MongodbSourceTask.java From kafka-connect-mongodb with Apache License 2.0
/**
 * Poll this MongodbSourceTask for new records.
 *
 * @return a list of source records
 * @throws InterruptException
 */
@Override
public List<SourceRecord> poll() throws InterruptException {
  List<SourceRecord> records = new ArrayList<>();
  while (!reader.isEmpty()) {
    Document message = reader.pool();
    Struct messageStruct = getStruct(message);
    String topic = getTopic(message);
    String db = getDB(message);
    String timestamp = getTimestamp(message);
    records.add(new SourceRecord(
        Collections.singletonMap("mongodb", db),
        Collections.singletonMap(db, timestamp),
        topic,
        messageStruct.schema(),
        messageStruct));
    log.trace(message.toString());
  }
  return records;
}
Example #14
Source File: SinkConverter.java From kafka-connect-mongodb with Apache License 2.0
private RecordConverter getRecordConverter(Object data, Schema schema) {
  // AVRO or JSON with schema
  if (schema != null && data instanceof Struct) {
    logger.debug("using schemaful converter");
    return schemafulConverter;
  }
  // structured JSON without schema
  if (data instanceof Map) {
    logger.debug("using schemaless converter");
    return schemalessConverter;
  }
  // raw JSON string
  if (data instanceof String) {
    logger.debug("using raw converter");
    return rawConverter;
  }
  throw new DataException("error: no converter present due to unexpected object type "
      + data.getClass().getName());
}
Example #15
Source File: StatusConverter.java From kafka-connect-twitter with Apache License 2.0
static Struct convertExtendedMediaEntity(ExtendedMediaEntity extendedMediaEntity) {
  return new Struct(SCHEMA_EXTENDED_MEDIA_ENTITY)
      .put("VideoAspectRatioWidth", extendedMediaEntity.getVideoAspectRatioWidth())
      .put("VideoAspectRatioHeight", extendedMediaEntity.getVideoAspectRatioHeight())
      .put("VideoDurationMillis", extendedMediaEntity.getVideoDurationMillis())
      .put("VideoVariants", extendedMediaEntity.getVideoVariants())
      .put("ExtAltText", extendedMediaEntity.getExtAltText())
      .put("Id", extendedMediaEntity.getId())
      .put("Type", extendedMediaEntity.getType())
      .put("MediaURL", extendedMediaEntity.getMediaURL())
      .put("Sizes", extendedMediaEntity.getSizes())
      .put("MediaURLHttps", extendedMediaEntity.getMediaURLHttps())
      .put("URL", extendedMediaEntity.getURL())
      .put("Text", extendedMediaEntity.getText())
      .put("ExpandedURL", extendedMediaEntity.getExpandedURL())
      .put("Start", extendedMediaEntity.getStart())
      .put("End", extendedMediaEntity.getEnd())
      .put("DisplayURL", extendedMediaEntity.getDisplayURL());
}
Example #16
Source File: CellData.java From debezium-incubator with Apache License 2.0
@Override
public Struct record(Schema schema) {
  Struct cellStruct = new Struct(schema)
      .put(CELL_DELETION_TS_KEY, deletionTs)
      .put(CELL_SET_KEY, true);

  if (value instanceof Struct) {
    Schema valueSchema = schema.field(CELL_VALUE_KEY).schema();
    Struct clonedValue = cloneValue(valueSchema, (Struct) value);
    cellStruct.put(CELL_VALUE_KEY, clonedValue);
  } else {
    cellStruct.put(CELL_VALUE_KEY, value);
  }
  return cellStruct;
}
Example #17
Source File: JsonFileReaderTest.java From kafka-connect-fs with Apache License 2.0
@ParameterizedTest @MethodSource("fileSystemConfigProvider") public void readDifferentCompressionTypes(ReaderFsTestConfig fsConfig) { Arrays.stream(CompressionType.values()).forEach(compressionType -> { try { Path file = createDataFile(fsConfig, NUM_RECORDS, true, compressionType); Map<String, Object> readerConfig = getReaderConfig(); readerConfig.put(JsonFileReader.FILE_READER_JSON_COMPRESSION_TYPE, compressionType.toString()); readerConfig.put(JsonFileReader.FILE_READER_JSON_COMPRESSION_CONCATENATED, "true"); FileReader reader = getReader(fsConfig.getFs(), file, readerConfig); assertTrue(reader.hasNext()); int recordCount = 0; while (reader.hasNext()) { Struct record = reader.next(); checkData(record, recordCount); recordCount++; } reader.close(); assertEquals(NUM_RECORDS, recordCount, "The number of records in the file does not match"); } catch (Exception e) { throw new RuntimeException(e); } }); }
Example #18
Source File: UnivocityFileReaderTest.java From kafka-connect-fs with Apache License 2.0
@ParameterizedTest @MethodSource("fileSystemConfigProvider") public void readAllDataWithoutHeader(ReaderFsTestConfig fsConfig) throws IOException { Path file = createDataFile(fsConfig, false); Map<String, Object> readerConfig = getReaderConfig(); readerConfig.put(T.FILE_READER_DELIMITED_SETTINGS_HEADER, "false"); FileReader reader = getReader(fsConfig.getFs(), file, readerConfig); assertTrue(reader.hasNext()); int recordCount = 0; while (reader.hasNext()) { Struct record = reader.next(); checkData(record, recordCount); recordCount++; } assertEquals(NUM_RECORDS, recordCount, "The number of records in the file does not match"); }
Example #19
Source File: LogFieldConverter.java From kafka-connect-spooldir with Apache License 2.0
public void convert(LogEntry logEntry, Struct struct) {
  final Object input = logEntry.fieldData().get(this.logFieldName);
  final Object output;
  if (null == input) {
    output = null;
  } else {
    output = convert(input);
  }
  log.trace("convert() - Setting {} to {}", field.name(), output);
  struct.put(this.field, output);
}
Example #20
Source File: ChangeCaseTest.java From kafka-connect-transform-common with Apache License 2.0
@Test
public void test() {
  this.transformation.configure(
      ImmutableMap.of(
          ChangeCaseConfig.FROM_CONFIG, CaseFormat.UPPER_UNDERSCORE.toString(),
          ChangeCaseConfig.TO_CONFIG, CaseFormat.LOWER_UNDERSCORE.toString()
      )
  );
  final Schema inputSchema = SchemaBuilder.struct()
      .field("FIRST_NAME", Schema.STRING_SCHEMA)
      .field("LAST_NAME", Schema.STRING_SCHEMA)
      .build();
  final Schema expectedSchema = SchemaBuilder.struct()
      .field("first_name", Schema.STRING_SCHEMA)
      .field("last_name", Schema.STRING_SCHEMA)
      .build();
  final Struct inputStruct = new Struct(inputSchema)
      .put("FIRST_NAME", "test")
      .put("LAST_NAME", "user");
  final Struct expectedStruct = new Struct(expectedSchema)
      .put("first_name", "test")
      .put("last_name", "user");

  final SinkRecord inputRecord = new SinkRecord("topic", 1, null, null, inputSchema, inputStruct, 1L);
  for (int i = 0; i < 50; i++) {
    final SinkRecord transformedRecord = this.transformation.apply(inputRecord);
    assertNotNull(transformedRecord, "transformedRecord should not be null.");
    assertSchema(expectedSchema, transformedRecord.valueSchema());
    assertStruct(expectedStruct, (Struct) transformedRecord.value());
  }
}
Example #21
Source File: StatusConverter.java From kafka-connect-twitter with Apache License 2.0
static Struct convertURLEntity(URLEntity uRLEntity) {
  return new Struct(SCHEMA_URL_ENTITY)
      .put("URL", uRLEntity.getURL())
      .put("Text", uRLEntity.getText())
      .put("ExpandedURL", uRLEntity.getExpandedURL())
      .put("Start", uRLEntity.getStart())
      .put("End", uRLEntity.getEnd())
      .put("DisplayURL", uRLEntity.getDisplayURL());
}
Example #22
Source File: Record.java From debezium-incubator with Apache License 2.0
public Struct buildKey() {
  if (keySchema == null) {
    return null;
  }
  List<CellData> primary = rowData.getPrimary();
  Struct struct = new Struct(keySchema);
  for (CellData cellData : primary) {
    struct.put(cellData.name, cellData.value);
  }
  return struct;
}
Example #23
Source File: UnivocityFileReaderTest.java From kafka-connect-fs with Apache License 2.0
@ParameterizedTest @MethodSource("fileSystemConfigProvider") public void readDifferentCompressionTypes(ReaderFsTestConfig fsConfig) { Arrays.stream(CompressionType.values()).forEach(compressionType -> { try { Path file = createDataFile(fsConfig, true, compressionType); Map<String, Object> readerConfig = getReaderConfig(); readerConfig.put(T.FILE_READER_DELIMITED_COMPRESSION_TYPE, compressionType.toString()); readerConfig.put(T.FILE_READER_DELIMITED_COMPRESSION_CONCATENATED, "true"); readerConfig.put(T.FILE_READER_DELIMITED_SETTINGS_HEADER, "true"); FileReader reader = getReader(fsConfig.getFs(), file, readerConfig); assertTrue(reader.hasNext()); int recordCount = 0; while (reader.hasNext()) { Struct record = reader.next(); checkData(record, recordCount); recordCount++; } reader.close(); assertEquals(NUM_RECORDS, recordCount, "The number of records in the file does not match"); } catch (Exception e) { throw new RuntimeException(e); } }); }
Example #24
Source File: PatternRename.java From kafka-connect-transform-common with Apache License 2.0
@Override
protected SchemaAndValue processStruct(R record, Schema inputSchema, Struct inputStruct) {
  final SchemaBuilder outputSchemaBuilder = SchemaBuilder.struct();
  outputSchemaBuilder.name(inputSchema.name());
  outputSchemaBuilder.doc(inputSchema.doc());
  if (null != inputSchema.defaultValue()) {
    outputSchemaBuilder.defaultValue(inputSchema.defaultValue());
  }
  if (null != inputSchema.parameters() && !inputSchema.parameters().isEmpty()) {
    outputSchemaBuilder.parameters(inputSchema.parameters());
  }
  if (inputSchema.isOptional()) {
    outputSchemaBuilder.optional();
  }

  Map<String, String> fieldMappings = new HashMap<>(inputSchema.fields().size());
  for (final Field inputField : inputSchema.fields()) {
    log.trace("process() - Processing field '{}'", inputField.name());
    final Matcher fieldMatcher = this.config.pattern.matcher(inputField.name());
    final String outputFieldName;
    if (fieldMatcher.find()) {
      outputFieldName = fieldMatcher.replaceAll(this.config.replacement);
    } else {
      outputFieldName = inputField.name();
    }
    log.trace("process() - Mapping field '{}' to '{}'", inputField.name(), outputFieldName);
    fieldMappings.put(inputField.name(), outputFieldName);
    outputSchemaBuilder.field(outputFieldName, inputField.schema());
  }
  final Schema outputSchema = outputSchemaBuilder.build();
  final Struct outputStruct = new Struct(outputSchema);
  for (Map.Entry<String, String> entry : fieldMappings.entrySet()) {
    final String inputField = entry.getKey(), outputField = entry.getValue();
    log.trace("process() - Copying '{}' to '{}'", inputField, outputField);
    final Object value = inputStruct.get(inputField);
    outputStruct.put(outputField, value);
  }
  return new SchemaAndValue(outputSchema, outputStruct);
}
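Exercised from a test, the transform might be wired up as in the sketch below. The Value subclass and the config key names (field.pattern, field.replacement) are assumptions about the project's conventions and should be verified against PatternRenameConfig:

PatternRename.Value<SinkRecord> transform = new PatternRename.Value<>();
transform.configure(ImmutableMap.of(
    "field.pattern", "^prefix_",  // assumed config key
    "field.replacement", ""));    // assumed config key
SinkRecord renamed = transform.apply(inputRecord);  // inputRecord built as in Example #20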
Example #25
Source File: IRCMessage.java From hello-kafka-streams with Apache License 2.0
public IRCMessage(String channel, IRCUser user, String message) {
  super(SCHEMA);
  this.put("timestamp", System.currentTimeMillis());
  this.put("channel", channel);
  this.put("message", message);
  this.put("user", new Struct(SCHEMA.field("user").schema())
      .put("nick", user.getNick())
      .put("username", user.getUsername())
      .put("host", user.getHost()));
}
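Because IRCMessage extends Struct, the nested user struct can be read back with Struct's typed getters. A short sketch, assuming an IRCUser instance supplied by the IRC library:

IRCMessage msg = new IRCMessage("#kafka", user, "hello");
String nick = msg.getStruct("user").getString("nick");
long timestamp = msg.getInt64("timestamp");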
Example #26
Source File: HiveIntegrationAvroTest.java From streamx with Apache License 2.0
private Struct[] createRecords(Schema schema) {
  Struct record1 = new Struct(schema)
      .put("boolean", true)
      .put("int", 16)
      .put("long", 12L)
      .put("float", 12.2f)
      .put("double", 12.2);
  Struct record2 = new Struct(schema)
      .put("boolean", true)
      .put("int", 17)
      .put("long", 12L)
      .put("float", 12.2f)
      .put("double", 12.2);
  Struct record3 = new Struct(schema)
      .put("boolean", true)
      .put("int", 18)
      .put("long", 12L)
      .put("float", 12.2f)
      .put("double", 12.2);

  ArrayList<Struct> records = new ArrayList<>();
  records.add(record1);
  records.add(record2);
  records.add(record3);
  return records.toArray(new Struct[records.size()]);
}
Example #27
Source File: StatusConverter.java From kafka-connect-twitter with Apache License 2.0
public static List<Struct> convert(MediaEntity[] items) {
  List<Struct> result = new ArrayList<>();
  if (null == items) {
    return result;
  }
  for (MediaEntity item : items) {
    Struct struct = convertMediaEntity(item);
    result.add(struct);
  }
  return result;
}
Example #28
Source File: ExtractTimestamp.java From kafka-connect-transform-common with Apache License 2.0
protected long process(SchemaAndValue schemaAndValue) {
  final long result;
  if (schemaAndValue.value() instanceof Struct) {
    result = processStruct(schemaAndValue);
  } else if (schemaAndValue.value() instanceof Map) {
    result = processMap(schemaAndValue);
  } else {
    throw new UnsupportedOperationException();
  }
  return result;
}
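The processStruct branch is not shown in this snippet. A minimal version, assuming the configured field carries Connect's Timestamp logical type (represented at runtime as java.util.Date) and that the config object exposes the field name as this.config.fieldName, could look like:

private long processStruct(SchemaAndValue schemaAndValue) {
  Struct struct = (Struct) schemaAndValue.value();
  java.util.Date timestamp = (java.util.Date) struct.get(this.config.fieldName);  // assumed config property
  return timestamp.getTime();
}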
Example #29
Source File: RowData.java From debezium-incubator with Apache License 2.0
@Override
public Struct record(Schema schema) {
  Struct struct = new Struct(schema);
  for (Field field : schema.fields()) {
    Schema cellSchema = getFieldSchema(field.name(), schema);
    CellData cellData = cellMap.get(field.name());
    // only add the cell if it is not null
    if (cellData != null) {
      struct.put(field.name(), cellData.record(cellSchema));
    }
  }
  return struct;
}
Example #30
Source File: OracleEventMetadataProvider.java From debezium-incubator with Apache License 2.0
@Override
public String getTransactionId(DataCollectionId source, OffsetContext offset, Object key, Struct value) {
  if (value == null) {
    return null;
  }
  final Struct sourceInfo = value.getStruct(Envelope.FieldName.SOURCE);
  // Guard the extracted source struct, which is what is dereferenced below.
  if (sourceInfo == null) {
    return null;
  }
  return sourceInfo.getString(SourceInfo.TXID_KEY);
}