Java Code Examples for org.apache.avro.generic.GenericData.Record
The following examples show how to use
org.apache.avro.generic.GenericData.Record. They are extracted from open source projects;
the source project, source file, and license for each example are listed in the header above it,
so every snippet can be traced back to its original context.
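Before the project-specific examples, here is a minimal, self-contained sketch of the basic GenericData.Record workflow: define a schema, populate a record by field name or position, and round-trip it through Avro's binary encoding. This sketch is not taken from any of the projects below; the User schema, field names, and values are purely illustrative.

import java.io.ByteArrayOutputStream;

import org.apache.avro.Schema;
import org.apache.avro.SchemaBuilder;
import org.apache.avro.generic.GenericData.Record;
import org.apache.avro.generic.GenericDatumReader;
import org.apache.avro.generic.GenericDatumWriter;
import org.apache.avro.generic.GenericRecord;
import org.apache.avro.io.BinaryDecoder;
import org.apache.avro.io.BinaryEncoder;
import org.apache.avro.io.DecoderFactory;
import org.apache.avro.io.EncoderFactory;

public class GenericRecordBasics {

    public static void main(String[] args) throws Exception {
        // Define a simple schema programmatically (a schema could also be parsed from JSON).
        // The record name and fields here are illustrative only.
        Schema schema = SchemaBuilder.record("User").namespace("example")
                .fields()
                .requiredLong("id")
                .requiredString("username")
                .optionalString("email")
                .endRecord();

        // Create and populate a GenericData.Record; fields can be set by name or by position.
        Record user = new Record(schema);
        user.put("id", 1L);
        user.put("username", "alice");
        user.put(2, "alice@example.org"); // field index 2 is "email"

        // Serialize the record to Avro binary using the same schema.
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        GenericDatumWriter<GenericRecord> writer = new GenericDatumWriter<>(schema);
        BinaryEncoder encoder = EncoderFactory.get().binaryEncoder(out, null);
        writer.write(user, encoder);
        encoder.flush();

        // Deserialize it back into a GenericRecord and read fields by name.
        GenericDatumReader<GenericRecord> reader = new GenericDatumReader<>(schema);
        BinaryDecoder decoder = DecoderFactory.get().binaryDecoder(out.toByteArray(), null);
        GenericRecord roundTripped = reader.read(null, decoder);
        System.out.println(roundTripped.get("username") + " / " + roundTripped.get("email"));
    }
}

Because GenericData.Record implements GenericRecord (and therefore IndexedRecord), it can be passed anywhere an IndexedRecord is expected, which is why many of the tests below assign a new Record(...) to an IndexedRecord variable.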
Example 1
Source Project: components Source File: MarketoSOAPClientTest.java License: Apache License 2.0 | 6 votes |
@Test
public void testSyncLead() throws Exception {
    doReturn(getSyncLeadResult()).when(port).syncLead(any(ParamsSyncLead.class), any(AuthenticationHeader.class),
            any(MktowsContextHeader.class));
    oprops.afterOutputOperation();
    oprops.beforeMappingInput();
    mktoSR = client.syncLead(oprops, null);
    assertNotNull(mktoSR);
    assertFalse(mktoSR.isSuccess());
    IndexedRecord record = new Record(MarketoConstants.getSOAPOutputSchemaForSyncLead());
    record.put(0, 12345);
    record.put(1, "[email protected]");
    mktoSR = client.syncLead(oprops, record);
    assertNotNull(mktoSR);
    assertTrue(mktoSR.isSuccess());
    //
    doThrow(new RuntimeException("error")).when(port).syncLead(any(ParamsSyncLead.class),
            any(AuthenticationHeader.class), any(MktowsContextHeader.class));
    mktoSR = client.syncLead(oprops, record);
    assertNotNull(mktoSR);
    assertFalse(mktoSR.isSuccess());
}
Example 2
Source Project: components Source File: MarketoListOperationWriterTest.java License: Apache License 2.0 | 6 votes |
@Test
public void testRetryOperationFailDieOnError() throws Exception {
    IndexedRecord record = new Record(MarketoConstants.getListOperationRESTSchema());
    record.put(0, 12345);
    record.put(1, 54321);
    doReturn(false).when(client).isErrorRecoverable(any(List.class));
    doReturn(getFailedSyncResult("REST", "902", "Invalid operation")).when(client)
            .addToList(any(ListOperationParameters.class));
    writer.open("test");
    writer.write(record);
    try {
        writer.close();
        fail("Should not be here");
    } catch (Exception e) {
        assertTrue(e.getMessage().contains("902"));
    }
}
Example 3
Source Project: data-highway Source File: ConsumerRecordWriterTest.java License: Apache License 2.0 | 6 votes |
@Test
public void write_Flush() throws IOException {
    when(outputStreamFactory.create(LOCATION)).thenReturn(abortableOutputStream);
    ArgumentCaptor<OutputStream> captor = ArgumentCaptor.forClass(OutputStream.class);
    when(recordWriterFactory.create(eq(schema1), captor.capture())).thenReturn(recordWriter);
    underTest.getByteCounter().getAndAdd(3L); // fake some written bytes

    ConsumerRecord<Void, Record> record = record(schema1, "foo", 1, 10);
    underTest.write(record);

    verify(recordWriter).write(record.value());
    assertThat(underTest.getRecordCounter().get(), is(0L));
    verify(metrics).consumedBytes(10);
    verify(metrics).offsetHighwaterMark(0, 1);
    verify(metrics).uploadedBytes(3L);
    verify(metrics).uploadedEvents(1L);
    assertThat(writers.size(), is(0));
}
Example 4
Source Project: components Source File: MarketoOpportunityClientTest.java License: Apache License 2.0 | 6 votes |
@Test
public void testDeleteOpportunities() throws Exception {
    oprops.customObjectDeleteBy.setValue(CustomObjectDeleteBy.idField);
    //
    doThrow(new MarketoException("REST", "error")).when(client).executePostRequest(eq(SyncResult.class),
            any(JsonObject.class));
    List<IndexedRecord> records = new ArrayList<>();
    IndexedRecord record = new Record(MarketoConstants.getCustomObjectRecordSchema());
    record.put(0, "mkto-123456");
    records.add(record);
    mktoSR = client.deleteOpportunities(oprops, records);
    assertFalse(mktoSR.isSuccess());
    assertFalse(mktoSR.getErrorsString().isEmpty());
    //
    doReturn(new SyncResult()).when(client).executePostRequest(eq(SyncResult.class), any(JsonObject.class));
    mktoSR = client.deleteOpportunities(oprops, records);
    assertFalse(mktoSR.isSuccess());
    //
    doReturn(getListOperationResult(true, "deleted")).when(client).executePostRequest(eq(SyncResult.class),
            any(JsonObject.class));
    mktoSR = client.deleteOpportunities(oprops, records);
    assertTrue(mktoSR.isSuccess());
    assertTrue(mktoSR.getErrorsString().isEmpty());
}
Example 5
Source Project: data-highway Source File: TruckParkTest.java License: Apache License 2.0 | 6 votes |
@Test
public void test() throws Exception {
    Schema schema = SchemaBuilder.record("r").fields().name("f").type().stringType().noDefault().endRecord();
    Record value = new Record(schema);
    ConsumerRecord<Void, Record> record1 = new ConsumerRecord<>(topic, 0, 1, null, value);
    ConsumerRecords<Void, Record> records1 = new ConsumerRecords<>(
            ImmutableMap.of(partition, ImmutableList.of(record1)));
    when(consumer.poll(pollTimeout)).thenReturn(records1);

    underTest.run(null);

    InOrder inOrder = inOrder(consumer, writer, context);
    inOrder.verify(consumer).assign(partitions);
    inOrder.verify(consumer).seek(partition, 1L);
    inOrder.verify(consumer).poll(pollTimeout);
    inOrder.verify(writer).write(record1);
    inOrder.verify(consumer).pause(partitions);
    inOrder.verify(writer).close();
    inOrder.verify(context).close();
}
Example 6
Source Project: localization_nifi Source File: TestKiteStorageProcessor.java License: Apache License 2.0 | 6 votes |
@Test
public void testIncompatibleSchema() throws IOException {
    Schema incompatible = SchemaBuilder.record("User").fields()
            .requiredLong("id")
            .requiredString("username")
            .optionalString("email") // the dataset requires this field
            .endRecord();

    // this user has the email field and could be stored, but the schema is
    // still incompatible so the entire stream is rejected
    Record incompatibleUser = new Record(incompatible);
    incompatibleUser.put("id", 1L);
    incompatibleUser.put("username", "a");
    incompatibleUser.put("email", "[email protected]");

    TestRunner runner = TestRunners.newTestRunner(StoreInKiteDataset.class);
    runner.setProperty(StoreInKiteDataset.KITE_DATASET_URI, datasetUri);
    runner.assertValid();

    runner.enqueue(streamFor(incompatibleUser));
    runner.run();

    runner.assertAllFlowFilesTransferred("incompatible", 1);
}
Example 7
Source Project: iceberg Source File: RandomData.java License: Apache License 2.0 | 6 votes |
private static Iterable<Record> newIterable(Supplier<RandomDataGenerator> newGenerator,
                                            Schema schema, int numRecords) {
    return () -> new Iterator<Record>() {
        private int count = 0;
        private RandomDataGenerator generator = newGenerator.get();

        @Override
        public boolean hasNext() {
            return count < numRecords;
        }

        @Override
        public Record next() {
            if (count >= numRecords) {
                throw new NoSuchElementException();
            }
            count += 1;
            return (Record) TypeUtil.visit(schema, generator);
        }
    };
}
Example 8
Source Project: iceberg Source File: TestReadProjection.java License: Apache License 2.0 | 6 votes |
@Test
public void testReorderedProjection() throws Exception {
    Schema schema = new Schema(
            Types.NestedField.required(0, "id", Types.LongType.get()),
            Types.NestedField.optional(1, "data", Types.StringType.get())
    );

    Record record = new Record(AvroSchemaUtil.convert(schema, "table"));
    record.put("id", 34L);
    record.put("data", "test");

    Schema reordered = new Schema(
            Types.NestedField.optional(2, "missing_1", Types.StringType.get()),
            Types.NestedField.optional(1, "data", Types.StringType.get()),
            Types.NestedField.optional(3, "missing_2", Types.LongType.get())
    );

    Record projected = writeAndRead("full_projection", schema, reordered, record);

    Assert.assertNull("Should contain the correct 0 value", projected.get(0));
    Assert.assertEquals("Should contain the correct 1 value", "test", projected.get(1).toString());
    Assert.assertNull("Should contain the correct 2 value", projected.get(2));
}
Example 9
Source Project: components Source File: MarketoSOAPClientTest.java License: Apache License 2.0 | 6 votes |
@Test
public void testSyncMultipleLeads() throws Exception {
    doReturn(getSyncMultipleLeadsResult()).when(port).syncMultipleLeads(any(ParamsSyncMultipleLeads.class),
            any(AuthenticationHeader.class));
    oprops.afterOutputOperation();
    oprops.beforeMappingInput();
    mktoSR = client.syncLead(oprops, null);
    assertNotNull(mktoSR);
    assertFalse(mktoSR.isSuccess());
    IndexedRecord record = new Record(MarketoConstants.getSOAPOutputSchemaForSyncLead());
    record.put(0, 12345);
    record.put(1, "[email protected]");
    mktoSR = client.syncMultipleLeads(oprops, Arrays.asList(record));
    assertNotNull(mktoSR);
    assertTrue(mktoSR.isSuccess());
    //
    doThrow(new RuntimeException("error")).when(port).syncMultipleLeads(any(ParamsSyncMultipleLeads.class),
            any(AuthenticationHeader.class));
    mktoSR = client.syncMultipleLeads(oprops, Arrays.asList(record));
    assertNotNull(mktoSR);
    assertFalse(mktoSR.isSuccess());
}
Example 10
Source Project: kite Source File: TestFileSystemDataset.java License: Apache License 2.0 | 6 votes |
@Test(expected = ValidationException.class)
public void testCannotMergeDatasetsWithDifferentFormats() throws IOException {
    FileSystemDataset<Record> ds = new FileSystemDataset.Builder<Record>()
            .namespace("ns")
            .name("users")
            .configuration(getConfiguration())
            .descriptor(new DatasetDescriptor.Builder()
                    .schema(USER_SCHEMA)
                    .format(Formats.AVRO)
                    .location(testDirectory)
                    .build())
            .type(Record.class)
            .build();
    FileSystemDataset<Record> dsUpdate = new FileSystemDataset.Builder<Record>()
            .namespace("ns")
            .name("users")
            .configuration(getConfiguration())
            .descriptor(new DatasetDescriptor.Builder()
                    .schema(USER_SCHEMA)
                    .format(Formats.PARQUET)
                    .location(testDirectory)
                    .build())
            .type(Record.class)
            .build();
    ds.merge(dsUpdate);
}
Example 11
Source Project: components Source File: MarketoCompanyClientTest.java License: Apache License 2.0 | 6 votes |
@Test
public void testDeleteCompany() throws Exception {
    oprops.customObjectDeleteBy.setValue(CustomObjectDeleteBy.idField);
    //
    doThrow(new MarketoException("REST", "error")).when(client).executePostRequest(eq(SyncResult.class),
            any(JsonObject.class));
    List<IndexedRecord> records = new ArrayList<>();
    IndexedRecord record = new Record(MarketoConstants.getCustomObjectRecordSchema());
    record.put(0, "mkto-123456");
    records.add(record);
    mktoSR = client.deleteCompany(oprops, records);
    assertFalse(mktoSR.isSuccess());
    assertFalse(mktoSR.getErrorsString().isEmpty());
    //
    doReturn(new SyncResult()).when(client).executePostRequest(eq(SyncResult.class), any(JsonObject.class));
    mktoSR = client.deleteCompany(oprops, records);
    assertFalse(mktoSR.isSuccess());
    //
    doReturn(getListOperationResult(true, "deleted")).when(client).executePostRequest(eq(SyncResult.class),
            any(JsonObject.class));
    mktoSR = client.deleteCompany(oprops, records);
    assertTrue(mktoSR.isSuccess());
    assertTrue(mktoSR.getErrorsString().isEmpty());
}
Example 12
Source Project: kite Source File: TestMapReduce.java License: Apache License 2.0 | 6 votes |
@Test
@SuppressWarnings("deprecation")
public void testSignalReadyOutputView() throws Exception {
    Assume.assumeTrue(!Hadoop.isHadoop1());
    populateInputDataset();
    populateOutputDataset(); // existing output will be overwritten

    Job job = new Job();
    DatasetKeyInputFormat.configure(job).readFrom(inputDataset).withType(GenericData.Record.class);
    job.setMapperClass(LineCountMapper.class);
    job.setMapOutputKeyClass(Text.class);
    job.setMapOutputValueClass(IntWritable.class);
    job.setReducerClass(GenericStatsReducer.class);

    View<Record> outputView = outputDataset.with("name", "apple", "banana", "carrot");
    DatasetKeyOutputFormat.configure(job).appendTo(outputView).withType(GenericData.Record.class);

    Assert.assertTrue(job.waitForCompletion(true));

    Assert.assertFalse("Output dataset should not be signaled ready", ((Signalable) outputDataset).isReady());
    Assert.assertTrue("Output view should be signaled ready", ((Signalable) outputView).isReady());
}
Example 13
Source Project: components Source File: MarketoListOperationWriterTest.java License: Apache License 2.0 | 6 votes |
@Test
public void testWriteSOAP() throws Exception {
    doReturn(getSuccessSyncResult("added")).when(client).addToList(any(ListOperationParameters.class));
    IndexedRecord record = new Record(MarketoConstants.getListOperationSOAPSchema());
    record.put(0, "MKTOLISTNAME");
    record.put(1, "TESTS");
    record.put(2, "ID");
    record.put(3, "12345");
    props.connection.apiMode.setValue(APIMode.SOAP);
    props.dieOnError.setValue(false);
    props.multipleOperation.setValue(true);
    props.schemaInput.schema.setValue(MarketoConstants.getListOperationSOAPSchema());
    props.updateOutputSchemas();
    when(client.getApi()).thenReturn("SOAP");
    when(sink.getProperties()).thenReturn(props);
    writer.open("test");
    writer.write(record);
    assertEquals(1, writer.getSuccessfulWrites().size());
    record.put(1, "TEST2");
    writer.write(record);
    assertNotNull(writer.close());
    assertEquals(1, writer.getSuccessfulWrites().size());
    //
}
Example 14
Source Project: DataflowTemplates Source File: BigQueryConvertersTest.java License: Apache License 2.0 | 6 votes |
/**
 * Tests that {@link BigQueryConverters.AvroToEntity} creates an Entity without a valid key when a
 * field is of type Record.
 */
@Test
public void testAvroToEntityRecordField() throws Exception {
    // Create test data
    TableFieldSchema column = generateNestedTableFieldSchema();
    List<TableFieldSchema> fields = new ArrayList<>();
    fields.add(column);
    TableSchema bqSchema = new TableSchema().setFields(fields);
    Record record = generateNestedAvroRecord();
    SchemaAndRecord inputBqData = new SchemaAndRecord(record, bqSchema);
    // Run the test
    Entity outputEntity = converter.apply(inputBqData);
    // Assess results
    String expectedCauseMessage = String.format("Column [address] of type [RECORD] not supported.");
    assertTrue(!outputEntity.hasKey());
    assertEquals(
            expectedCauseMessage, outputEntity.getPropertiesMap().get("cause").getStringValue());
    assertEquals(record.toString(), outputEntity.getPropertiesMap().get("row").getStringValue());
}
Example 15
Source Project: nifi Source File: TestAvroRecordConverter.java License: Apache License 2.0 | 6 votes |
/**
 * Tests the case where we want to default map one field and explicitly map
 * another.
 */
@Test
public void testExplicitMapping() throws Exception {
    // We will convert s1 from string to long (or leave it null), ignore s2,
    // convert l1 from long to string, and leave l2 the same.
    Schema input = NESTED_RECORD_SCHEMA;
    Schema parent = NESTED_PARENT_SCHEMA;
    Schema output = UNNESTED_OUTPUT_SCHEMA;
    Map<String, String> mapping = ImmutableMap.of("parent.id", "parentId");

    AvroRecordConverter converter = new AvroRecordConverter(input, output, mapping);

    Record inputRecord = new Record(input);
    inputRecord.put("l1", 5L);
    inputRecord.put("s1", "1000");
    Record parentRecord = new Record(parent);
    parentRecord.put("id", 200L);
    parentRecord.put("name", "parent");
    inputRecord.put("parent", parentRecord);
    Record outputRecord = converter.convert(inputRecord);
    assertEquals(5L, outputRecord.get("l1"));
    assertEquals(1000L, outputRecord.get("s1"));
    assertEquals(200L, outputRecord.get("parentId"));
}
Example 16
Source Project: iceberg Source File: TestReadProjection.java License: Apache License 2.0 | 6 votes |
@Test
public void testEmptyProjection() throws Exception {
    Schema schema = new Schema(
            Types.NestedField.required(0, "id", Types.LongType.get()),
            Types.NestedField.optional(1, "data", Types.StringType.get())
    );

    Record record = new Record(AvroSchemaUtil.convert(schema, "table"));
    record.put("id", 34L);
    record.put("data", "test");

    Record projected = writeAndRead("empty_projection", schema, schema.select(), record);

    Assert.assertNotNull("Should read a non-null record", projected);
    try {
        projected.get(0);
        Assert.fail("Should not retrieve value with ordinal 0");
    } catch (ArrayIndexOutOfBoundsException e) {
        // this is expected because there are no values
    }
}
Example 17
Source Project: components Source File: MarketoOutputWriterTest.java License: Apache License 2.0 | 6 votes |
@Test
public void testWriteCustomObject() throws Exception {
    props.outputOperation.setValue(OutputOperation.deleteCustomObjects);
    props.updateSchemaRelated();
    when(sink.getProperties()).thenReturn(props);
    doReturn(getSuccessSyncResult("deleted")).when(client).deleteCustomObjects(any(TMarketoOutputProperties.class),
            any(List.class));
    IndexedRecord record = new Record(MarketoConstants.getRESTOutputSchemaForSyncLead());
    record.put(0, 12345);
    writer.open("test");
    writer.write(record);
    assertNotNull(writer.close());
    //
    props.outputOperation.setValue(OutputOperation.syncCustomObjects);
    when(sink.getProperties()).thenReturn(props);
    doReturn(getSuccessSyncResult("updated")).when(client).syncCustomObjects(any(TMarketoOutputProperties.class),
            any(List.class));
    writer.open("test");
    writer.write(record);
    assertNotNull(writer.close());
}
Example 18
Source Project: iceberg Source File: TestReadProjection.java License: Apache License 2.0 | 6 votes |
@Test
public void testRename() throws Exception {
    Schema writeSchema = new Schema(
            Types.NestedField.required(0, "id", Types.LongType.get()),
            Types.NestedField.optional(1, "data", Types.StringType.get())
    );

    Record record = new Record(AvroSchemaUtil.convert(writeSchema, "table"));
    record.put("id", 34L);
    record.put("data", "test");

    Schema readSchema = new Schema(
            Types.NestedField.required(0, "id", Types.LongType.get()),
            Types.NestedField.optional(1, "renamed", Types.StringType.get())
    );

    Record projected = writeAndRead("project_and_rename", writeSchema, readSchema, record);

    Assert.assertEquals("Should contain the correct id value", 34L, (long) projected.get("id"));
    int cmp = Comparators.charSequences()
            .compare("test", (CharSequence) projected.get("renamed"));
    Assert.assertTrue("Should contain the correct data/renamed value", cmp == 0);
}
Example 19
Source Project: kite Source File: TestLocalDatasetURIs.java License: Apache License 2.0 | 6 votes |
@Test
public void testRelative() {
    DatasetRepository repo = DatasetRepositories.repositoryFor("repo:file:target/data");
    repo.delete("ns", "test");
    repo.create("ns", "test", descriptor);

    Dataset<Record> ds = Datasets.<Record, Dataset<Record>> load("dataset:file:target/data/ns/test", Record.class);

    Assert.assertNotNull("Should load dataset", ds);
    Assert.assertTrue(ds instanceof FileSystemDataset);
    Path cwd = localFS.makeQualified(new Path("."));
    Assert.assertEquals("Locations should match",
            new Path(cwd, "target/data/ns/test").toUri(), ds.getDescriptor().getLocation());
    Assert.assertEquals("Descriptors should match",
            repo.load("ns", "test").getDescriptor(), ds.getDescriptor());
    Assert.assertEquals("Should report correct namespace", "ns", ds.getNamespace());
    Assert.assertEquals("Should report correct name", "test", ds.getName());

    repo.delete("ns", "test");
}
Example 20
Source Project: incubator-pinot Source File: DerivedColumnNoTransformationTest.java License: Apache License 2.0 | 6 votes |
private List<GenericRecord> generateTestData() throws Exception {
    Schema schema = new Schema.Parser().parse(ClassLoader.getSystemResourceAsStream(AVRO_SCHEMA));
    List<GenericRecord> inputRecords = new ArrayList<GenericRecord>();

    GenericRecord input = new GenericData.Record(schema);
    input.put("d1", "abc1");
    input.put("d2", 501L);
    input.put("d3", "xyz1");
    input.put("hoursSinceEpoch", generateRandomHoursSinceEpoch());
    input.put("m1", 10);
    input.put("m2", 20);
    inputRecords.add(input);

    input = new GenericData.Record(schema);
    input.put("d1", "abc2");
    input.put("d2", 502L);
    input.put("d3", "xyz2");
    input.put("hoursSinceEpoch", generateRandomHoursSinceEpoch());
    input.put("m1", 10);
    input.put("m2", 20);
    inputRecords.add(input);

    return inputRecords;
}
Example 21
Source Project: kite Source File: TestFileSystemDataset.java License: Apache License 2.0 | 6 votes |
@Test
public void testWriteAndRead() throws IOException {
    FileSystemDataset<Record> ds = new FileSystemDataset.Builder<Record>()
            .namespace("ns")
            .name("test")
            .configuration(getConfiguration())
            .descriptor(new DatasetDescriptor.Builder()
                    .schemaUri(USER_SCHEMA_URL)
                    .format(format)
                    .compressionType(compressionType)
                    .location(testDirectory)
                    .build())
            .type(Record.class)
            .build();

    Assert.assertFalse("Dataset is not partitioned", ds.getDescriptor().isPartitioned());

    writeTestUsers(ds, 10);
    checkTestUsers(ds, 10);
}
Example 22
Source Project: HiveKa Source File: KafkaAvroMessageDecoder.java License: Apache License 2.0 | 6 votes |
public AvroGenericRecordWritable decode(byte[] payload) {
    try {
        MessageDecoderHelper helper = new MessageDecoderHelper(registry, topicName, payload).invoke();
        DatumReader<Record> reader = new GenericDatumReader<Record>(helper.getTargetSchema());

        log.debug("Trying to read kafka payload");
        log.debug("buffer: " + helper.getBuffer());
        log.debug("start: " + helper.getStart());
        log.debug("length: " + helper.getLength());
        log.debug("target schema: " + helper.getTargetSchema());
        log.debug("schema: " + helper.getSchema());

        GenericRecord record = reader.read(null,
                decoderFactory.binaryDecoder(helper.getBuffer().array(), helper.getStart(), helper.getLength(), null));
        log.debug("Read kafka payload as " + record);

        AvroGenericRecordWritable grw = new AvroGenericRecordWritable(record);
        grw.setFileSchema(latestSchema);
        return grw;
    } catch (IOException e) {
        throw new MessageDecoderException(e);
    }
}
Example 23
Source Project: components Source File: MarketoInputWriterTest.java License: Apache License 2.0 | 6 votes |
@Before
public void setUp() throws Exception {
    super.setUp();
    props = new TMarketoInputProperties("test");
    props.connection.setupProperties();
    props.setupProperties();
    props.leadKeyValues.setValue("email");
    props.connection.maxReconnAttemps.setValue(2);
    props.connection.attemptsIntervalTime.setValue(500);
    props.updateSchemaRelated();
    when(sink.getProperties()).thenReturn(props);
    wop = new MarketoWriteOperation(sink);
    writer = new MarketoInputWriter(wop, null);
    writer.properties = props;
    assertTrue(writer instanceof MarketoInputWriter);
    record = new Record(MarketoConstants.getRESTSchemaForGetLeadOrGetMultipleLeads());
    record.put(1, "[email protected]");
}
Example 24
Source Project: localization_nifi Source File: TestAvroRecordConverter.java License: Apache License 2.0 | 6 votes |
/**
 * Tests the case where we try to convert a string to a long incorrectly.
 */
@Test(expected = org.apache.nifi.processors.kite.AvroRecordConverter.AvroConversionException.class)
public void testIllegalConversion() throws Exception {
    // We will convert s1 from string to long (or leave it null), ignore s2,
    // convert l1 from long to string, and leave l2 the same.
    Schema input = SchemaBuilder.record("Input")
            .namespace("com.cloudera.edh").fields()
            .nullableString("s1", "").requiredString("s2")
            .optionalLong("l1").requiredLong("l2").endRecord();
    Schema output = SchemaBuilder.record("Output")
            .namespace("com.cloudera.edh").fields().optionalLong("s1")
            .optionalString("l1").requiredLong("l2").endRecord();

    AvroRecordConverter converter = new AvroRecordConverter(input, output, EMPTY_MAPPING);

    Record inputRecord = new Record(input);
    inputRecord.put("s1", "blah");
    inputRecord.put("s2", "blah");
    inputRecord.put("l1", null);
    inputRecord.put("l2", 5L);
    converter.convert(inputRecord);
}
Example 25
Source Project: localization_nifi Source File: TestConvertAvroSchema.java License: Apache License 2.0 | 6 votes |
private Record convertBasic(Record inputRecord, Locale locale) {
    Record result = new Record(OUTPUT_SCHEMA);
    result.put("id", Long.parseLong(inputRecord.get("id").toString()));
    result.put("color", inputRecord.get("primaryColor").toString());
    if (inputRecord.get("price") == null) {
        result.put("price", null);
    } else {
        final NumberFormat format = NumberFormat.getInstance(locale);
        double price;
        try {
            price = format.parse(inputRecord.get("price").toString()).doubleValue();
        } catch (ParseException e) {
            // Shouldn't happen
            throw new RuntimeException(e);
        }
        result.put("price", price);
    }
    return result;
}
Example 26
Source Project: nifi Source File: TestAvroTypeUtil.java License: Apache License 2.0 | 6 votes |
/**
 * The issue consists on having an Avro's schema with a default value in an
 * array. See
 * <a href="https://issues.apache.org/jira/browse/NIFI-4893">NIFI-4893</a>.
 *
 * @throws IOException
 *             schema not found.
 */
@Test
public void testDefaultArrayValuesInRecordsCase2() throws IOException {
    Schema avroSchema = new Schema.Parser().parse(getClass().getResourceAsStream("defaultArrayInRecords2.json"));
    GenericRecordBuilder builder = new GenericRecordBuilder(avroSchema);
    Record field1Record = new GenericRecordBuilder(avroSchema.getField("field1").schema()).build();
    builder.set("field1", field1Record);
    Record r = builder.build();

    @SuppressWarnings("unchecked")
    GenericData.Array<Integer> values = (GenericData.Array<Integer>) ((GenericRecord) r.get("field1"))
            .get("listOfInt");
    assertArrayEquals(new Object[] {1,2,3}, values.toArray());
    RecordSchema record = AvroTypeUtil.createSchema(avroSchema);
    RecordField field = record.getField("field1").get();
    assertEquals(RecordFieldType.RECORD, field.getDataType().getFieldType());
    RecordDataType data = (RecordDataType) field.getDataType();
    RecordSchema childSchema = data.getChildSchema();
    RecordField childField = childSchema.getField("listOfInt").get();
    assertEquals(RecordFieldType.ARRAY, childField.getDataType().getFieldType());
    assertTrue(childField.getDefaultValue() instanceof Object[]);
    assertArrayEquals(new Object[] {1,2,3}, ((Object[]) childField.getDefaultValue()));
}
Example 27
Source Project: components Source File: MarketoLeadClientTest.java License: Apache License 2.0 | 5 votes |
@Test
public void testDeleteLeads() throws Exception {
    IndexedRecord record = new Record(MarketoConstants.getDeleteLeadsSchema());
    record.put(0, 12345);
    //
    doThrow(new MarketoException("REST", "error")).when(client).executePostRequest(eq(SyncResult.class),
            any(JsonObject.class));
    mktoSR = client.deleteLeads(new Integer[] { 12345 });
    mktoSR = client.deleteLeads(Arrays.asList(record));
    assertFalse(mktoSR.isSuccess());
    assertFalse(mktoSR.getErrorsString().isEmpty());
    //
    doReturn(new SyncResult()).when(client).executePostRequest(eq(SyncResult.class), any(JsonObject.class));
    mktoSR = client.deleteLeads(new Integer[] { 12345 });
    assertFalse(mktoSR.isSuccess());
    //
    SyncResult sr = new SyncResult();
    sr.setSuccess(true);
    List<SyncStatus> ssr = new ArrayList<>();
    SyncStatus ss = new SyncStatus();
    ss.setStatus("created");
    ss.setMarketoGUID("mkto-123456");
    ss.setSeq(0);
    ss.setId(12345);
    ss.setErrorMessage("");
    ssr.add(ss);
    sr.setResult(ssr);
    doReturn(sr).when(client).executePostRequest(eq(SyncResult.class), any(JsonObject.class));
    mktoSR = client.deleteLeads(new Integer[] { 12345 });
    assertTrue(mktoSR.isSuccess());
    assertTrue(mktoSR.getErrorsString().isEmpty());
}
Example 28
Source Project: DataflowTemplates Source File: BigQueryConvertersTest.java License: Apache License 2.0 | 5 votes |
/** Generates an Avro record with a record field type. */
private Record generateNestedAvroRecord() {
    String avroRecordFieldSchema =
            new StringBuilder()
                    .append("{")
                    .append(" \"name\" : \"address\",")
                    .append(" \"type\" :")
                    .append(" {")
                    .append(" \"type\" : \"record\",")
                    .append(" \"name\" : \"address\",")
                    .append(" \"namespace\" : \"nothing\",")
                    .append(" \"fields\" : ")
                    .append(" [")
                    .append(" {\"name\" : \"street_number\", \"type\" : \"int\"},")
                    .append(" {\"name\" : \"street_name\", \"type\" : \"string\"}")
                    .append(" ]")
                    .append(" }")
                    .append("}")
                    .toString();

    Schema avroSchema =
            new Schema.Parser().parse(String.format(avroSchemaTemplate, avroRecordFieldSchema));
    GenericRecordBuilder addressBuilder =
            new GenericRecordBuilder(avroSchema.getField("address").schema());
    addressBuilder.set("street_number", 12);
    addressBuilder.set("street_name", "Magnolia street");
    GenericRecordBuilder builder = new GenericRecordBuilder(avroSchema);
    builder.set("address", addressBuilder);
    return builder.build();
}
Example 29
Source Project: components Source File: MarketoOpportunitiesClientTestIT.java License: Apache License 2.0 | 5 votes |
private MarketoSyncResult createOpportunity(String... externalOIds) throws IOException {
    oprops.outputOperation.setValue(OutputOperation.syncOpportunities);
    oprops.afterOutputOperation();
    oprops.customObjectSyncAction.setValue(CustomObjectSyncAction.createOrUpdate);
    List<IndexedRecord> records = new ArrayList<>();
    for (String opp : externalOIds) {
        IndexedRecord r1 = new Record(oprops.schemaInput.schema.getValue());
        r1.put(0, opp);
        records.add(r1);
    }
    MarketoSink sink = new MarketoSink();
    sink.initialize(null, oprops);
    MarketoRESTClient client = (MarketoRESTClient) sink.getClientService(null);
    return client.syncOpportunities(oprops, records);
}
Example 30
Source Project: data-highway Source File: ConsumerRecordWriter.java License: Apache License 2.0 | 5 votes |
@Override
public void write(ConsumerRecord<Void, Record> record) throws IOException {
    Schema schema = record.value().getSchema();
    writers.computeIfAbsent(schema, this::newRecordWriter).write(record.value());
    recordCounter.getAndIncrement();
    metrics.consumedBytes(record.serializedValueSize());
    metrics.offsetHighwaterMark(record.partition(), record.offset());
    if (byteCounter.getAndAdd(ZERO) >= flushBytesThreshold) {
        flush();
    }
}