com.streamsets.pipeline.api.StageException Java Examples

The following examples show how to use com.streamsets.pipeline.api.StageException. All of them are taken from the datacollector project, which is licensed under the Apache License 2.0; the source file for each example is noted above it. StageException is the checked exception that StreamSets Data Collector stages throw to report errors: it is constructed from an ErrorCode plus the arguments that fill that code's message placeholders.
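
Before the examples, here is a minimal sketch of the recurring pattern, assuming a hypothetical DemoErrors enum (the enum, its message text, and the connect() method are illustrative only; the ErrorCode interface and the StageException constructor are used the same way in the real examples below):

import com.streamsets.pipeline.api.ErrorCode;
import com.streamsets.pipeline.api.StageException;

public class StageExceptionSketch {

  // Hypothetical error code enum, modeled on the Errors enums used in the examples.
  enum DemoErrors implements ErrorCode {
    DEMO_01("Failed to connect to '{}': {}");

    private final String message;

    DemoErrors(String message) { this.message = message; }

    @Override public String getCode() { return name(); }
    @Override public String getMessage() { return message; }
  }

  public void connect(String url) throws StageException {
    try {
      throw new java.io.IOException("connection refused"); // stand-in for real work
    } catch (java.io.IOException e) {
      // The varargs fill the '{}' placeholders in the error message; passing
      // the exception as the final argument preserves it as the cause.
      throw new StageException(DemoErrors.DEMO_01, url, e.toString(), e);
    }
  }
}
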
Example #1
Source File: RedisDTargetUpgrader.java    From datacollector with Apache License 2.0
@Override
public List<Config> upgrade(
    String library,
    String stageName,
    String stageInstance,
    int fromVersion,
    int toVersion,
    List<Config> configs
) throws StageException {
  switch(fromVersion) {
    case 1:
      upgradeV1ToV2(configs);
      break;
    default:
      throw new IllegalStateException(Utils.format("Unexpected fromVersion {}", fromVersion));
  }
  return configs;
}
 
Example #2
Source File: JmsMessageConsumerImpl.java    From datacollector with Apache License 2.0
@Override
public int take(BatchMaker batchMaker, Source.Context context, int batchSize, long messageIndex)
    throws StageException {
  long start = System.currentTimeMillis();
  int numMessagesConsumed = 0;
  while (System.currentTimeMillis() - start < basicConfig.maxWaitTime && numMessagesConsumed < batchSize) {
    if (IS_TRACE_ENABLED) {
      LOG.trace("Attempting to take up to '{}' messages", (batchSize - numMessagesConsumed));
    }
    try {
      Message message = messageConsumer.receive(POLL_INTERVAL);
      if (message != null) {
        if (IS_TRACE_ENABLED) {
          LOG.trace("Got message: {}", message);
        }
        String messageId = jmsConfig.destinationName + "::" + messageIndex;
        int consumed = jmsMessageConverter.convert(batchMaker, context, messageId, message);
        messageIndex += consumed;
        numMessagesConsumed += consumed;
      }
    } catch (JMSException ex) {
      throw new StageException(JmsErrors.JMS_07, ex.toString(), ex);
    }
  }
  return numMessagesConsumed;
}
 
Example #3
Source File: HiveMetastoreUtil.java    From datacollector with Apache License 2.0
public static Connection getHiveConnection(
    final String jdbcUrl,
    final UserGroupInformation loginUgi,
    final List<ConnectionPropertyBean> driverProperties
) throws StageException {

  Properties resolvedDriverProperties = new Properties();
  for(ConnectionPropertyBean bean : driverProperties) {
    resolvedDriverProperties.setProperty(bean.property, bean.value.get());
  }

  try {
    return loginUgi.doAs((PrivilegedExceptionAction<Connection>) () -> DriverManager.getConnection(jdbcUrl, resolvedDriverProperties));
  } catch (Exception e) {
    LOG.error("Failed to connect to Hive with JDBC URL:" + jdbcUrl, e);
    throw new StageException(Errors.HIVE_22, jdbcUrl, e.getMessage());
  }
}
 
Example #4
Source File: TestSpoolDirSourceUpgrader.java    From datacollector with Apache License 2.0
@Test
public void testSpoolDirSourceUpgrader() throws StageException {
  SpoolDirSourceUpgrader spoolDirSourceUpgrader = new SpoolDirSourceUpgrader();

  List<Config> upgrade = spoolDirSourceUpgrader.upgrade("x", "y", "z", 1, 7, new ArrayList<Config>());
  assertEquals(9, upgrade.size());
  assertEquals("conf.dataFormatConfig.compression", upgrade.get(0).getName());
  assertEquals("NONE", upgrade.get(0).getValue());
  assertEquals("conf.dataFormatConfig.csvCustomDelimiter", upgrade.get(1).getName());
  assertEquals('|', upgrade.get(1).getValue());
  assertEquals("conf.dataFormatConfig.csvCustomEscape", upgrade.get(2).getName());
  assertEquals('\\', upgrade.get(2).getValue());
  assertEquals("conf.dataFormatConfig.csvCustomQuote", upgrade.get(3).getName());
  assertEquals('\"', upgrade.get(3).getValue());
  assertEquals("conf.dataFormatConfig.csvRecordType", upgrade.get(4).getName());
  assertEquals("LIST", upgrade.get(4).getValue());
  assertEquals("conf.dataFormatConfig.filePatternInArchive", upgrade.get(5).getName());
  assertEquals("*", upgrade.get(5).getValue());
  assertEquals("conf.dataFormatConfig.csvSkipStartLines", upgrade.get(6).getName());
  assertEquals(0, upgrade.get(6).getValue());
  assertEquals("conf.allowLateDirectory", upgrade.get(7).getName());
  assertEquals(false, upgrade.get(7).getValue());
  assertEquals("conf.useLastModified", upgrade.get(8).getName());
  assertEquals(FileOrdering.LEXICOGRAPHICAL.name(), upgrade.get(8).getValue());
}
 
Example #5
Source File: AvroHiveSchemaGenerator.java    From datacollector with Apache License 2.0
/**
 * Takes a record structure as a Map of String to HiveTypeInfo,
 * generates the corresponding Avro schema, and returns it as a String.
 * @param record record structure
 * @return String representation of the Avro schema
 * @throws StageException if the record contains an unsupported type
 */
@Override
public String inferSchema(Map<String, HiveTypeInfo> record)
    throws StageException
{
  Map<String, Schema> fields = new LinkedHashMap<>();
  for(Map.Entry<String, HiveTypeInfo> pair : record.entrySet()) {
    if(!HiveMetastoreUtil.validateObjectName(pair.getKey())) {
      throw new HiveStageCheckedException(Errors.HIVE_30, pair.getKey());
    }
    Schema columnSchema = Schema.createUnion(ImmutableList.of(Schema.create(Schema.Type.NULL), traverse(pair)));
    // We always set default value to null
    columnSchema.addProp("default", NullNode.getInstance());
    fields.put(pair.getKey(), columnSchema);
  }
  Schema schema = buildSchema(fields);
  return schema.toString();
}
 
Example #6
Source File: ScriptingProcessorTestUtil.java    From datacollector with Apache License 2.0
public static <C extends Processor> void verifyErrorRecordStopPipeline(
    Class<C> clazz,
    Processor processor
) throws StageException {
  ProcessorRunner runner = new ProcessorRunner.Builder(clazz, processor)
    .setOnRecordError(OnRecordError.STOP_PIPELINE)
    .addOutputLane("lane")
    .build();

  Record record = RecordCreator.create();
  record.set(Field.create("Not Important"));

  runner.runInit();
  try {
    runner.runProcess(Collections.singletonList(record));
    Assert.fail("Expected exception");
  } catch(Exception e) {
    Assert.assertTrue(e.toString(), e.toString().contains("Script sent record to error"));
  } finally {
    runner.runDestroy();
  }
}
 
Example #7
Source File: HttpTarget.java    From datacollector with Apache License 2.0
public List<SDCMetricsJson> getRecordsToWrite() throws StageException {
  List<SDCMetricsJson> sdcMetricsJsonList = new ArrayList<>();
  Record tempRecord = null;
  try {
    for (Record currentRecord : sdcIdToRecordMap.values()) {
      tempRecord = currentRecord;
      SDCMetricsJson sdcMetricsJson = createSdcMetricJson(currentRecord);
      sdcMetricsJsonList.add(sdcMetricsJson);
    }
  } catch (IOException e) {
    errorRecordHandler.onError(
        new OnRecordErrorException(
            tempRecord,
            Errors.HTTP_01,
            tempRecord.getHeader().getSourceId(),
            e.toString(),
            e
        )
    );
  }
  return sdcMetricsJsonList;
}
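
This example routes the failure through errorRecordHandler.onError(...) instead of throwing StageException directly, which defers to the stage's on-record-error setting. A rough sketch of the contrast, assuming the ErrorRecordHandler interface from datacollector's stage-common package and reusing a hypothetical DemoErrors code (both are assumptions for illustration, not part of the example above):

import com.streamsets.pipeline.api.ErrorCode;
import com.streamsets.pipeline.api.Record;
import com.streamsets.pipeline.api.StageException;
import com.streamsets.pipeline.api.base.OnRecordErrorException;
import com.streamsets.pipeline.stage.common.ErrorRecordHandler;

public class ErrorHandlingSketch {

  // Hypothetical error code, as in the earlier sketch.
  enum DemoErrors implements ErrorCode {
    DEMO_02("Failed to process record '{}': {}");
    private final String message;
    DemoErrors(String message) { this.message = message; }
    @Override public String getCode() { return name(); }
    @Override public String getMessage() { return message; }
  }

  void handle(ErrorRecordHandler errorRecordHandler, Record record, Exception e) throws StageException {
    // Throwing StageException directly fails the whole batch, regardless of
    // how the stage's on-record-error option is configured:
    //   throw new StageException(DemoErrors.DEMO_02, record.getHeader().getSourceId(), e.toString(), e);

    // Handing an OnRecordErrorException (a StageException subclass) to the
    // handler defers to that option instead: DISCARD drops the record,
    // TO_ERROR routes it to the error stream, STOP_PIPELINE rethrows.
    errorRecordHandler.onError(
        new OnRecordErrorException(record, DemoErrors.DEMO_02, record.getHeader().getSourceId(), e.toString(), e)
    );
  }
}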
 
Example #8
Source File: TestForceSourceUpgrader.java    From datacollector with Apache License 2.0
@Test
public void testUpgradeV1toV2() throws StageException {
  StageUpgrader.Context context = Mockito.mock(StageUpgrader.Context.class);
  Mockito.doReturn(1).when(context).getFromVersion();
  Mockito.doReturn(2).when(context).getToVersion();

  List<Config> configs = new ArrayList<>();

  ForceSourceUpgrader forceSourceUpgrader = new ForceSourceUpgrader();
  forceSourceUpgrader.upgrade(configs, context);

  Assert.assertEquals(1, configs.size());
  Config config = configs.get(0);
  Assert.assertEquals("forceConfig.subscriptionType", config.getName());
  Assert.assertEquals(SubscriptionType.PUSH_TOPIC, config.getValue());
}
 
Example #9
Source File: HttpProcessor.java    From datacollector with Apache License 2.0
/**
 * Populates the configured output location with the HTTP response headers.
 *
 * @param record current record to populate
 * @param response HTTP response
 * @throws StageException when writing headers to a field path that already exists
 */
private Field createResponseHeaders(Record record, Response response) throws StageException {
  if (conf.headerOutputLocation == HeaderOutputLocation.NONE) {
    return null;
  }

  Record.Header header = record.getHeader();
  header.setAttribute(REQUEST_STATUS_CONFIG_NAME, String.format("%d", response.getStatus()));

  if (conf.headerOutputLocation == HeaderOutputLocation.FIELD) {
    return createResponseHeaderField(record, response);
  } else if (conf.headerOutputLocation == HeaderOutputLocation.HEADER) {
    createResponseHeaderToRecordHeader(response, header);
    return null;
  }
  return null;
}
 
Example #10
Source File: HttpProcessor.java    From datacollector with Apache License 2.0
/**
 * Writes the HTTP response headers to the SDC Record at the configured field path.
 *
 * @param record Record to populate with response headers.
 * @param response HTTP response
 * @throws StageException if the field path already exists
 */
private Field createResponseHeaderField(Record record, Response response) throws StageException {
  // Guard against clobbering existing data or the body output field.
  if (record.has(conf.headerOutputField) || conf.headerOutputField.equals(conf.outputField)) {
    throw new StageException(Errors.HTTP_11, getResponseStatus(response), conf.headerOutputField);
  }
  Map<String, Field> headers = new HashMap<>(response.getStringHeaders().size());

  for (Map.Entry<String, List<String>> entry : response.getStringHeaders().entrySet()) {
    if (!entry.getValue().isEmpty()) {
      String firstValue = entry.getValue().get(0);
      headers.put(entry.getKey(), Field.create(firstValue));
    }
  }

  return Field.create(headers);
}
 
Example #11
Source File: TestDatabricksJobExecutor.java    From datacollector with Apache License 2.0
@Test
public void testRunError() throws Exception {
  Map<Object, Object> mapWithJob =
      ImmutableMap.of("settings", ImmutableMap.of("spark_jar_task", 1000));
  doReturn(mapWithJob).when(listResponse).readEntity(Map.class);
  doReturn(200).when(listResponse).getStatus();

  doReturn(500).when(runResponse).getStatus();
  executor = new FakeDatabricksJobExecutor(configBean);
  ExecutorRunner runner = new ExecutorRunner.Builder(DatabricksJobLauncherDExecutor.class, executor)
      .setOnRecordError(OnRecordError.TO_ERROR)
      .build();
  runner.runInit();
  try {
    runner.runWrite(ImmutableList.of(RecordCreator.create()));
    Assert.fail();
  } catch (StageException ex) {
    Assert.assertEquals(Errors.DATABRICKS_06, ex.getErrorCode());
  }
  runner.runDestroy();
}
 
Example #12
Source File: TestWaveAnalyticsUpgrader.java    From datacollector with Apache License 2.0
@Ignore
public void testUpgradeV1toV2AppendTimestampTrue() throws StageException {
  List<Config> configs = new ArrayList<>();
  StageUpgrader.Context context = Mockito.mock(StageUpgrader.Context.class);
  Mockito.doReturn(1).when(context).getFromVersion();
  Mockito.doReturn(2).when(context).getToVersion();

  configs.add(new Config(WAVE_ANALYTICS_APPEND_TIMESTAMP, true));

  WaveAnalyticsUpgrader waveAnalyticsUpgrader = new WaveAnalyticsUpgrader();
  waveAnalyticsUpgrader.upgrade(configs, context);

  Assert.assertEquals(1, configs.size());
  Config config = configs.get(0);
  Assert.assertEquals(WAVE_ANALYTICS_APPEND_TIMESTAMP, config.getName());
  Assert.assertEquals(true, config.getValue());
}
 
Example #13
Source File: StartJobSource.java    From datacollector with Apache License 2.0
@Override
public String produce(String s, int i, BatchMaker batchMaker) throws StageException {
  Executor executor = Executors.newCachedThreadPool();
  List<CompletableFuture<Field>> startJobFutures = startJobCommon.getStartJobFutures(executor, null);
  try {
    LinkedHashMap<String, Field> outputField = startJobCommon.startJobInParallel(startJobFutures);
    Record outputRecord = CommonUtil.createOrchestratorTaskRecord(
        null,
        getContext(),
        conf.taskName,
        outputField
    );
    batchMaker.addRecord(outputRecord);
  } catch (Exception ex) {
    LOG.error(ex.toString(), ex);
    errorRecordHandler.onError(StartJobErrors.START_JOB_08, ex.toString(), ex);
  }
  return null;
}
 
Example #14
Source File: ElasticsearchSource.java    From datacollector with Apache License 2.0
private JsonObject getResults(String scrollId) throws StageException {
  HttpEntity entity = new StringEntity(
      String.format("{\"scroll\":\"%s\",\"scroll_id\":\"%s\"}", conf.cursorTimeout, scrollId),
      ContentType.APPLICATION_JSON
  );

  try {
    Response response = delegate.performRequest("POST",
        "/_search/scroll",
        conf.params,
        entity,
        delegate.getAuthenticationHeader(conf.securityConfig.securityUser.get())
    );

    return parseEntity(response.getEntity());
  } catch (IOException e) {
    LOG.debug("Expired scroll_id: '{}'", scrollId);
    LOG.error(Errors.ELASTICSEARCH_23.getMessage(), e);
    throw new StageException(Errors.ELASTICSEARCH_23);
  }
}
 
Example #15
Source File: TestCouchbaseTarget.java    From datacollector with Apache License 2.0
@Test
public void shouldUseDefaultOperationForUnsupportedCdcOperation() throws StageException {
  CouchbaseTargetConfig config = getDefaultConfig();
  config.defaultWriteOperation = WriteOperationType.REPLACE;
  config.unsupportedOperation = UnsupportedOperationType.DEFAULT;

  TargetRunner runner = getMockedTargetRunner(config, ReplaceResponse.class);
  if(runner == null) return;

  runner.runInit();
  runner.runWrite(getTestRecord("999"));
  runner.runDestroy();

  assertTrue(runner.getErrorRecords().isEmpty());
  assertTrue(runner.getErrors().isEmpty());
}
 
Example #16
Source File: HttpClientSource.java    From datacollector with Apache License 2.0
/**
 * Used only for HEAD requests.  Sets up a record for output based on headers only
 * with an empty body.
 *
 * @param batchMaker batch to add records to.
 * @return the next source offset to commit
 * @throws StageException if an unhandled error is encountered
 */
String parseHeadersOnly(BatchMaker batchMaker) throws StageException {
  HttpSourceOffset sourceOffset = new HttpSourceOffset(
          getResolvedUrl(),
          currentParameterHash,
          System.currentTimeMillis(),
          getCurrentPage()
  );

  Record record = getContext().createRecord(sourceOffset + "::0");
  addResponseHeaders(record.getHeader());
  record.set(Field.create(new HashMap()));

  batchMaker.addRecord(record);
  recordCount++;
  incrementSourceOffset(sourceOffset, 1);
  lastRequestCompletedTime = System.currentTimeMillis();
  return sourceOffset.toString();
}
 
Example #17
Source File: ProtobufDataParserFactory.java    From datacollector with Apache License 2.0
public ProtobufDataParserFactory(Settings settings) throws StageException {
  super(settings);
  this.protoDescriptorFile = settings.getConfig(ProtobufConstants.PROTO_DESCRIPTOR_FILE_KEY);
  this.messageType = settings.getConfig(ProtobufConstants.MESSAGE_TYPE_KEY);
  this.isDelimited = settings.getConfig(ProtobufConstants.DELIMITED_KEY);
  messageTypeToExtensionMap = new HashMap<>();
  defaultValueMap = new HashMap<>();
  // Get the descriptor for the expected message type
  descriptor = ProtobufTypeUtil.getDescriptor(
    settings.getContext(),
    protoDescriptorFile,
    messageType,
    messageTypeToExtensionMap,
    defaultValueMap
  );

  // Build the extension registry based on the cached extension map
  extensionRegistry = ExtensionRegistry.newInstance();
  for(Map.Entry<String, Set<Descriptors.FieldDescriptor>> e : messageTypeToExtensionMap.entrySet()) {
    Set<Descriptors.FieldDescriptor> value = e.getValue();
    for (Descriptors.FieldDescriptor f : value) {
      extensionRegistry.add(f);
    }
  }
}
 
Example #18
Source File: ActiveRecordWriters.java    From datacollector with Apache License 2.0
public synchronized void closeAll() throws StageException {
  if (IS_TRACE_ENABLED) {
    LOG.trace("Close all '{}'", toString());
  }
  if(writers != null) {
    for (RecordWriter writer : writers.values()) {
      writer.closeLock();
      try {
        if (!writer.isClosed()) {
          manager.commitWriter(writer);
        }
      } catch (IOException ex) {
        String msg = Utils.format("Error closing writer {} : {}", writer, ex);
        LOG.warn(msg, ex);
      } finally {
        writer.closeUnlock();
      }
    }
  }
  writers = null;
  cutOffQueue = null;
}
 
Example #19
Source File: HdfsTarget.java    From datacollector with Apache License 2.0
protected void emptyBatch() throws StageException {
  setBatchTime();
  try {
    hdfsTargetConfigBean.getUGI().doAs(new PrivilegedExceptionAction<Void>() {
      @Override
      public Void run() throws Exception {
        hdfsTargetConfigBean.getCurrentWriters().purge();
        if (hdfsTargetConfigBean.getLateWriters() != null) {
          hdfsTargetConfigBean.getLateWriters().purge();
        }
        return null;
      }
    });
  } catch (Exception ex) {
    throw throwStageException(ex);
  }
}
 
Example #20
Source File: SourcePipe.java    From datacollector with Apache License 2.0
protected Map<String, Object> finishBatchAndCalculateMetrics(
  long startTimeInStage,
  PipeBatch pipeBatch,
  BatchMakerImpl batchMaker,
  BatchImpl batchImpl,
  ErrorSink errorSink,
  EventSink eventSink,
  String newOffset
) throws StageException {
  statsCollector.incrementRecordCount(batchMaker.getSize());

  return super.finishBatchAndCalculateMetrics(
    startTimeInStage,
    pipeBatch,
    batchMaker,
    batchImpl,
    errorSink,
    eventSink,
    newOffset
  );
}
 
Example #21
Source File: TestCouchbaseTarget.java    From datacollector with Apache License 2.0
@Test
public void shouldDeleteSubdocRecord() throws StageException {
  CouchbaseTargetConfig config = getDefaultConfig();
  config.subdocPathEL = "myPath";
  config.subdocOperationEL = "DELETE";

  TargetRunner runner = getMockedTargetRunner(config);

  runner.runInit();
  runner.runWrite(getTestRecord());
  runner.runDestroy();

  assertTrue(runner.getErrorRecords().isEmpty());
  assertTrue(runner.getErrors().isEmpty());
}
 
Example #22
Source File: TestMapReduceExecutorUpgrader.java    From datacollector with Apache License 2.0
@Test
public void testUpgradeV1ToV2() throws StageException {
  final MapReduceExecutorUpgrader upgrader = new MapReduceExecutorUpgrader();

  final List<Config> configs = new LinkedList<>();
  configs.add(new Config("jobConfig.avroParquetConfig.dictionaryPageSize", 50));
  configs.add(new Config("jobConfig.avroParquetConfig.outputDirectory", "/output"));
  configs.add(new Config("jobConfig.avroParquetConfig.inputFile", "/input/something.avro"));
  configs.add(new Config("jobConfig.avroParquetConfig.keepInputFile", false));
  configs.add(new Config("jobConfig.avroParquetConfig.overwriteTmpFile", true));

  UpgraderTestUtils.UpgradeMoveWatcher watcher = UpgraderTestUtils.snapshot(configs);

  final TestUpgraderContext context = new TestUpgraderContext("l", "s", "i", 1, 2);

  upgrader.upgrade(configs, context);

  watcher.assertAllMoved(
      configs,
      "jobConfig.avroParquetConfig.outputDirectory",
      "jobConfig.avroConversionCommonConfig.outputDirectory",
      "jobConfig.avroParquetConfig.inputFile",
      "jobConfig.avroConversionCommonConfig.inputFile",
      "jobConfig.avroParquetConfig.keepInputFile",
      "jobConfig.avroConversionCommonConfig.keepInputFile",
      "jobConfig.avroParquetConfig.overwriteTmpFile",
      "jobConfig.avroConversionCommonConfig.overwriteTmpFile"
  );
}
 
Example #23
Source File: ReferentialTblOrderProvider.java    From datacollector with Apache License 2.0
@Override
public Queue<String> calculateOrder() throws SQLException, ExecutionException, StageException {
  if (!areAllEdgesConstructed) {
    orderedTables = new LinkedList<>();
    for (String qualifiedTableName : directedGraph.vertices()) {
      Set<String> referredTableSetForThisContext = referredTables.get(qualifiedTableName);
      TableContext tableContext = getTableContext(qualifiedTableName);
      for (String referredTable : referredTableSetForThisContext) {
        TableContext referredTableContext = getTableContext(tableContext.getSchema(), referredTable);
        //Checking whether the referred table is used by the origin or whether the table has a reference to itself.
        if (referredTableContext != null
            && !referredTableContext.getQualifiedName().equals(tableContext.getQualifiedName())) {
          //This edge states referred table should be ingested first
          directedGraph.addDirectedEdge(
              referredTableContext.getQualifiedName(),
              qualifiedTableName
          );
        }
      }
    }

    areAllEdgesConstructed = true;
    try {
      Iterator<String> topologicalOrderIterator =
          new TopologicalSorter<>(directedGraph, String::compareTo).sort().iterator();
      topologicalOrderIterator.forEachRemaining(table -> orderedTables.add(table));
    } catch(IllegalStateException e) {
      throw new StageException(JdbcErrors.JDBC_68, e.getMessage());
    }
  }

  //Return the saved topological order.
  return new LinkedList<>(orderedTables);
}
 
Example #24
Source File: FieldValueReplacerUpgrader.java    From datacollector with Apache License 2.0
@Override
public List<Config> upgrade(String library, String stageName, String stageInstance, int fromVersion, int toVersion, List<Config> configs) throws StageException {
  switch (fromVersion) {
    case 1:
      upgradeV1ToV2(configs);
      if (toVersion == 2) {
        break;
      }
      // fall through
    case 2:
      upgradeV2ToV3(configs);
  }
  return configs;
}
 
Example #25
Source File: TestXMLFlatteningProcessorUpgrader.java    From datacollector with Apache License 2.0
@Test
public void testV1ToV2() throws StageException {
  XMLFlatteningProcessorUpgrader upgrader = new XMLFlatteningProcessorUpgrader();
  List<Config> upgraded = upgrader.upgrade(null, "xml", "xmll", 1, 2, new LinkedList<Config>());
  Assert.assertEquals(2, upgraded.size());
  Assert.assertEquals("keepOriginalFields", upgraded.get(0).getName());
  Assert.assertEquals(true, upgraded.get(0).getValue());
  Assert.assertEquals("newFieldOverwrites", upgraded.get(1).getName());
  Assert.assertEquals(false, upgraded.get(1).getValue());
}
 
Example #26
Source File: TestSQLServerCDCSourceUpgrader.java    From datacollector with Apache License 2.0
@Test
public void testUpgradeV3toV4() throws StageException {
  List<Config> configs = new ArrayList<>();
  List<Map<String, String>> oldTableConfigs = new ArrayList<>();

  final String exclusion1 = "exception-%";
  final String initialOffset1 = "0000";
  final String captureInstance = "dbo_table1_CT";

  Map<String, String> tableConfig1 = ImmutableMap.of(
      TABLE_CAPTURE_INSTANCE_CONFIG, captureInstance,
      TABLE_EXCLUSION_CONFIG, exclusion1,
      TABLE_INITIALOFFSET_CONFIG, initialOffset1
  );

  oldTableConfigs.add(tableConfig1);

  // table config changes from V2
  configs.add(new Config(TABLECONFIG, oldTableConfigs));

  Assert.assertEquals(1, configs.size());

  SQLServerCDCSourceUpgrader sqlServerCDCSourceUpgrader = new SQLServerCDCSourceUpgrader();
  sqlServerCDCSourceUpgrader.upgrade("a", "b", "c", 3, 4, configs);

  Assert.assertEquals(1, configs.size());

  Config tableConfigObj = UpgraderUtils.getConfigWithName(configs, SQLServerCDCSourceUpgrader.TABLECONFIG);
  ArrayList<HashMap<String, String>> tableConfigs = (ArrayList<HashMap<String, String>>) tableConfigObj.getValue();
  Assert.assertEquals(1, tableConfigs.size());

  HashMap<String, String> tableConfig = tableConfigs.get(0);

  Assert.assertEquals("dbo_table1", tableConfig.get(TABLE_CAPTURE_INSTANCE_CONFIG));
  Assert.assertEquals(exclusion1, tableConfig.get(TABLE_EXCLUSION_CONFIG));
  Assert.assertEquals(initialOffset1, tableConfig.get(TABLE_INITIALOFFSET_CONFIG));
}
 
Example #27
Source File: DataLakeGen2BaseConfig.java    From datacollector with Apache License 2.0
private String resolveCredentialValue(final Stage.Context context, CredentialValue credentialValue, String configName, List<Stage.ConfigIssue> issues) {
  try {
    return credentialValue.get();
  } catch (StageException e) {
    LOG.error(Errors.ADLS_15.getMessage(), e.toString(), e);
    issues.add(context.createConfigIssue(
        DataLakeGen2TargetGroups.DATALAKE.name(),
        configName,
        Errors.ADLS_15,
        e.toString()
    ));
  }
  return null;
}
 
Example #28
Source File: TestFieldHasherProcessor.java    From datacollector with Apache License 2.0
@Test
public void testByteArrayField() throws StageException {
  HasherConfig hasherConfig = createInPlaceHasherProcessor(ImmutableList.of("/byteArray"), hashType);

  FieldHasherProcessor processor = new FieldHasherProcessor(hasherConfig, OnStagePreConditionFailure.CONTINUE);

  ProcessorRunner runner = new ProcessorRunner.Builder(FieldHasherDProcessor.class, processor)
      .addOutputLane("a").build();
  runner.runInit();

  try {
    Map<String, Field> map = new LinkedHashMap<>();
    map.put("byteArray", Field.create(Field.Type.BYTE_ARRAY, "streamsets".getBytes()));
    Record record = RecordCreator.create("s", "s:1");
    record.set(Field.create(map));

    StageRunner.Output output = runner.runProcess(ImmutableList.of(record));
    Assert.assertEquals(1, output.getRecords().get("a").size());
    Field field = output.getRecords().get("a").get(0).get();
    Assert.assertTrue(field.getValue() instanceof Map);
    Map<String, Field> result = field.getValueAsMap();
    Assert.assertTrue(result.size() == 1);
    Assert.assertTrue(result.containsKey("byteArray"));
    Assert.assertEquals(
        computeHash(Field.Type.BYTE_ARRAY, "streamsets".getBytes(), hashType),
        result.get("byteArray").getValue());
  } finally {
    runner.runDestroy();
  }
}
 
Example #29
Source File: TestLogParserServiceImpl.java    From datacollector with Apache License 2.0
@Test
public void testLog4jFormat() throws StageException, IOException {
  LogParserServiceImpl logParserServiceImpl = getDataParserService();
  logParserServiceImpl.logParserServiceConfig.logMode = LogMode.LOG4J;

  logParserService.init();

  Map<String, Field> map = new LinkedHashMap<>();
  map.put("text", Field.create(LOG_4J_FORMAT_LINE));
  Record record = RecordCreator.create("s", "s:1");
  record.set(Field.create(map));

  Record parsedRecord = logParserServiceImpl.getLogParser(
      record.getHeader().getSourceId(),
      record.get("/text").getValueAsString()
  ).parse();

  Field parsed = parsedRecord.get();
  parsedRecord.set("/", parsed);

  Assert.assertFalse(parsedRecord.has("/truncated"));

  Assert.assertTrue(parsedRecord.has("/" + Constants.TIMESTAMP));
  Assert.assertEquals("2015-03-20 15:53:31,161", parsedRecord.get("/" + Constants.TIMESTAMP).getValueAsString());

  Assert.assertTrue(parsedRecord.has("/" + Constants.SEVERITY));
  Assert.assertEquals("DEBUG", parsedRecord.get("/" + Constants.SEVERITY).getValueAsString());

  Assert.assertTrue(parsedRecord.has("/" + Constants.CATEGORY));
  Assert.assertEquals("PipelineConfigurationValidator", parsedRecord.get("/" + Constants.CATEGORY).getValueAsString());

  Assert.assertTrue(parsedRecord.has("/" + Constants.MESSAGE));
  Assert.assertEquals("Pipeline 'test:preview' validation. valid=true, canPreview=true, issuesCount=0",
      parsedRecord.get("/" + Constants.MESSAGE).getValueAsString());
}
 
Example #30
Source File: UDPSourceUpgrader.java    From datacollector with Apache License 2.0
@Override
public List<Config> upgrade(
    String library,
    String stageName,
    String stageInstance,
    int fromVersion,
    int toVersion,
    List<Config> configs
) throws StageException {
  switch (fromVersion) {
    case 1:
      upgradeV1ToV2(configs);
      if (toVersion == 2) {
        break;
      }
      // fall through
    case 2:
      upgradeV2ToV3(configs);
      if (toVersion == 3) {
        break;
      }
      // fall through
    case 3:
      upgradeV3ToV4(configs);
      break;
    default:
      throw new IllegalStateException(Utils.format("Unexpected fromVersion {}", fromVersion));
  }
  return configs;
}