Java Code Examples for org.pentaho.di.trans.TransMeta#setUsingThreadPriorityManagment()

The following examples show how to use org.pentaho.di.trans.TransMeta#setUsingThreadPriorityManagment(). You can vote up the examples you find helpful or vote down those you don't, and follow the links above each example to view it in the original project or source file. You may also check out the related API usage in the sidebar.
Example 1
Source File: MRUtil.java — from pentaho-hadoop-shims (Apache License 2.0)
/**
 * Builds a {@link Trans} from the given transformation XML, initializing the Kettle
 * environment first and attaching a carte-style logging object for traceability.
 *
 * @param conf           Hadoop configuration used to initialize the Kettle environment
 * @param transXml       serialized transformation configuration XML
 * @param singleThreaded when true, forces the single-threaded transformation type
 * @return a ready-to-execute transformation
 * @throws KettleException if the XML cannot be parsed or the environment fails to initialize
 */
public static Trans getTrans( final Configuration conf, final String transXml, boolean singleThreaded )
  throws KettleException {
  initKettleEnvironment( conf );

  TransConfiguration config = TransConfiguration.fromXML( transXml );
  TransMeta meta = config.getTransMeta();

  // Tag all log output of this run with a unique container id so it can be traced
  // back to this mapper execution.
  SimpleLoggingObject loggingObject =
    new SimpleLoggingObject( "HADOOP_MAPPER", LoggingObjectType.CARTE, null ); //$NON-NLS-1$
  loggingObject.setContainerObjectId( UUID.randomUUID().toString() );
  loggingObject.setLogLevel( config.getTransExecutionConfiguration().getLogLevel() );

  if ( singleThreaded ) {
    // Force the type in case the user forgot to set it. The single threaded engine
    // doesn't use threads, so thread priority management would only slow things down.
    meta.setTransformationType( TransformationType.SingleThreaded );
    meta.setUsingThreadPriorityManagment( false );
  } else {
    meta.setTransformationType( TransformationType.Normal );
  }

  return new Trans( meta, loggingObject );
}
 
Example 2
Source File: CarteIT.java — from pentaho-kettle (Apache License 2.0)
/**
 * Generates a small test transformation for Carte integration tests: a single row
 * generator step producing 10,000,000 rows with an integer, a string and a date field.
 *
 * @return a transformation named "CarteUnitTest", ready to run
 */
public static Trans generateTestTransformation() {
  RowGeneratorMeta rowGenerator = new RowGeneratorMeta();
  rowGenerator.allocate( 3 );
  rowGenerator.setRowLimit( "10000000" );

  // The generator meta exposes its fields as parallel arrays; grab them once.
  String[] names = rowGenerator.getFieldName();
  String[] types = rowGenerator.getFieldType();
  int[] lengths = rowGenerator.getFieldLength();
  String[] formats = rowGenerator.getFieldFormat();
  String[] values = rowGenerator.getValue();

  // Field 0: integer ID.
  names[0] = "ID";
  types[0] = ValueMetaFactory.getValueMetaName( ValueMetaInterface.TYPE_INTEGER );
  lengths[0] = 7;
  values[0] = "1234";

  // Field 1: string name.
  names[1] = "Name";
  types[1] = ValueMetaFactory.getValueMetaName( ValueMetaInterface.TYPE_STRING );
  lengths[1] = 35;
  values[1] = "Some name";

  // Field 2: formatted date.
  names[2] = "Last updated";
  types[2] = ValueMetaFactory.getValueMetaName( ValueMetaInterface.TYPE_DATE );
  formats[2] = "yyyy/MM/dd";
  values[2] = "2010/02/09";

  TransMeta transMeta = TransPreviewFactory.generatePreviewTransformation( null, rowGenerator, "A" );
  transMeta.setName( "CarteUnitTest" );
  transMeta.setSizeRowset( 2500 );
  transMeta.setFeedbackSize( 50000 );
  transMeta.setUsingThreadPriorityManagment( false );

  return new Trans( transMeta );
}
 
Example 3
Source File: TransSplitter.java — from pentaho-kettle (Apache License 2.0)
/**
 * Creates a fresh slave or master transformation skeleton that carries over the
 * relevant settings of the original transformation.
 *
 * @param isSlaveTrans  true to build a slave transformation, false for the master
 * @param clusterSchema cluster schema used for naming and partition verification
 * @param slaveServer   target slave server (used in the name and the generated note)
 * @return the new, pre-configured transformation metadata
 * @throws KettleException if partition verification or master-server lookup fails
 */
private TransMeta getOriginalCopy( boolean isSlaveTrans, ClusterSchema clusterSchema,
    SlaveServer slaveServer ) throws KettleException {
  TransMeta copy = new TransMeta();
  copy.setSlaveTransformation( true );

  if ( isSlaveTrans ) {
    copy.setName( getSlaveTransName( originalTransformation.getName(), clusterSchema, slaveServer ) );
    copy.addNote( new NotePadMeta( "This is a generated slave transformation.\nIt will be run on slave server: "
      + slaveServer, 0, 0, -1, -1 ) );

    // Make sure every referenced step carries a valid slave partitioning configuration.
    for ( StepMeta stepMeta : referenceSteps ) {
      verifySlavePartitioningConfiguration( copy, stepMeta, clusterSchema, slaveServer );
    }
  } else {
    copy.setName( originalTransformation.getName() + " (master)" );
    copy.addNote( new NotePadMeta( "This is a generated master transformation.\nIt will be run on server: "
      + getMasterServer(), 0, 0, -1, -1 ) );
  }

  // Cluster schemas are cloned; the database list is shared by reference.
  for ( ClusterSchema schema : originalTransformation.getClusterSchemas() ) {
    copy.getClusterSchemas().add( schema.clone() );
  }
  copy.setDatabases( originalTransformation.getDatabases() );

  // Feedback settings
  copy.setFeedbackShown( originalTransformation.isFeedbackShown() );
  copy.setFeedbackSize( originalTransformation.getFeedbackSize() );

  // Priority management and unique connections
  copy.setUsingThreadPriorityManagment( originalTransformation.isUsingThreadPriorityManagment() );
  copy.setUsingUniqueConnections( originalTransformation.isUsingUniqueConnections() );

  // Repository references
  copy.setRepository( originalTransformation.getRepository() );
  copy.setRepositoryDirectory( originalTransformation.getRepositoryDirectory() );

  // Logging details (cloned so the copy can be modified independently)
  copy.setTransLogTable( (TransLogTable) originalTransformation.getTransLogTable().clone() );

  // Rowset size
  copy.setSizeRowset( originalTransformation.getSizeRowset() );

  return copy;
}
 
Example 4
Source File: RepositoryTestBase.java — from pentaho-kettle (Apache License 2.0)
/**
 * Builds a fully-populated {@link TransMeta} fixture for repository round-trip tests:
 * every persisted attribute (log tables, max-date settings, steps, hops, slave server,
 * partition schema, ...) is set to a known EXP_* constant so tests can verify it after
 * a save/load cycle.
 *
 * Side effects: saves the created DatabaseMeta objects, slave server and partition
 * schema to {@code repository} (so they receive IDs) and pushes each onto
 * {@code deleteStack} for cleanup.
 *
 * @param dbName suffix used to derive the transformation and database names
 * @return the populated transformation metadata
 * @throws Exception if loading the start directory or a repository save fails
 */
protected TransMeta createTransMeta( final String dbName ) throws Exception {
  RepositoryDirectoryInterface rootDir = loadStartDirectory();
  TransMeta transMeta = new TransMeta();
  transMeta.setName( EXP_TRANS_NAME.concat( dbName ) );
  transMeta.setDescription( EXP_TRANS_DESC );
  transMeta.setExtendedDescription( EXP_TRANS_EXTENDED_DESC );
  transMeta.setRepositoryDirectory( rootDir.findDirectory( DIR_TRANSFORMATIONS ) );
  transMeta.setTransversion( EXP_TRANS_VERSION );
  transMeta.setTransstatus( EXP_TRANS_STATUS );
  transMeta.setCreatedUser( EXP_TRANS_CREATED_USER );
  transMeta.setCreatedDate( EXP_TRANS_CREATED_DATE );
  transMeta.setModifiedUser( EXP_TRANS_MOD_USER );
  transMeta.setModifiedDate( EXP_TRANS_MOD_DATE );
  transMeta.addParameterDefinition( EXP_TRANS_PARAM_1_NAME, EXP_TRANS_PARAM_1_DEF, EXP_TRANS_PARAM_1_DESC );

  // Transformation log table with known connection/schema/table/interval values.
  // TODO mlowery other transLogTable fields could be set for testing here
  TransLogTable transLogTable = TransLogTable.getDefault( transMeta, transMeta, new ArrayList<StepMeta>( 0 ) );
  transLogTable.setConnectionName( EXP_TRANS_LOG_TABLE_CONN_NAME );
  transLogTable.setLogInterval( EXP_TRANS_LOG_TABLE_INTERVAL );
  transLogTable.setSchemaName( EXP_TRANS_LOG_TABLE_SCHEMA_NAME );
  transLogTable.setLogSizeLimit( EXP_TRANS_LOG_TABLE_SIZE_LIMIT );
  transLogTable.setTableName( EXP_TRANS_LOG_TABLE_TABLE_NAME );
  transLogTable.setTimeoutInDays( EXP_TRANS_LOG_TABLE_TIMEOUT_IN_DAYS );
  transMeta.setTransLogTable( transLogTable );
  // Performance log table (reuses the same EXP_TRANS_LOG_TABLE_* constants).
  // TODO mlowery other perfLogTable fields could be set for testing here
  PerformanceLogTable perfLogTable = PerformanceLogTable.getDefault( transMeta, transMeta );
  perfLogTable.setConnectionName( EXP_TRANS_LOG_TABLE_CONN_NAME );
  perfLogTable.setLogInterval( EXP_TRANS_LOG_TABLE_INTERVAL );
  perfLogTable.setSchemaName( EXP_TRANS_LOG_TABLE_SCHEMA_NAME );
  perfLogTable.setTableName( EXP_TRANS_LOG_TABLE_TABLE_NAME );
  perfLogTable.setTimeoutInDays( EXP_TRANS_LOG_TABLE_TIMEOUT_IN_DAYS );
  transMeta.setPerformanceLogTable( perfLogTable );
  // Channel log table.
  // TODO mlowery other channelLogTable fields could be set for testing here
  ChannelLogTable channelLogTable = ChannelLogTable.getDefault( transMeta, transMeta );
  channelLogTable.setConnectionName( EXP_TRANS_LOG_TABLE_CONN_NAME );
  channelLogTable.setSchemaName( EXP_TRANS_LOG_TABLE_SCHEMA_NAME );
  channelLogTable.setTableName( EXP_TRANS_LOG_TABLE_TABLE_NAME );
  channelLogTable.setTimeoutInDays( EXP_TRANS_LOG_TABLE_TIMEOUT_IN_DAYS );
  transMeta.setChannelLogTable( channelLogTable );
  // Step log table.
  // TODO mlowery other stepLogTable fields could be set for testing here
  StepLogTable stepLogTable = StepLogTable.getDefault( transMeta, transMeta );
  stepLogTable.setConnectionName( EXP_TRANS_LOG_TABLE_CONN_NAME );
  stepLogTable.setSchemaName( EXP_TRANS_LOG_TABLE_SCHEMA_NAME );
  stepLogTable.setTableName( EXP_TRANS_LOG_TABLE_TABLE_NAME );
  stepLogTable.setTimeoutInDays( EXP_TRANS_LOG_TABLE_TIMEOUT_IN_DAYS );
  transMeta.setStepLogTable( stepLogTable );
  DatabaseMeta dbMeta = createDatabaseMeta( dbName );
  // dbMeta must be saved so that it gets an ID
  repository.save( dbMeta, VERSION_COMMENT_V1, null );
  deleteStack.push( dbMeta );
  // Max-date (incremental processing) settings backed by the saved connection.
  transMeta.setMaxDateConnection( dbMeta );
  transMeta.setMaxDateTable( EXP_TRANS_MAX_DATE_TABLE );
  transMeta.setMaxDateField( EXP_TRANS_MAX_DATE_FIELD );
  transMeta.setMaxDateOffset( EXP_TRANS_MAX_DATE_OFFSET );
  transMeta.setMaxDateDifference( EXP_TRANS_MAX_DATE_DIFF );
  // Runtime tuning and monitoring flags.
  transMeta.setSizeRowset( EXP_TRANS_SIZE_ROWSET );
  transMeta.setSleepTimeEmpty( EXP_TRANS_SLEEP_TIME_EMPTY );
  transMeta.setSleepTimeFull( EXP_TRANS_SLEEP_TIME_FULL );
  transMeta.setUsingUniqueConnections( EXP_TRANS_USING_UNIQUE_CONN );
  transMeta.setFeedbackShown( EXP_TRANS_FEEDBACK_SHOWN );
  transMeta.setFeedbackSize( EXP_TRANS_FEEDBACK_SIZE );
  transMeta.setUsingThreadPriorityManagment( EXP_TRANS_USING_THREAD_PRIORITY_MGMT );
  transMeta.setSharedObjectsFile( EXP_TRANS_SHARED_OBJECTS_FILE );
  transMeta.setCapturingStepPerformanceSnapShots( EXP_TRANS_CAPTURE_STEP_PERF_SNAPSHOTS );
  transMeta.setStepPerformanceCapturingDelay( EXP_TRANS_STEP_PERF_CAP_DELAY );
  transMeta.addDependency( new TransDependency( dbMeta, EXP_TRANS_DEP_TABLE_NAME, EXP_TRANS_DEP_FIELD_NAME ) );
  // Two steps connected by a hop, using a second saved connection.
  DatabaseMeta stepDbMeta = createDatabaseMeta( EXP_DBMETA_NAME_STEP.concat( dbName ) );
  repository.save( stepDbMeta, VERSION_COMMENT_V1, null );
  deleteStack.push( stepDbMeta );
  Condition cond = new Condition();
  StepMeta step1 = createStepMeta1( transMeta, stepDbMeta, cond );
  transMeta.addStep( step1 );
  StepMeta step2 = createStepMeta2( stepDbMeta, cond );
  transMeta.addStep( step2 );
  transMeta.addTransHop( createTransHopMeta( step1, step2 ) );

  SlaveServer slaveServer = createSlaveServer( dbName );
  PartitionSchema partSchema = createPartitionSchema( dbName );
  // slaveServer, partSchema must be saved so that they get IDs
  repository.save( slaveServer, VERSION_COMMENT_V1, null );
  deleteStack.push( slaveServer );
  repository.save( partSchema, VERSION_COMMENT_V1, null );
  deleteStack.push( partSchema );

  // Clustering: one partition assignment plus the original partition schema list.
  SlaveStepCopyPartitionDistribution slaveStepCopyPartitionDistribution = new SlaveStepCopyPartitionDistribution();
  slaveStepCopyPartitionDistribution.addPartition( EXP_SLAVE_NAME, EXP_PART_SCHEMA_NAME, 0 );
  slaveStepCopyPartitionDistribution.setOriginalPartitionSchemas( Arrays
      .asList( new PartitionSchema[] { partSchema } ) );
  transMeta.setSlaveStepCopyPartitionDistribution( slaveStepCopyPartitionDistribution );
  transMeta.setSlaveTransformation( EXP_TRANS_SLAVE_TRANSFORMATION );
  return transMeta;
}