Java Code Examples for org.pentaho.di.trans.TransMeta#setCreatedUser()

The following examples show how to use org.pentaho.di.trans.TransMeta#setCreatedUser(). You can vote up the examples you find helpful or vote down those you don't, and you can navigate to the original project or source file by following the links above each example. Related API usage is listed on the sidebar.
Example 1
Source File: RepositoryImporter.java — from the pentaho-kettle project (Apache License 2.0)
/**
 * Imports a single transformation, read from the given XML node, into the repository.
 *
 * @param transnode
 *          the XML DOM node to read the transformation from
 * @param feedback
 *          callback used to report progress and ask the user questions
 * @return false if the import should be canceled, true otherwise
 * @throws KettleException
 *           in case there is an unexpected error
 */
protected boolean importTransformation( Node transnode, RepositoryImportFeedbackInterface feedback )
  throws KettleException {
  // Parse the transformation from XML (shared objects are ignored at this point).
  TransMeta transMeta = createTransMetaForNode( transnode );
  feedback.setLabel( BaseMessages.getString( PKG, "RepositoryImporter.ImportTrans.Label", Integer
      .toString( transformationNumber ), transMeta.getName() ) );

  validateImportedElement( importRules, transMeta );

  // Determine the repository directory path this transformation should end up in.
  String path = Const.NVL( XMLHandler.getTagValue( transnode, "info", "directory" ), Const.FILE_SEPARATOR );
  if ( transDirOverride != null ) {
    path = transDirOverride;
  }
  if ( path.startsWith( "/" ) ) {
    // Strip the leading root separator; it is not needed below.
    path = path.substring( 1 );
  }

  // Honor the (optional) set of source directories the import is limited to.
  if ( !limitDirs.isEmpty() && Const.indexOfString( path, limitDirs ) < 0 ) {
    feedback.addLog( BaseMessages.getString( PKG,
        "RepositoryImporter.SkippedTransformationNotPartOfLimitingDirectories.Log", transMeta.getName() ) );
    return true;
  }

  RepositoryDirectoryInterface targetDir = getTargetDirectory( path, transDirOverride, feedback );

  // The transformation loaded fine; check whether one with this name already exists there.
  ObjectId existingId = rep.getTransformationID( transMeta.getName(), targetDir );
  if ( existingId != null && askOverwrite ) {
    overwrite = feedback.transOverwritePrompt( transMeta );
    askOverwrite = feedback.isAskingOverwriteConfirmation();
  } else {
    updateDisplay();
  }

  if ( existingId != null && !overwrite ) {
    // An existing transformation that we were told not to overwrite: skip it.
    feedback.addLog( BaseMessages.getString( PKG, "RepositoryImporter.SkippedExistingTransformation.Log", transMeta
        .getName() ) );
    return true;
  }

  replaceSharedObjects( transMeta );
  transMeta.setObjectId( existingId );
  transMeta.setRepositoryDirectory( targetDir );
  patchTransSteps( transMeta );

  try {
    // Record who created this transformation and when, unless that is already known.
    String creator = transMeta.getCreatedUser();
    if ( creator == null || "-".equals( creator ) ) {
      transMeta.setCreatedDate( new Date() );
      transMeta.setCreatedUser( rep.getUserInfo() == null ? null : rep.getUserInfo().getLogin() );
    }
    saveTransMeta( transMeta );
    feedback.addLog( BaseMessages.getString( PKG, "RepositoryImporter.TransSaved.Log", Integer
        .toString( transformationNumber ), transMeta.getName() ) );

    if ( transMeta.hasRepositoryReferences() ) {
      // Remember this transformation so its repository references can be processed later.
      referencingObjects.add( new RepositoryObject( transMeta.getObjectId(), transMeta.getName(), transMeta
          .getRepositoryDirectory(), null, null, RepositoryObjectType.TRANSFORMATION, null, false ) );
    }

  } catch ( Exception e ) {
    feedback.addLog( BaseMessages.getString( PKG, "RepositoryImporter.ErrorSavingTrans.Log", Integer
        .toString( transformationNumber ), transMeta.getName(), Const.getStackTracker( e ) ) );

    // After a save error, give the user the chance to abort the whole import run.
    if ( !feedback.askContinueOnErrorQuestion( BaseMessages.getString( PKG,
        "RepositoryImporter.DoYouWantToContinue.Title" ), BaseMessages.getString( PKG,
        "RepositoryImporter.DoYouWantToContinue.Message" ) ) ) {
      return false;
    }
  }
  return true;
}
 
Example 2
Source File: RepositoryTestBase.java — from the pentaho-kettle project (Apache License 2.0)
/**
 * Builds a fully populated {@link TransMeta} for repository round-trip tests, exercising
 * essentially every settable field (name/description, log tables, max-date settings,
 * steps, hops, dependencies, and slave/partition configuration).
 *
 * NOTE(review): statement order matters here — each {@code repository.save(...)} must run
 * before the saved object is referenced (it assigns the object's ID), and every save is
 * paired with a {@code deleteStack.push(...)} so the test fixture can be cleaned up later.
 *
 * @param dbName
 *          suffix appended to the transformation/database names so fixtures are unique per test
 * @return a transformation populated with the EXP_* expected values for later verification
 * @throws Exception
 *           if any repository save fails
 */
protected TransMeta createTransMeta( final String dbName ) throws Exception {
  RepositoryDirectoryInterface rootDir = loadStartDirectory();
  TransMeta transMeta = new TransMeta();
  // Basic identity and versioning metadata.
  transMeta.setName( EXP_TRANS_NAME.concat( dbName ) );
  transMeta.setDescription( EXP_TRANS_DESC );
  transMeta.setExtendedDescription( EXP_TRANS_EXTENDED_DESC );
  transMeta.setRepositoryDirectory( rootDir.findDirectory( DIR_TRANSFORMATIONS ) );
  transMeta.setTransversion( EXP_TRANS_VERSION );
  transMeta.setTransstatus( EXP_TRANS_STATUS );
  transMeta.setCreatedUser( EXP_TRANS_CREATED_USER );
  transMeta.setCreatedDate( EXP_TRANS_CREATED_DATE );
  transMeta.setModifiedUser( EXP_TRANS_MOD_USER );
  transMeta.setModifiedDate( EXP_TRANS_MOD_DATE );
  transMeta.addParameterDefinition( EXP_TRANS_PARAM_1_NAME, EXP_TRANS_PARAM_1_DEF, EXP_TRANS_PARAM_1_DESC );

  // The four log tables (trans/performance/channel/step) share the same expected
  // connection, schema, table name, and timeout values.
  // TODO mlowery other transLogTable fields could be set for testing here
  TransLogTable transLogTable = TransLogTable.getDefault( transMeta, transMeta, new ArrayList<StepMeta>( 0 ) );
  transLogTable.setConnectionName( EXP_TRANS_LOG_TABLE_CONN_NAME );
  transLogTable.setLogInterval( EXP_TRANS_LOG_TABLE_INTERVAL );
  transLogTable.setSchemaName( EXP_TRANS_LOG_TABLE_SCHEMA_NAME );
  transLogTable.setLogSizeLimit( EXP_TRANS_LOG_TABLE_SIZE_LIMIT );
  transLogTable.setTableName( EXP_TRANS_LOG_TABLE_TABLE_NAME );
  transLogTable.setTimeoutInDays( EXP_TRANS_LOG_TABLE_TIMEOUT_IN_DAYS );
  transMeta.setTransLogTable( transLogTable );
  // TODO mlowery other perfLogTable fields could be set for testing here
  PerformanceLogTable perfLogTable = PerformanceLogTable.getDefault( transMeta, transMeta );
  perfLogTable.setConnectionName( EXP_TRANS_LOG_TABLE_CONN_NAME );
  perfLogTable.setLogInterval( EXP_TRANS_LOG_TABLE_INTERVAL );
  perfLogTable.setSchemaName( EXP_TRANS_LOG_TABLE_SCHEMA_NAME );
  perfLogTable.setTableName( EXP_TRANS_LOG_TABLE_TABLE_NAME );
  perfLogTable.setTimeoutInDays( EXP_TRANS_LOG_TABLE_TIMEOUT_IN_DAYS );
  transMeta.setPerformanceLogTable( perfLogTable );
  // TODO mlowery other channelLogTable fields could be set for testing here
  ChannelLogTable channelLogTable = ChannelLogTable.getDefault( transMeta, transMeta );
  channelLogTable.setConnectionName( EXP_TRANS_LOG_TABLE_CONN_NAME );
  channelLogTable.setSchemaName( EXP_TRANS_LOG_TABLE_SCHEMA_NAME );
  channelLogTable.setTableName( EXP_TRANS_LOG_TABLE_TABLE_NAME );
  channelLogTable.setTimeoutInDays( EXP_TRANS_LOG_TABLE_TIMEOUT_IN_DAYS );
  transMeta.setChannelLogTable( channelLogTable );
  // TODO mlowery other stepLogTable fields could be set for testing here
  StepLogTable stepLogTable = StepLogTable.getDefault( transMeta, transMeta );
  stepLogTable.setConnectionName( EXP_TRANS_LOG_TABLE_CONN_NAME );
  stepLogTable.setSchemaName( EXP_TRANS_LOG_TABLE_SCHEMA_NAME );
  stepLogTable.setTableName( EXP_TRANS_LOG_TABLE_TABLE_NAME );
  stepLogTable.setTimeoutInDays( EXP_TRANS_LOG_TABLE_TIMEOUT_IN_DAYS );
  transMeta.setStepLogTable( stepLogTable );
  DatabaseMeta dbMeta = createDatabaseMeta( dbName );
  // dbMeta must be saved so that it gets an ID
  repository.save( dbMeta, VERSION_COMMENT_V1, null );
  deleteStack.push( dbMeta );
  // Max-date (incremental load window) settings, backed by the saved connection.
  transMeta.setMaxDateConnection( dbMeta );
  transMeta.setMaxDateTable( EXP_TRANS_MAX_DATE_TABLE );
  transMeta.setMaxDateField( EXP_TRANS_MAX_DATE_FIELD );
  transMeta.setMaxDateOffset( EXP_TRANS_MAX_DATE_OFFSET );
  transMeta.setMaxDateDifference( EXP_TRANS_MAX_DATE_DIFF );
  // Runtime tuning and miscellaneous flags.
  transMeta.setSizeRowset( EXP_TRANS_SIZE_ROWSET );
  transMeta.setSleepTimeEmpty( EXP_TRANS_SLEEP_TIME_EMPTY );
  transMeta.setSleepTimeFull( EXP_TRANS_SLEEP_TIME_FULL );
  transMeta.setUsingUniqueConnections( EXP_TRANS_USING_UNIQUE_CONN );
  transMeta.setFeedbackShown( EXP_TRANS_FEEDBACK_SHOWN );
  transMeta.setFeedbackSize( EXP_TRANS_FEEDBACK_SIZE );
  transMeta.setUsingThreadPriorityManagment( EXP_TRANS_USING_THREAD_PRIORITY_MGMT );
  transMeta.setSharedObjectsFile( EXP_TRANS_SHARED_OBJECTS_FILE );
  transMeta.setCapturingStepPerformanceSnapShots( EXP_TRANS_CAPTURE_STEP_PERF_SNAPSHOTS );
  transMeta.setStepPerformanceCapturingDelay( EXP_TRANS_STEP_PERF_CAP_DELAY );
  transMeta.addDependency( new TransDependency( dbMeta, EXP_TRANS_DEP_TABLE_NAME, EXP_TRANS_DEP_FIELD_NAME ) );
  // A second database connection used by the steps; saved first so it has an ID.
  DatabaseMeta stepDbMeta = createDatabaseMeta( EXP_DBMETA_NAME_STEP.concat( dbName ) );
  repository.save( stepDbMeta, VERSION_COMMENT_V1, null );
  deleteStack.push( stepDbMeta );
  // Two steps connected by a single hop; both share the same condition object.
  Condition cond = new Condition();
  StepMeta step1 = createStepMeta1( transMeta, stepDbMeta, cond );
  transMeta.addStep( step1 );
  StepMeta step2 = createStepMeta2( stepDbMeta, cond );
  transMeta.addStep( step2 );
  transMeta.addTransHop( createTransHopMeta( step1, step2 ) );

  SlaveServer slaveServer = createSlaveServer( dbName );
  PartitionSchema partSchema = createPartitionSchema( dbName );
  // slaveServer, partSchema must be saved so that they get IDs
  repository.save( slaveServer, VERSION_COMMENT_V1, null );
  deleteStack.push( slaveServer );
  repository.save( partSchema, VERSION_COMMENT_V1, null );
  deleteStack.push( partSchema );

  // Clustering setup: one partition mapped to the expected slave/schema pair.
  SlaveStepCopyPartitionDistribution slaveStepCopyPartitionDistribution = new SlaveStepCopyPartitionDistribution();
  slaveStepCopyPartitionDistribution.addPartition( EXP_SLAVE_NAME, EXP_PART_SCHEMA_NAME, 0 );
  slaveStepCopyPartitionDistribution.setOriginalPartitionSchemas( Arrays
      .asList( new PartitionSchema[] { partSchema } ) );
  transMeta.setSlaveStepCopyPartitionDistribution( slaveStepCopyPartitionDistribution );
  transMeta.setSlaveTransformation( EXP_TRANS_SLAVE_TRANSFORMATION );
  return transMeta;
}