Java Code Examples for org.pentaho.di.trans.TransMeta#getDatabases()

The following examples show how to use org.pentaho.di.trans.TransMeta#getDatabases(). You can vote examples up or down, and follow the links above each example to view the original project or source file. Related API usage is listed in the sidebar.
Example 1
Source File: SpoonPartitionsDelegate.java — from pentaho-kettle (Apache License 2.0), 6 votes
/**
 * Opens the partition schema dialog for an existing schema and, when the user
 * confirms, persists the edit to the repository (if connected) and notifies the
 * shared-object sync utility so other loaded copies are updated.
 *
 * @param transMeta       transformation that owns the partition schema
 * @param partitionSchema the schema to edit in place
 */
public void editPartitionSchema( TransMeta transMeta, PartitionSchema partitionSchema ) {
  // Capture the name before the dialog mutates it; the sync utility needs it
  // to locate stale copies under the old name.
  final String nameBeforeEdit = partitionSchema.getName();

  PartitionSchemaDialog dialog = new PartitionSchemaDialog(
      spoon.getShell(), partitionSchema, transMeta.getPartitionSchemas(),
      transMeta.getDatabases(), transMeta );

  if ( !dialog.open() ) {
    // User cancelled: nothing changed, nothing to refresh.
    return;
  }

  boolean storedInRepository = spoon.rep != null && partitionSchema.getObjectId() != null;
  if ( storedInRepository ) {
    try {
      saveSharedObjectToRepository( partitionSchema, null );
      if ( sharedObjectSyncUtil != null ) {
        sharedObjectSyncUtil.synchronizePartitionSchemas( partitionSchema, nameBeforeEdit );
      }
    } catch ( KettleException e ) {
      showSaveErrorDialog( partitionSchema, e );
    }
  }
  refreshTree();
}
 
Example 2
Source File: SpoonPartitionsDelegate.java — from pentaho-kettle (Apache License 2.0), 5 votes
/**
 * Creates a new partition schema through the dialog, rejects duplicate names,
 * adds the schema to the transformation and — when a repository is connected —
 * saves it there as well before refreshing the tree.
 *
 * @param transMeta transformation that will own the new schema
 */
public void newPartitioningSchema( TransMeta transMeta ) {
  PartitionSchema schema = new PartitionSchema();

  PartitionSchemaDialog dialog = new PartitionSchemaDialog(
      spoon.getShell(), schema, transMeta.getPartitionSchemas(),
      transMeta.getDatabases(), transMeta );

  if ( !dialog.open() ) {
    // User cancelled the dialog; discard the new schema.
    return;
  }

  List<PartitionSchema> existing = transMeta.getPartitionSchemas();
  if ( isDuplicate( existing, schema ) ) {
    // Schema names must be unique within the transformation.
    new ErrorDialog(
      spoon.getShell(), getMessage( "Spoon.Dialog.ErrorSavingPartition.Title" ), getMessage(
      "Spoon.Dialog.ErrorSavingPartition.Message", schema.getName() ),
      new KettleException( getMessage( "Spoon.Dialog.ErrorSavingPartition.NotUnique" ) ) );
    return;
  }

  existing.add( schema );

  if ( spoon.rep != null ) {
    try {
      // Read-only users may not create shared objects in the repository.
      if ( spoon.rep.getSecurityProvider().isReadOnly() ) {
        throw new KettleException( BaseMessages.getString(
          PKG, "Spoon.Dialog.Exception.ReadOnlyRepositoryUser" ) );
      }
      spoon.rep.save( schema, Const.VERSION_COMMENT_INITIAL_VERSION, null );
      if ( sharedObjectSyncUtil != null ) {
        sharedObjectSyncUtil.reloadTransformationRepositoryObjects( false );
      }
    } catch ( KettleException e ) {
      showSaveErrorDialog( schema, e );
    }
  }

  refreshTree();
}
 
Example 3
Source File: SharedObjectSyncUtil.java — from pentaho-kettle (Apache License 2.0), 4 votes
/**
 * Supplies the database connections of the given transformation as the set of
 * shared objects to synchronize.
 *
 * @param transformation transformation whose databases are returned
 * @return the transformation's database connections
 */
@Override
public List<DatabaseMeta> getObjectsForSyncFromTransformation( TransMeta transformation ) {
  List<DatabaseMeta> databases = transformation.getDatabases();
  return databases;
}
 
Example 4
Source File: TransDelegate.java — from pentaho-kettle (Apache License 2.0), 4 votes
/**
 * Saves the shared objects referenced by a transformation (database
 * connections, slave servers, cluster schemas and partition schemas) into the
 * repository before the transformation itself is stored.
 *
 * @param element        the transformation whose shared objects are saved
 * @param versionComment comment recorded with each saved revision
 * @throws KettleException when saving any shared object fails
 */
public void saveSharedObjects( final RepositoryElementInterface element, final String versionComment )
  throws KettleException {
  TransMeta transMeta = (TransMeta) element;

  // Databases: only persist when modified or not yet assigned a repository
  // object id (e.g. imported). Updates additionally require WRITE access via
  // the ACL service; a denied update is logged, not thrown, so the remaining
  // objects are still saved.
  for ( DatabaseMeta db : transMeta.getDatabases() ) {
    boolean needsSave = db.hasChanged() || db.getObjectId() == null;
    if ( !needsSave ) {
      continue;
    }
    boolean mayWrite = db.getObjectId() == null
        || unifiedRepositoryConnectionAclService.hasAccess( db.getObjectId(),
            RepositoryFilePermission.WRITE );
    if ( mayWrite ) {
      repo.save( db, versionComment, null );
    } else {
      log.logError( BaseMessages.getString( PKG, "PurRepository.ERROR_0004_DATABASE_UPDATE_ACCESS_DENIED",
          db.getName() ) );
    }
  }

  // Slave servers: persist when changed or new.
  for ( SlaveServer server : transMeta.getSlaveServers() ) {
    if ( server.hasChanged() || server.getObjectId() == null ) {
      repo.save( server, versionComment, null );
    }
  }

  // Cluster schemas: persist when changed or new.
  for ( ClusterSchema cluster : transMeta.getClusterSchemas() ) {
    if ( cluster.hasChanged() || cluster.getObjectId() == null ) {
      repo.save( cluster, versionComment, null );
    }
  }

  // Partition schemas: persist when changed or new.
  for ( PartitionSchema partition : transMeta.getPartitionSchemas() ) {
    if ( partition.hasChanged() || partition.getObjectId() == null ) {
      repo.save( partition, versionComment, null );
    }
  }
}
 
Example 5
Source File: StreamToTransNodeConverter.java — from pentaho-kettle (Apache License 2.0), 4 votes
/**
 * Persists the shared objects of a transformation (database connections,
 * slave servers, cluster schemas, partition schemas) into the repository
 * before the transformation itself is written, and back-fills missing
 * database object ids from the repository.
 *
 * <p>Fixes over the previous revision: the primitive {@code updateMeta} flag
 * was initialized from the boxed constants {@code Boolean.FALSE}/{@code
 * Boolean.TRUE} (needless unboxing) and merely mirrored whether the
 * index list was non-empty — the flag is removed; diamond operator used for
 * the list construction. Logic is otherwise unchanged.
 *
 * @param repo    repository to save into (iteration is synchronized on it)
 * @param element the transformation whose shared objects are saved
 * @throws KettleException when saving any shared object fails
 */
private void saveSharedObjects( final Repository repo, final RepositoryElementInterface element )
  throws KettleException {
  TransMeta transMeta = (TransMeta) element;
  // Connection names already known to the repository; the boolean argument is
  // forwarded as-is (presumably "include deleted" — confirm against
  // Repository.getDatabaseNames).
  List<String> databaseNames = Arrays.asList( repo.getDatabaseNames( true ) );

  // Indices into transMeta's database list whose object ids must be
  // back-filled from the repository after the scan below.
  List<Integer> transMetaDatabasesToUpdate = new ArrayList<>();

  synchronized ( repo ) {
    int dbIndex = 0;
    for ( DatabaseMeta databaseMeta : transMeta.getDatabases() ) {
      if ( !databaseNames.contains( databaseMeta.getName() ) ) {
        // Not in the repository yet: save when it has no id or has a hostname.
        if ( databaseMeta.getObjectId() == null || !StringUtils.isEmpty( databaseMeta.getHostname() ) ) {
          repo.save( databaseMeta, null, null );
        }
      } else if ( databaseMeta.getObjectId() == null ) {
        // Known to the repository but the local copy lacks its object id.
        transMetaDatabasesToUpdate.add( dbIndex );
      }
      dbIndex++;
    }

    // Back-fill object ids for the connections recorded above (no-op when the
    // list is empty, which is what the removed updateMeta flag used to guard).
    for ( Integer databaseMetaIndex : transMetaDatabasesToUpdate ) {
      transMeta.getDatabase( databaseMetaIndex ).setObjectId(
        repo.getDatabaseID( transMeta.getDatabase( databaseMetaIndex ).getName() ) );
    }

    // Slave servers: persist when changed or new.
    for ( SlaveServer slaveServer : transMeta.getSlaveServers() ) {
      if ( slaveServer.hasChanged() || slaveServer.getObjectId() == null ) {
        repo.save( slaveServer, null, null );
      }
    }

    // Cluster schemas: persist when changed or new.
    for ( ClusterSchema clusterSchema : transMeta.getClusterSchemas() ) {
      if ( clusterSchema.hasChanged() || clusterSchema.getObjectId() == null ) {
        repo.save( clusterSchema, null, null );
      }
    }

    // Partition schemas: persist when changed or new.
    for ( PartitionSchema partitionSchema : transMeta.getPartitionSchemas() ) {
      if ( partitionSchema.hasChanged() || partitionSchema.getObjectId() == null ) {
        repo.save( partitionSchema, null, null );
      }
    }
  }
}