org.pentaho.di.core.DBCache Java Examples

The following examples show how to use org.pentaho.di.core.DBCache, Kettle's shared cache of database query metadata. Each example names the source file and project it was taken from.
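Most of the examples follow the same pattern, so here is a minimal sketch of it first: look up a (connection name, query text) pair in the process-wide cache, compute the row metadata on a miss, store it, and clear a connection's entries once DDL may have invalidated them. DBCache, DBCacheEntry, and RowMetaInterface are the actual Kettle types used in the examples below; the class DBCacheUsageSketch and the helper fetchFieldsFromDatabase are illustrative stand-ins, not part of the Kettle API.

import org.pentaho.di.core.DBCache;
import org.pentaho.di.core.DBCacheEntry;
import org.pentaho.di.core.row.RowMetaInterface;

public class DBCacheUsageSketch {

  /** Read-through lookup, modeled on Database.getQueryFields() in Example #8. */
  public static RowMetaInterface cachedQueryFields( String connectionName, String sql ) {
    DBCache dbcache = DBCache.getInstance(); // process-wide singleton
    DBCacheEntry entry = new DBCacheEntry( connectionName, sql ); // cache key: connection name + query text
    RowMetaInterface fields = dbcache.get( entry ); // null on a miss (or when the cache is inactive)
    if ( fields == null ) {
      fields = fetchFieldsFromDatabase( connectionName, sql );
      if ( fields != null ) {
        dbcache.put( entry, fields ); // remember the field layout for the next lookup
      }
    }
    return fields;
  }

  /** Evict stale metadata after DDL, as Examples #6 and #9 do. A null name clears every connection. */
  public static void evict( String connectionName ) {
    DBCache.getInstance().clear( connectionName );
  }

  // Hypothetical stand-in for a real metadata query against the database.
  private static RowMetaInterface fetchFieldsFromDatabase( String connectionName, String sql ) {
    return null;
  }
}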
Example #1
Source File: PentahoMapReduceJobBuilderImplTest.java    From pentaho-hadoop-shims with Apache License 2.0
@Before
public void setup() throws Exception {
  KettleClientEnvironment.init();
  KettleLogStore.init();
  DBCache.getInstance().setInactive();

  TransConfiguration transConfig = new TransConfiguration( new TransMeta(), new TransExecutionConfiguration() );
  transXml = TransConfiguration.fromXML( transConfig.getXML() ).getXML();

  visitorServices.add( new MockVisitorService() );

  pentahoMapReduceJobBuilder =
    new PentahoMapReduceJobBuilderImpl( namedCluster, hadoopShim, logChannelInterface, variableSpace,
      pluginInterface, vfsPluginDirectory, pmrProperties, transFactory, pmrArchiveGetter, visitorServices );
}
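In this test setup, setInactive() switches the shared cache off: an inactive DBCache neither returns nor stores entries, so metadata cached by earlier tests cannot leak into this one.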
 
Example #2
Source File: SQLEditor.java    From pentaho-kettle with Apache License 2.0
private void clearCache() {
  MessageBox mb = new MessageBox( shell, SWT.ICON_QUESTION | SWT.NO | SWT.YES | SWT.CANCEL );
  mb.setMessage( BaseMessages.getString( PKG, "SQLEditor.ClearWholeCache.Message", connection.getName() ) );
  mb.setText( BaseMessages.getString( PKG, "SQLEditor.ClearWholeCache.Title" ) );
  int answer = mb.open();

  switch ( answer ) {
    case SWT.NO:
      DBCache.getInstance().clear( connection.getName() );

      mb = new MessageBox( shell, SWT.ICON_INFORMATION | SWT.OK );
      mb.setMessage( BaseMessages.getString( PKG, "SQLEditor.ConnectionCacheCleared.Message", connection
        .getName() ) );
      mb.setText( BaseMessages.getString( PKG, "SQLEditor.ConnectionCacheCleared.Title" ) );
      mb.open();

      break;
    case SWT.YES:
      DBCache.getInstance().clear( null );

      mb = new MessageBox( shell, SWT.ICON_INFORMATION | SWT.OK );
      mb.setMessage( BaseMessages.getString( PKG, "SQLEditor.WholeCacheCleared.Message" ) );
      mb.setText( BaseMessages.getString( PKG, "SQLEditor.WholeCacheCleared.Title" ) );
      mb.open();

      break;
    case SWT.CANCEL:
      break;
    default:
      break;
  }
}
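Note the button mapping: SWT.NO clears only the entries for the current connection, while SWT.YES passes null to clear() and empties the whole cache.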
 
Example #3
Source File: SQLEditor.java    From pentaho-kettle with Apache License 2.0
public SQLEditor( VariableSpace space, Shell parent, int style, DatabaseMeta ci, DBCache dbc, String sql ) {
  props = PropsUI.getInstance();
  log = new LogChannel( ci );
  input = sql;
  connection = ci;
  dbcache = dbc;
  this.parentShell = parent;
  this.style = ( style != SWT.None ) ? style : this.style;
  this.variables = space;
}
 
Example #4
Source File: XulDatabaseExplorerController.java    From pentaho-kettle with Apache License 2.0
public XulDatabaseExplorerController( Shell shell, DatabaseMeta databaseMeta, List<DatabaseMeta> databases,
  boolean aLook ) {
  this.model = new XulDatabaseExplorerModel( databaseMeta );
  this.shell = shell;
  this.bf = new DefaultBindingFactory();
  this.databases = databases;
  this.dbcache = DBCache.getInstance();
  this.isJustLook = aLook;
}
 
Example #5
Source File: SpoonDBDelegate.java    From pentaho-kettle with Apache License 2.0
/**
 * Delete a database connection.
 *
 * @param hasDatabasesInterface
 *          the object holding the list of database connections
 * @param db
 *          the database connection to delete
 */
public void delConnection( HasDatabasesInterface hasDatabasesInterface, DatabaseMeta db ) {
  int pos = hasDatabasesInterface.indexOfDatabase( db );
  boolean worked = false;

  // delete from repository?
  Repository rep = spoon.getRepository();
  if ( rep != null ) {
    if ( !rep.getSecurityProvider().isReadOnly() ) {
      try {
        rep.deleteDatabaseMeta( db.getName() );
        worked = true;
      } catch ( KettleException dbe ) {
        new ErrorDialog( spoon.getShell(),
          BaseMessages.getString( PKG, "Spoon.Dialog.ErrorDeletingConnection.Title" ),
          BaseMessages.getString( PKG, "Spoon.Dialog.ErrorDeletingConnection.Message", db.getName() ), dbe );
      }
    } else {
      new ErrorDialog( spoon.getShell(),
        BaseMessages.getString( PKG, "Spoon.Dialog.ErrorDeletingConnection.Title" ),
        BaseMessages.getString( PKG, "Spoon.Dialog.ErrorDeletingConnection.Message", db.getName() ),
        new KettleException( BaseMessages.getString( PKG, "Spoon.Dialog.Exception.ReadOnlyUser" ) ) );
    }
  }

  if ( spoon.getRepository() == null || worked ) {
    spoon.addUndoDelete(
      (UndoInterface) hasDatabasesInterface, new DatabaseMeta[] { (DatabaseMeta) db.clone() },
      new int[] { pos } );
    hasDatabasesInterface.removeDatabase( pos );
    DBCache.getInstance().clear( db.getName() ); // remove this from the cache as well.
  }

  refreshTree();
  spoon.setShellText();
}
 
Example #6
Source File: SpoonDBDelegate.java    From pentaho-kettle with Apache License 2.0
public void clearDBCache( DatabaseMeta databaseMeta ) {
  if ( databaseMeta != null ) {
    DBCache.getInstance().clear( databaseMeta.getName() );
  } else {
    DBCache.getInstance().clear( null );
  }
}
 
Example #7
Source File: LucidDBBulkLoader.java    From pentaho-kettle with Apache License 2.0
public boolean execute( LucidDBBulkLoaderMeta meta, boolean wait ) throws KettleException {
  Runtime rt = Runtime.getRuntime();

  try {
    String tableName = environmentSubstitute( meta.getTableName() );

    // 1) Set up the FIFO folder, create the directory and path to it...
    //
    String fifoVfsDirectory = environmentSubstitute( meta.getFifoDirectory() );
    FileObject directory = KettleVFS.getFileObject( fifoVfsDirectory, getTransMeta() );
    directory.createFolder();
    String fifoDirectory = KettleVFS.getFilename( directory );

    // 2) Create the FIFO file using the "mkfifo" command...
    // Make sure to log all the possible output, also from STDERR
    //
    data.fifoFilename = KettleVFS.getFilename( directory ) + Const.FILE_SEPARATOR + tableName + ".csv";
    data.bcpFilename = KettleVFS.getFilename( directory ) + Const.FILE_SEPARATOR + tableName + ".bcp";

    File fifoFile = new File( data.fifoFilename );
    if ( !fifoFile.exists() ) {
      String mkFifoCmd = "mkfifo " + data.fifoFilename;
      logBasic( "Creating FIFO file using this command: " + mkFifoCmd );
      Process mkFifoProcess = rt.exec( mkFifoCmd );
      StreamLogger errorLogger = new StreamLogger( log, mkFifoProcess.getErrorStream(), "mkFifoError" );
      StreamLogger outputLogger = new StreamLogger( log, mkFifoProcess.getInputStream(), "mkFifoOutput" );
      new Thread( errorLogger ).start();
      new Thread( outputLogger ).start();
      int result = mkFifoProcess.waitFor();
      if ( result != 0 ) {
        throw new Exception( "Return code " + result + " received from statement : " + mkFifoCmd );
      }
    }

    // 3) Make a connection to LucidDB for sending SQL commands
    // (Also, we need a clear cache for getting up-to-date target metadata)
    // Check for a missing connection before dereferencing it...
    if ( meta.getDatabaseMeta() == null ) {
      logError( BaseMessages.getString( PKG, "LuciDBBulkLoader.Init.ConnectionMissing", getStepname() ) );
      return false;
    }
    DBCache.getInstance().clear( meta.getDatabaseMeta().getName() );
    data.db = new Database( this, meta.getDatabaseMeta() );
    data.db.shareVariablesWith( this );
    // Connect to the database
    if ( getTransMeta().isUsingUniqueConnections() ) {
      synchronized ( getTrans() ) {
        data.db.connect( getTrans().getTransactionId(), getPartitionID() );
      }
    } else {
      data.db.connect( getPartitionID() );
    }

    logBasic( "Connected to LucidDB" );

    // 4) Now we are ready to create the LucidDB FIFO server that will handle the actual bulk loading.
    //
    String fifoServerStatement = "";
    fifoServerStatement += "create or replace server " + meta.getFifoServerName() + Const.CR;
    fifoServerStatement += "foreign data wrapper sys_file_wrapper" + Const.CR;
    fifoServerStatement += "options (" + Const.CR;
    fifoServerStatement += "directory '" + fifoDirectory + "'," + Const.CR;
    fifoServerStatement += "file_extension 'csv'," + Const.CR;
    fifoServerStatement += "with_header 'no'," + Const.CR;
    fifoServerStatement += "num_rows_scan '0'," + Const.CR;
    fifoServerStatement += "lenient 'no');" + Const.CR;

    logBasic( "Creating LucidDB fifo_server with the following command: " + fifoServerStatement );
    data.db.execStatements( fifoServerStatement );

    // 5) Set the error limit in the LucidDB session
    // REVIEW jvs 13-Dec-2008: is this guaranteed to retain the same
    // connection?
    String errorMaxStatement = "";
    errorMaxStatement += "alter session set \"errorMax\" = " + meta.getMaxErrors() + ";" + Const.CR;
    logBasic( "Setting error limit in LucidDB session with the following command: " + errorMaxStatement );
    data.db.execStatements( errorMaxStatement );

    // 6) Now we also need to create a bulk loader file .bcp
    //
    createBulkLoadConfigFile( data.bcpFilename );

    // 7) execute the actual load command!
    // This will actually block until the load is done in the
    // separate execution thread; see notes in executeLoadCommand
    // on why it's important for this to occur BEFORE
    // opening our end of the FIFO.
    //
    executeLoadCommand( tableName );

    // 8) We have to write rows to the FIFO file later on.
    data.fifoStream = new BufferedOutputStream( new FileOutputStream( fifoFile ) );
  } catch ( Exception ex ) {
    throw new KettleException( ex );
  }

  return true;
}
 
Example #8
Source File: Database.java    From pentaho-kettle with Apache License 2.0
public RowMetaInterface getQueryFields( String sql, boolean param, RowMetaInterface inform, Object[] data )
  throws KettleDatabaseException {
  RowMetaInterface fields;
  DBCache dbcache = DBCache.getInstance();

  DBCacheEntry entry = null;

  // Check the cache first!
  //
  if ( dbcache != null ) {
    entry = new DBCacheEntry( databaseMeta.getName(), sql );
    fields = dbcache.get( entry );
    if ( fields != null ) {
      return fields;
    }
  }
  if ( connection == null ) {
    return null; // No open connection: this was a cache-only probe.
  }

  // No cache entry found

  // The new method of retrieving the query fields fails on Oracle because
  // the driver does not implement getMetaData() on a prepared statement.
  // Even recent drivers (10.2, for example) fail because of it.
  //
  // Other databases may not support it either (we have no knowledge of
  // this at the time of writing). If we discover other RDBMSs, we will
  // create an interface for it. For now, we just try to get the field
  // layout on the rebound in the exception block below.
  //
  try {
    if ( databaseMeta.supportsPreparedStatementMetadataRetrieval() ) {
      // On with the regular program.
      //
      fields = getQueryFieldsFromPreparedStatement( sql );
    } else {
      if ( isDataServiceConnection() ) {
        fields = getQueryFieldsFromDatabaseMetaData( sql );
      } else {
        fields = getQueryFieldsFromDatabaseMetaData();
      }
    }
  } catch ( Exception e ) {
    fields = getQueryFieldsFallback( sql, param, inform, data );
  }

  // Store in cache!!
  if ( dbcache != null && entry != null && fields != null ) {
    dbcache.put( entry, fields );
  }

  return fields;
}
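This is the read-through pattern sketched at the top of the page: the connection name and SQL text form the cache key, a miss falls through to the driver, and the freshly determined row metadata is stored for next time. The early return when there is no connection lets callers probe the cache without connecting.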
 
Example #9
Source File: Database.java    From pentaho-kettle with Apache License 2.0
public Result execStatement( String rawsql, RowMetaInterface params, Object[] data ) throws KettleDatabaseException {
  Result result = new Result();

  // Strip comments from the raw SQL before executing it.
  // The SqlCommentScrubber respects single-quoted strings, so a double-dash
  // or multi-line comment marker inside a single-quoted string is treated
  // as string content rather than as a comment.
  String sql = databaseMeta.getDatabaseInterface().createSqlScriptParser().removeComments( rawsql ).trim();
  try {
    boolean resultSet;
    int count;
    if ( params != null ) {
      try ( PreparedStatement prepStmt = connection.prepareStatement( databaseMeta.stripCR( sql ) ) ) {
        setValues( params, data, prepStmt ); // set the parameters!
        resultSet = prepStmt.execute();
        count = prepStmt.getUpdateCount();
      }
    } else {
      String sqlStripped = databaseMeta.stripCR( sql );
      try ( Statement stmt = connection.createStatement() ) {
        resultSet = stmt.execute( sqlStripped );
        count = stmt.getUpdateCount();
      }
    }
    String upperSql = sql.toUpperCase();
    if ( !resultSet ) {
      // if the result is a resultset, we don't do anything with it!
      // You should have called something else!
      if ( count > 0 ) {
        if ( upperSql.startsWith( "INSERT" ) ) {
          result.setNrLinesOutput( count );
        } else if ( upperSql.startsWith( "UPDATE" ) ) {
          result.setNrLinesUpdated( count );
        } else if ( upperSql.startsWith( "DELETE" ) ) {
          result.setNrLinesDeleted( count );
        }
      }
    }

    // See if a cache needs to be cleared...
    if ( upperSql.startsWith( "ALTER TABLE" )
      || upperSql.startsWith( "DROP TABLE" ) || upperSql.startsWith( "CREATE TABLE" ) ) {
      DBCache.getInstance().clear( databaseMeta.getName() );
    }
  } catch ( SQLException ex ) {
    throw new KettleDatabaseException( "Couldn't execute SQL: " + sql + Const.CR, ex );
  } catch ( Exception e ) {
    throw new KettleDatabaseException( "Unexpected error executing SQL: " + Const.CR, e );
  }

  return result;
}
 
Example #10
Source File: SQLEditor.java    From pentaho-kettle with Apache License 2.0
public SQLEditor( Shell parent, int style, DatabaseMeta ci, DBCache dbc, String sql ) {
  this( null, parent, style, ci, dbc, sql );
}
 
Example #11
Source File: SpoonDBDelegate.java    From pentaho-kettle with Apache License 2.0
public void sqlConnection( DatabaseMeta databaseMeta ) {
  SQLEditor sql =
    new SQLEditor( databaseMeta, spoon.getShell(), SWT.NONE, databaseMeta, DBCache.getInstance(), "" );
  sql.open();
}
 
Example #12
Source File: MySQLBulkLoader.java    From pentaho-kettle with Apache License 2.0
public boolean execute( MySQLBulkLoaderMeta meta ) throws KettleException {
  Runtime rt = Runtime.getRuntime();

  try {
    // 1) Create the FIFO file using the "mkfifo" command...
    // Make sure to log all the possible output, also from STDERR
    //
    data.fifoFilename = environmentSubstitute( meta.getFifoFileName() );

    File fifoFile = new File( data.fifoFilename );
    if ( !fifoFile.exists() ) {
      // MKFIFO!
      //
      String mkFifoCmd = "mkfifo " + data.fifoFilename;
      //
      logBasic( BaseMessages.getString( PKG, "MySQLBulkLoader.Message.CREATINGFIFO",  data.dbDescription, mkFifoCmd ) );
      Process mkFifoProcess = rt.exec( mkFifoCmd );
      StreamLogger errorLogger = new StreamLogger( log, mkFifoProcess.getErrorStream(), "mkFifoError" );
      StreamLogger outputLogger = new StreamLogger( log, mkFifoProcess.getInputStream(), "mkFifoOutput" );
      new Thread( errorLogger ).start();
      new Thread( outputLogger ).start();
      int result = mkFifoProcess.waitFor();
      if ( result != 0 ) {
        throw new Exception( BaseMessages.getString( PKG, "MySQLBulkLoader.Message.ERRORFIFORC", result, mkFifoCmd ) );
      }

      String chmodCmd = "chmod 666 " + data.fifoFilename;
      logBasic( BaseMessages.getString( PKG, "MySQLBulkLoader.Message.SETTINGPERMISSIONSFIFO",  data.dbDescription, chmodCmd ) );
      Process chmodProcess = rt.exec( chmodCmd );
      errorLogger = new StreamLogger( log, chmodProcess.getErrorStream(), "chmodError" );
      outputLogger = new StreamLogger( log, chmodProcess.getInputStream(), "chmodOutput" );
      new Thread( errorLogger ).start();
      new Thread( outputLogger ).start();
      result = chmodProcess.waitFor();
      if ( result != 0 ) {
        throw new Exception( BaseMessages.getString( PKG, "MySQLBulkLoader.Message.ERRORFIFORC", result, chmodCmd ) );
      }
    }

    // 2) Make a connection to MySQL for sending SQL commands
    // (Also, we need a clear cache for getting up-to-date target metadata)
    // Check for a missing connection before dereferencing it...
    if ( meta.getDatabaseMeta() == null ) {
      logError( BaseMessages.getString( PKG, "MySQLBulkLoader.Init.ConnectionMissing", getStepname() ) );
      return false;
    }
    DBCache.getInstance().clear( meta.getDatabaseMeta().getName() );
    data.db = new Database( this, meta.getDatabaseMeta() );
    data.db.shareVariablesWith( this );
    PluginInterface dbPlugin =
        PluginRegistry.getInstance().getPlugin( DatabasePluginType.class, meta.getDatabaseMeta().getDatabaseInterface() );
    data.dbDescription = ( dbPlugin != null ) ? dbPlugin.getDescription() : BaseMessages.getString( PKG, "MySQLBulkLoader.UnknownDB" );

    // Connect to the database
    if ( getTransMeta().isUsingUniqueConnections() ) {
      synchronized ( getTrans() ) {
        data.db.connect( getTrans().getTransactionId(), getPartitionID() );
      }
    } else {
      data.db.connect( getPartitionID() );
    }

    logBasic( BaseMessages.getString( PKG, "MySQLBulkLoader.Message.CONNECTED",  data.dbDescription ) );

    // 3) Now we are ready to run the load command...
    //
    executeLoadCommand();
  } catch ( Exception ex ) {
    throw new KettleException( ex );
  }

  return true;
}
 
Example #13
Source File: MondrianHelper.java    From pentaho-kettle with Apache License 2.0
public RowMetaInterface getCachedRowMeta() {
  DBCacheEntry cacheEntry = new DBCacheEntry( databaseMeta.getName(), queryString );
  return DBCache.getInstance().get( cacheEntry );
}
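Because DBCacheEntry keys on the connection name plus the raw query string, the same cache holds metadata for Mondrian MDX queries as well as SQL; Example #14 shows the matching put() call.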
 
Example #14
Source File: MondrianHelper.java    From pentaho-kettle with Apache License 2.0
/**
 * Retrieve the rows from the opened query and create a description of the query's flattened output. This call
 * populates rowMetaInterface and rows; the query needs to be opened beforehand.
 *
 * @throws KettleDatabaseException
 *           in case something goes wrong
 *
 *           TODO: this is not quite working for our purposes.
 */
public void createFlattenedOutput() throws KettleDatabaseException {

  final Axis[] axes = result.getAxes();
  rows = new ArrayList<>();
  headings = new ArrayList<>();

  // Compute headings. Each heading is a hierarchy name. If there are say
  // 2 members on the columns, and 3 members on the rows axis, then there
  // will be 5 headings.
  //
  for ( Axis axis : axes ) {
    final List<Position> positions = axis.getPositions();
    if ( positions.isEmpty() ) {
      // Result set is empty. There is no data to print, and we cannot
      // even deduce column headings.
      return;
    }
    for ( Member member : positions.get( 0 ) ) {
      Hierarchy hierarchy = member.getHierarchy();
      headings.add( hierarchy.getUniqueName() );
    }
  }

  int[] coords = new int[axes.length];
  outputFlattenedRecurse( result, rows, new ArrayList<>(), coords, 0 );

  outputRowMeta = new RowMeta();

  // Just scan the first row to see what data types we received...
  //
  for ( int i = 0; i < rows.size() && i < 1; i++ ) {

    List<Object> rowValues = rows.get( i );

    for ( int c = 0; c < rowValues.size(); c++ ) {
      Object valueData = rowValues.get( c );

      int valueType;

      if ( valueData instanceof String ) {
        valueType = ValueMetaInterface.TYPE_STRING;
      } else if ( valueData instanceof Date ) {
        valueType = ValueMetaInterface.TYPE_DATE;
      } else if ( valueData instanceof Boolean ) {
        valueType = ValueMetaInterface.TYPE_BOOLEAN;
      } else if ( valueData instanceof Integer ) {
        valueType = ValueMetaInterface.TYPE_INTEGER;
        valueData = Long.valueOf( ( (Integer) valueData ).longValue() );
      } else if ( valueData instanceof Short ) {
        valueType = ValueMetaInterface.TYPE_INTEGER;
        valueData = Long.valueOf( ( (Short) valueData ).longValue() );
      } else if ( valueData instanceof Byte ) {
        valueType = ValueMetaInterface.TYPE_INTEGER;
        valueData = Long.valueOf( ( (Byte) valueData ).longValue() );
      } else if ( valueData instanceof Long ) {
        valueType = ValueMetaInterface.TYPE_INTEGER;
      } else if ( valueData instanceof Double ) {
        valueType = ValueMetaInterface.TYPE_NUMBER;
      } else if ( valueData instanceof Float ) {
        valueType = ValueMetaInterface.TYPE_NUMBER;
        valueData = Double.valueOf( ( (Float) valueData ).doubleValue() );
      } else if ( valueData instanceof BigDecimal ) {
        valueType = ValueMetaInterface.TYPE_BIGNUMBER;
      } else {
        throw new KettleDatabaseException( BaseMessages.getString( PKG, "MondrianInputErrorUnhandledType", valueData.getClass().toString() ) );
      }

      try {
        ValueMetaInterface valueMeta = ValueMetaFactory.createValueMeta( headings.get( c ), valueType );
        outputRowMeta.addValueMeta( valueMeta );
        rowValues.set( c, valueData ); // write the normalized value back at column index c (i is the row index)
      } catch ( Exception e ) {
        throw new KettleDatabaseException( e );
      }
    }
  }

  // Now that we painstakingly found the metadata that comes out of the Mondrian database, cache it please...
  //
  DBCacheEntry cacheEntry = new DBCacheEntry( databaseMeta.getName(), queryString );
  DBCache.getInstance().put( cacheEntry, outputRowMeta );
}
 
Example #15
Source File: TableAgileMart.java    From pentaho-kettle with Apache License 2.0
@Override
public boolean adjustSchema() {
  TableOutputMeta meta = getMeta();
  TableOutputData data = getData();
  TransMeta transMeta = getTransMeta();
  StepMeta stepMeta = meta.getParentStepMeta();
  DBCache dbcache = transMeta.getDbCache();
  StringBuilder messageBuffer = new StringBuilder();

  try {
    RowMetaInterface prev = transMeta.getPrevStepFields( stepMeta.getName() );
    if ( log.isDetailed() ) {
      logDetailed( "Attempting to auto adjust table structure" );
    }

    if ( log.isDetailed() ) {
      logDetailed( "getTransMeta: " + getTransMeta() );
    }
    if ( log.isDetailed() ) {
      logDetailed( "getStepname: " + getStepname() );
    }

    SQLStatement statement = meta.getSQLStatements( transMeta, stepMeta, prev, repository, metaStore );

    if ( log.isDetailed() ) {
      logDetailed( "Statement: " + statement );
    }
    if ( log.isDetailed() && statement != null ) {
      logDetailed( "Statement has SQL: " + statement.hasSQL() );
    }

    if ( statement != null && statement.hasSQL() ) {
      String sql = statement.getSQL();
      if ( log.isDetailed() ) {
        logDetailed( "Trying: " + sql );
      }

      try {
        log.logDetailed( "Executing SQL: " + Const.CR + sql );
        data.db.execStatement( sql );

        // Clear the database cache, in case we're using one...
        if ( dbcache != null ) {
          dbcache.clear( data.databaseMeta.getName() );
        }
      } catch ( Exception dbe ) {
        String error = BaseMessages.getString( PKG, "SQLEditor.Log.SQLExecError", sql, dbe.toString() );
        messageBuffer.append( error ).append( Const.CR );
        this.message = messageBuffer.toString(); // surface the accumulated error to the caller
        return false;
      }

      if ( log.isDetailed() ) {
        logDetailed( "Successfull: " + sql );
      }
    } else if ( statement == null || statement.getError() == null ) {
      // there were no changes to be made
      return true;
    } else {
      this.message = statement.getError();
      logError( statement.getError() );
      return false;
    }
  } catch ( Exception e ) {
    logError( "An error ocurred trying to adjust the table schema", e );
  }
  return true;
}
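Unlike the other examples, this one obtains the cache from the transformation metadata via transMeta.getDbCache() rather than calling DBCache.getInstance() directly, and it guards against the cache being null before clearing it.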
 
Example #16
Source File: AutoModeler.java    From pentaho-metadata with GNU Lesser General Public License v2.1
public Domain generateDomain( final ImportStrategy importStrategy ) throws PentahoMetadataException {
  Domain domain = new Domain();
  domain.setId( modelName );

  List<LocaleType> locales = new ArrayList<LocaleType>();
  locales.add( new LocaleType( "en_US", "English (US)" ) ); //$NON-NLS-1$ //$NON-NLS-2$
  domain.setLocales( locales );

  SqlPhysicalModel physicalModel = new SqlPhysicalModel();
  physicalModel.setId( databaseMeta.getName() );
  physicalModel.setDatasource( ThinModelConverter.convertFromLegacy( databaseMeta ) );

  Database database = database();

  try {
    // Add the database connection to the empty schema...
    //
    domain.addPhysicalModel( physicalModel );

    // Also add a model with the same name as the model name...
    //
    String bmID = Util.getLogicalModelIdPrefix() + "_" + modelName.replaceAll( " ", "_" ).toUpperCase(); //$NON-NLS-1$ //$NON-NLS-2$ //$NON-NLS-3$
    LogicalModel logicalModel = new LogicalModel();
    logicalModel.setId( bmID );
    domain.addLogicalModel( logicalModel );

    // Connect to the database...
    //
    database.connect();

    // clear the cache
    DBCache.getInstance().clear( databaseMeta.getName() );

    for ( int i = 0; i < tableNames.length; i++ ) {
      SchemaTable schemaTable = tableNames[i];

      // Import the specified tables and turn them into PhysicalTable
      // objects...
      //
      SqlPhysicalTable physicalTable =
          PhysicalTableImporter.importTableDefinition( database, schemaTable.getSchemaName(), schemaTable
          .getTableName(), locale, importStrategy );
      physicalModel.addPhysicalTable( physicalTable );

      // At the same time, we will create a business table and add that to the
      // business model...
      //
      LogicalTable businessTable = createBusinessTable( physicalTable, locale );
      logicalModel.addLogicalTable( businessTable );
    }
  } catch ( Exception e ) {
    // For the unexpected stuff, just throw the exception upstairs.
    //
    throw new PentahoMetadataException( e );
  } finally {
    // Make sure to close the connection
    //
    database.disconnect();
  }

  return domain;
}
 
Example #17
Source File: AutoModeler.java    From pentaho-metadata with GNU Lesser General Public License v2.1
public SchemaMeta generateSchemaMeta() throws PentahoMetadataException {
  SchemaMeta schemaMeta = new SchemaMeta();
  schemaMeta.setName( modelName );

  Database database = new Database( databaseMeta );
  try {
    // Add the database connection to the empty schema...
    //
    schemaMeta.addDatabase( databaseMeta );

    // Also add a model with the same name as the model name...
    //
    String bmID = Settings.getBusinessModelIDPrefix() + "_" + Const.replace( modelName, " ", "_" ).toUpperCase();
    BusinessModel businessModel = new BusinessModel( bmID );
    schemaMeta.addModel( businessModel );

    // Connect to the database...
    //
    database.connect();

    // clear the cache
    DBCache.getInstance().clear( databaseMeta.getName() );

    for ( int i = 0; i < tableNames.length; i++ ) {
      SchemaTable schemaTable = tableNames[i];

      // Import the specified tables and turn them into PhysicalTable objects...
      //
      PhysicalTable physicalTable =
          PhysicalTableImporter.importTableDefinition( database, schemaTable.getSchemaName(), schemaTable
              .getTableName(), locale );
      schemaMeta.addTable( physicalTable );

      // At the same time, we will create a business table and add that to the business model...
      //
      BusinessTable businessTable = createBusinessTable( physicalTable, locale );
      businessModel.addBusinessTable( businessTable );
    }

    // Set the model as active
    //
    schemaMeta.setActiveModel( businessModel );
  } catch ( Exception e ) {
    // For the unexpected stuff, just throw the exception upstairs.
    //
    throw new PentahoMetadataException( e );
  } finally {
    // Make sure to close the connection
    //
    database.disconnect();
  }

  return schemaMeta;
}