Java Code Examples for org.pentaho.di.core.database.Database#getQueryFields()

The following examples show how to use org.pentaho.di.core.database.Database#getQueryFields(). You can vote up the ones you like or vote down the ones you don't like, and go to the original project or source file by following the links above each example. You may check out the related API usage in the sidebar.
Example 1
Source File: DatabaseJoinMeta.java    From pentaho-kettle with Apache License 2.0 5 votes vote down vote up
@Override
public RowMetaInterface getTableFields() {
  // Construct a placeholder parameter row so the lookup SQL can be analyzed
  // without real input data.
  //
  RowMetaInterface paramRow = new RowMeta();
  for ( int idx = 0; idx < parameterField.length; idx++ ) {
    ValueMetaInterface meta;
    try {
      meta = ValueMetaFactory.createValueMeta( parameterField[idx], parameterType[idx] );
    } catch ( KettlePluginException kpe ) {
      // Fall back to a typeless value when the requested type cannot be created.
      meta = new ValueMetaNone( parameterField[idx] );
    }
    paramRow.addValueMeta( meta );
  }

  RowMetaInterface resultFields = null;
  if ( databaseMeta == null ) {
    // No connection configured: nothing to query, return null like before.
    return resultFields;
  }

  Database db = new Database( loggingObject, databaseMeta );
  databases = new Database[] { db }; // Keep track of this one for cancelQuery

  try {
    db.connect();
    String resolvedSql = databaseMeta.environmentSubstitute( sql );
    resultFields = db.getQueryFields( resolvedSql, true, paramRow, new Object[paramRow.size()] );
  } catch ( KettleDatabaseException dbe ) {
    logError( BaseMessages.getString( PKG, "DatabaseJoinMeta.Log.DatabaseErrorOccurred" ) + dbe.getMessage() );
  } finally {
    db.disconnect();
  }
  return resultFields;
}
 
Example 2
Source File: JobEntryWaitForSQL.java    From pentaho-kettle with Apache License 2.0 4 votes vote down vote up
/**
 * Evaluates whether the configured SQL (or table row count) satisfies the
 * entry's success condition.
 *
 * <p>Either runs the custom SQL and counts the returned rows, or issues a
 * {@code SELECT COUNT} against the (optionally schema-qualified) table, then
 * compares the count against {@code nrRowsLimit} using {@code successCondition}.
 * When {@code isAddRowsResult} and {@code iscustomSQL} are set, the fetched
 * rows are appended to {@code result} on success.</p>
 *
 * @param result the job result to which rows may be added on success
 * @param nrRowsLimit the row-count threshold to compare against
 * @param realSchemaName resolved schema name (may be empty)
 * @param realTableName resolved table name (ignored when {@code iscustomSQL})
 * @param customSQL the custom SQL statement (used when {@code iscustomSQL})
 * @return true when the success condition is met
 * @throws KettleException if reading the query metadata fails
 */
protected boolean SQLDataOK( Result result, long nrRowsLimit, String realSchemaName, String realTableName,
  String customSQL ) throws KettleException {
  String countStatement = null;
  long rowsCount = 0;
  boolean successOK = false;
  List<Object[]> ar = null;
  RowMetaInterface rowMeta = null;
  Database db = new Database( this, connection );
  db.shareVariablesWith( this );
  try {
    db.connect( parentJob.getTransactionId(), null );
    if ( iscustomSQL ) {
      countStatement = customSQL;
    } else {
      // Build a count statement on the (optionally schema-qualified) table.
      if ( !Utils.isEmpty( realSchemaName ) ) {
        countStatement =
          selectCount + db.getDatabaseMeta().getQuotedSchemaTableCombination( realSchemaName, realTableName );
      } else {
        countStatement = selectCount + db.getDatabaseMeta().quoteField( realTableName );
      }
    }

    if ( countStatement != null ) {
      if ( log.isDetailed() ) {
        logDetailed( BaseMessages.getString( PKG, "JobEntryWaitForSQL.Log.RunSQLStatement", countStatement ) );
      }

      if ( iscustomSQL ) {
        // Custom SQL: fetch all rows and count them client-side.
        ar = db.getRows( countStatement, 0 );
        if ( ar != null ) {
          rowsCount = ar.size();
        } else {
          if ( log.isDebug() ) {
            logDebug( BaseMessages.getString(
              PKG, "JobEntryWaitForSQL.Log.customSQLreturnedNothing", countStatement ) );
          }
        }

      } else {
        // Count statement: the single returned row holds the count.
        RowMetaAndData row = db.getOneRow( countStatement );
        if ( row != null ) {
          rowsCount = row.getInteger( 0 );
        }
      }
      if ( log.isDetailed() ) {
        logDetailed( BaseMessages.getString( PKG, "JobEntryWaitForSQL.Log.NrRowsReturned", "" + rowsCount ) );
      }

      switch ( successCondition ) {
        case JobEntryWaitForSQL.SUCCESS_CONDITION_ROWS_COUNT_EQUAL:
          successOK = ( rowsCount == nrRowsLimit );
          break;
        case JobEntryWaitForSQL.SUCCESS_CONDITION_ROWS_COUNT_DIFFERENT:
          successOK = ( rowsCount != nrRowsLimit );
          break;
        case JobEntryWaitForSQL.SUCCESS_CONDITION_ROWS_COUNT_SMALLER:
          successOK = ( rowsCount < nrRowsLimit );
          break;
        case JobEntryWaitForSQL.SUCCESS_CONDITION_ROWS_COUNT_SMALLER_EQUAL:
          successOK = ( rowsCount <= nrRowsLimit );
          break;
        case JobEntryWaitForSQL.SUCCESS_CONDITION_ROWS_COUNT_GREATER:
          successOK = ( rowsCount > nrRowsLimit );
          break;
        case JobEntryWaitForSQL.SUCCESS_CONDITION_ROWS_COUNT_GREATER_EQUAL:
          successOK = ( rowsCount >= nrRowsLimit );
          break;
        default:
          break;
      }
    } // end if countStatement!=null
  } catch ( KettleDatabaseException dbe ) {
    // Deliberate best-effort: a failed check simply means "not successful yet";
    // the surrounding wait loop will retry.
    logError( BaseMessages.getString( PKG, "JobEntryWaitForSQL.Error.RunningEntry", dbe.getMessage() ) );
  } finally {
    try {
      // Fetch the row metadata while still connected; needed below to attach
      // the fetched rows to the job result.
      if ( isAddRowsResult && iscustomSQL && ar != null ) {
        rowMeta = db.getQueryFields( countStatement, false );
      }
    } finally {
      // BUGFIX: previously getQueryFields() ran unguarded before disconnect(),
      // so a metadata failure leaked the connection. Always disconnect.
      db.disconnect();
    }
  }

  if ( successOK ) {
    // Add the fetched rows to the job result.
    if ( isAddRowsResult && iscustomSQL && ar != null ) {
      List<RowMetaAndData> rows = new ArrayList<RowMetaAndData>();
      for ( int i = 0; i < ar.size(); i++ ) {
        rows.add( new RowMetaAndData( rowMeta, ar.get( i ) ) );
      }
      result.getRows().addAll( rows );
    }
  }
  return successOK;

}
 
Example 3
Source File: TableInputMeta.java    From pentaho-kettle with Apache License 2.0 4 votes vote down vote up
/**
 * Determines the output row layout of this step by analyzing the configured SQL,
 * trying a cheap unconnected lookup first and falling back to a connected one.
 */
public void getFields( RowMetaInterface row, String origin, RowMetaInterface[] info, StepMeta nextStep,
  VariableSpace space, Repository repository, IMetaStore metaStore ) throws KettleStepException {
  if ( databaseMeta == null ) {
    return; // TODO: throw an exception here
  }

  // Serve the cached layout when available: no database round trip at all.
  if ( cachedRowMetaActive ) {
    row.addRowMeta( cachedRowMeta );
    return;
  }

  Database db = getDatabase();
  super.databases = new Database[] { db }; // keep track of it for canceling purposes...

  // Resolve variables in the SQL before asking the database for the layout.
  String resolvedSql = sql;
  if ( isVariableReplacementActive() ) {
    resolvedSql = db.environmentSubstitute( sql );
    if ( space != null ) {
      resolvedSql = space.environmentSubstitute( resolvedSql );
    }
  }

  boolean param = false;

  // First try without connecting to the database... (can be S L O W)
  RowMetaInterface queryFields;
  try {
    queryFields = db.getQueryFields( resolvedSql, param );
  } catch ( KettleDatabaseException dbe ) {
    throw new KettleStepException( "Unable to get queryfields for SQL: " + Const.CR + resolvedSql, dbe );
  }

  if ( queryFields != null ) {
    attachOrigin( queryFields, origin );
    row.addRowMeta( queryFields );
  } else {
    // Fall back to a connected lookup, optionally feeding parameter metadata
    // taken from the info (lookup) step.
    try {
      db.connect();

      RowMetaInterface paramRowMeta = null;
      Object[] paramData = null;

      StreamInterface infoStream = getStepIOMeta().getInfoStreams().get( 0 );
      if ( !Utils.isEmpty( infoStream.getStepname() ) ) {
        param = true;
        if ( info.length > 0 && info[ 0 ] != null ) {
          paramRowMeta = info[ 0 ];
          paramData = RowDataUtil.allocateRowData( paramRowMeta.size() );
        }
      }

      queryFields = db.getQueryFields( resolvedSql, param, paramRowMeta, paramData );
      if ( queryFields == null ) {
        return;
      }
      attachOrigin( queryFields, origin );
      row.addRowMeta( queryFields );
    } catch ( KettleException ke ) {
      throw new KettleStepException( "Unable to get queryfields for SQL: " + Const.CR + resolvedSql, ke );
    } finally {
      db.disconnect();
    }
  }

  if ( isLazyConversionActive() ) {
    // Mark string columns as binary-stored so conversion is deferred until needed.
    for ( int i = 0; i < row.size(); i++ ) {
      ValueMetaInterface v = row.getValueMeta( i );
      if ( v.getType() != ValueMetaInterface.TYPE_STRING ) {
        continue;
      }
      try {
        ValueMetaInterface storageMeta = ValueMetaFactory.cloneValueMeta( v );
        storageMeta.setStorageType( ValueMetaInterface.STORAGE_TYPE_NORMAL );
        v.setStorageMetadata( storageMeta );
        v.setStorageType( ValueMetaInterface.STORAGE_TYPE_BINARY_STRING );
      } catch ( KettlePluginException e ) {
        throw new KettleStepException( "Unable to clone meta for lazy conversion: " + Const.CR + v, e );
      }
    }
  }
}