org.pentaho.di.core.database.Database类的使用及代码示例

x33g5p2x  于2022-01-18 转载在 其他  
字(12.2k)|赞(0)|评价(0)|浏览(436)

本文整理了Java中org.pentaho.di.core.database.Database类的一些代码示例,展示了Database类的具体用法。这些代码示例主要来源于Github/Stackoverflow/Maven等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。Database类的具体详情如下:
包路径:org.pentaho.di.core.database.Database
类名称:Database

Database介绍

[英]Database handles the process of connecting to, reading from, writing to and updating databases. The database specific parameters are defined in DatabaseInfo.
[中]数据库处理连接、读取、写入和更新数据库的过程。数据库特定参数在DatabaseInfo中定义。

代码示例

代码示例来源:origin: pentaho/pentaho-kettle

protected void checkConnection() throws KettleDatabaseException {
 // Validate the configured connection by doing a full connect/disconnect round trip.
 Database db = null;
 try {
  db = new Database( this, connection );
  db.shareVariablesWith( this );
  db.connect( parentJob.getTransactionId(), null );
 } finally {
  // Only disconnect if construction succeeded.
  if ( db != null ) {
   db.disconnect();
  }
 }
}

代码示例来源:origin: pentaho/pentaho-kettle

private RowMetaInterface getTableFields( LoggingObjectInterface parentLoggingObject ) throws KettleDatabaseException {
 // Open a connection, read the table layout, and always release the connection afterwards.
 final Database db = new Database( parentLoggingObject, databaseMeta );
 try {
  db.connect();
  return db.getTableFields( schemaTable );
 } finally {
  db.disconnect();
 }
}

代码示例来源:origin: pentaho/pentaho-kettle

public String getDDL( String tableName, RowMetaInterface fields, String tk, boolean use_autoinc, String pk,
           boolean semicolon ) throws KettleDatabaseException {
 // Quote any field names that collide with reserved SQL words before generating DDL.
 databaseMeta.quoteReservedWords( fields );
 final String quotedTk = ( tk == null ) ? null : databaseMeta.quoteField( tk );
 // ALTER an existing table to match the layout, otherwise CREATE it from scratch.
 return checkTableExists( tableName )
  ? getAlterTableStatement( tableName, fields, quotedTk, use_autoinc, pk, semicolon )
  : getCreateTableStatement( tableName, fields, quotedTk, use_autoinc, pk, semicolon );
}

代码示例来源:origin: pentaho/pentaho-kettle

private void disconnectDb( Database db ) throws KettleDatabaseException {
 // Null-tolerant shutdown: commit pending work (unless auto-commit) and close.
 if ( db != null ) {
  if ( !db.isAutoCommit() ) {
   db.commit( true );
  }
  db.disconnect();
 }
}

代码示例来源:origin: pentaho/pentaho-kettle

protected RowMetaInterface getDatabaseTableFields( Database db, String schemaName, String tableName )
 throws KettleDatabaseException {
 // Try the cheap, connectionless metadata lookup first (connecting can be S L O W).
 RowMetaInterface fields = db.getTableFieldsMeta( schemaName, tableName );
 if ( fields != null ) {
  return fields;
 }
 // Fall back to an actual connection and repeat the lookup.
 db.connect();
 return db.getTableFieldsMeta( schemaName, tableName );
}

代码示例来源:origin: pentaho/pentaho-kettle

if ( prev != null && prev.size() > 0 ) {
 if ( !Utils.isEmpty( tableName ) ) {
  Database db = new Database( loggingObject, databaseMeta );
  db.shareVariablesWith( transMeta );
  try {
   db.connect();
   String cr_table = db.getDDL( schemaTable, prev, null, false, null, true );
   if ( idx_fields != null && idx_fields.length > 0 && !db.checkIndexExists( schemaTable, idx_fields ) ) {
    String indexname = "idx_" + tableName + "_lookup";
    cr_index =
     db.getCreateIndexStatement(
      schemaName, tableName, indexname, idx_fields, false, false, false, true );

代码示例来源:origin: pentaho/pentaho-kettle

db = new Database( jobMeta, logTable.getDatabaseMeta() );
db.shareVariablesWith( jobMeta );
db.connect();
String tableName = db.environmentSubstitute( logTable.getTableName() );
String schemaTable =
 logTable.getDatabaseMeta().getQuotedSchemaTableCombination(
  db.environmentSubstitute( logTable.getSchemaName() ),
  db.environmentSubstitute( logTable.getTableName() ) );
String createTable = db.getDDL( schemaTable, fields );
 if ( !index.isEmpty() ) {
  String createIndex =
   db.getCreateIndexStatement( schemaTable, "IDX_" + tableName + "_" + ( i + 1 ), index
    .getFieldNames(), false, false, false, true );
  if ( !Utils.isEmpty( createIndex ) ) {
 db.disconnect();

代码示例来源:origin: pentaho/pentaho-kettle

public RowMetaInterface getRequiredFields( VariableSpace space ) throws KettleException {
 // Resolve variable references before any validation, matching the original call order.
 String realTableName = space.environmentSubstitute( tablename );
 String realSchemaName = space.environmentSubstitute( schemaName );
 if ( databaseMeta == null ) {
  throw new KettleException( BaseMessages.getString( PKG, "SQLFileOutputMeta.Exception.ConnectionNotDefined" ) );
 }
 Database db = new Database( loggingObject, databaseMeta );
 try {
  db.connect();
  if ( Utils.isEmpty( realTableName ) ) {
   throw new KettleException( BaseMessages.getString( PKG, "SQLFileOutputMeta.Exception.TableNotSpecified" ) );
  }
  if ( !db.checkTableExists( realSchemaName, realTableName ) ) {
   throw new KettleException( BaseMessages.getString( PKG, "SQLFileOutputMeta.Exception.TableNotFound" ) );
  }
  return db.getTableFieldsMeta( realSchemaName, realTableName );
 } catch ( Exception e ) {
  // Note: also wraps the "not found"/"not specified" exceptions above, as before.
  throw new KettleException(
   BaseMessages.getString( PKG, "SQLFileOutputMeta.Exception.ErrorGettingFields" ), e );
 } finally {
  db.disconnect();
 }
}

代码示例来源:origin: pentaho/pentaho-kettle

if ( !Utils.isEmpty( tablename ) ) {
 String schemaTable = databaseMeta.getQuotedSchemaTableCombination( schemaName, tablename );
 Database db = new Database( loggingObject, databaseMeta );
 try {
  boolean doHash = false;
  String cr_table = null;
  db.connect();
  if ( !db.checkTableExists( schemaTable ) ) {
   RowMetaInterface tabFields = db.getTableFields( schemaTable );
   db.getDDL(
    schemaTable, fields, ( CREATION_METHOD_SEQUENCE.equals( getTechKeyCreation() )
     && sequenceFrom != null && sequenceFrom.length() != 0 ) ? null : technicalKeyField,
   if ( !db.checkIndexExists( schemaTable, techKeyArr ) ) {
    String indexname = "idx_" + tablename + "_pk";
    cr_uniq_index =
     db.getCreateIndexStatement(
      schemaTable, indexname, techKeyArr, true, true, false, true );
    cr_uniq_index += Const.CR;
  if ( !Utils.isEmpty( idx_fields ) && !db.checkIndexExists( schemaTable, idx_fields ) ) {
   String indexname = "idx_" + tablename + "_lookup";
   cr_index =
    db.getCreateIndexStatement(
     schemaTable, indexname, idx_fields, false, false, false, true );

代码示例来源:origin: pentaho/pentaho-kettle

public boolean init( StepMetaInterface smi, StepDataInterface sdi ) {
 meta = (InsertUpdateMeta) smi;
 data = (InsertUpdateData) sdi;
 // Base-step initialization must succeed before we touch the database.
 if ( !super.init( smi, sdi ) ) {
  return false;
 }
 try {
  if ( meta.getDatabaseMeta() == null ) {
   logError( BaseMessages.getString( PKG, "InsertUpdate.Init.ConnectionMissing", getStepname() ) );
   return false;
  }
  data.db = new Database( this, meta.getDatabaseMeta() );
  data.db.shareVariablesWith( this );
  if ( getTransMeta().isUsingUniqueConnections() ) {
   // Unique-connection mode: serialize connect calls across the transformation.
   synchronized ( getTrans() ) {
    data.db.connect( getTrans().getTransactionId(), getPartitionID() );
   }
  } else {
   data.db.connect( getPartitionID() );
  }
  data.db.setCommit( meta.getCommitSize( this ) );
  return true;
 } catch ( KettleException ke ) {
  logError( BaseMessages.getString( PKG, "InsertUpdate.Log.ErrorOccurredDuringStepInitialize" )
   + ke.getMessage() );
 }
 return false;
}

代码示例来源:origin: pentaho/pentaho-kettle

/**
 * Writes information to the Job Log table. Cleans old records, in case the job is finished.
 *
 * @param jobLogTable the log table definition to write to
 * @param status the current job status; {@link LogStatus#END} triggers cleanup of old records
 * @throws KettleJobException if the log record cannot be written
 * @throws KettleDatabaseException if the final commit or disconnect fails
 */
protected void writeLogTableInformation( JobLogTable jobLogTable, LogStatus status ) throws KettleJobException,
 KettleDatabaseException {
 boolean cleanLogRecords = status.equals( LogStatus.END );
 String tableName = jobLogTable.getActualTableName();
 DatabaseMeta logcon = jobLogTable.getDatabaseMeta();
 Database ldb = createDataBase( logcon );
 ldb.shareVariablesWith( this );
 try {
  ldb.connect();
  ldb.setCommit( logCommitSize );
  ldb.writeLogRecord( jobLogTable, status, this, null );
  if ( cleanLogRecords ) {
   ldb.cleanupLogRecords( jobLogTable );
  }
 } catch ( KettleDatabaseException dbe ) {
  addErrors( 1 );
  throw new KettleJobException( "Unable to end processing by writing log record to table " + tableName, dbe );
 } finally {
  try {
   if ( !ldb.isAutoCommit() ) {
    ldb.commitLog( true, jobLogTable );
   }
  } finally {
   // FIX: always release the connection, even if commitLog() above throws;
   // previously a commit failure would leak the log-table connection.
   ldb.disconnect();
  }
 }
}

代码示例来源:origin: pentaho/pentaho-kettle

public void run( IProgressMonitor monitor ) throws InvocationTargetException, InterruptedException {
  // Previews the first rows of a table, reporting progress through the monitor.
  // Results are published via the enclosing scope's `rows` / `rowMeta` fields.
  db = new Database( Spoon.loggingObject, dbMeta );
  try {
   db.connect();
   if ( limit > 0 ) {
    // Cap the result set size on the database side where supported.
    db.setQueryLimit( limit );
   }
   rows = db.getFirstRows( tableName, limit, new ProgressMonitorAdapter( monitor ) );
   rowMeta = db.getReturnRowMeta();
  } catch ( KettleException e ) {
   throw new InvocationTargetException( e, "Couldn't find any rows because of an error :" + e.toString() );
  } finally {
   // Release the connection whether or not the query succeeded.
   db.disconnect();
  }
 }
};

代码示例来源:origin: pentaho/pentaho-kettle

@Override
public SQLStatement getSQLStatements( TransMeta transMeta, StepMeta stepMeta, RowMetaInterface prev,
 Repository repository, IMetaStore metaStore ) {
 // Default answer: no SQL needs to be executed.
 SQLStatement retval = new SQLStatement( stepMeta.getName(), database, null );
 if ( !useDatabase ) {
  // Counter-based sequences don't touch the database at all.
  return retval;
 }
 if ( database == null ) {
  retval.setError( BaseMessages.getString( PKG, "AddSequenceMeta.ErrorMessage.NoConnectionDefined" ) );
  return retval;
 }
 Database db = new Database( loggingObject, database );
 db.shareVariablesWith( transMeta );
 try {
  db.connect();
  if ( db.checkSequenceExists( schemaName, sequenceName ) ) {
   // Sequence is already there: null SQL means "nothing to do".
   retval.setSQL( null );
  } else {
   retval.setSQL( db.getCreateSequenceStatement( sequenceName, startAt, incrementBy, maxValue, true ) );
  }
 } catch ( KettleException e ) {
  retval.setError( BaseMessages.getString( PKG, "AddSequenceMeta.ErrorMessage.UnableToConnectDB" )
   + Const.CR + e.getMessage() );
 } finally {
  db.disconnect();
 }
 return retval;
}

代码示例来源:origin: pentaho/pentaho-kettle

if ( !Utils.isEmpty( schemaTable ) ) {
 Database db = createDatabaseObject();
 db.shareVariablesWith( transMeta );
 try {
  db.connect();
    db.getDDL( schemaTable, fields, ( sequenceName != null && sequenceName.length() != 0 ) ? null
      : keyField, autoIncrement, null, true );
  if ( !Utils.isEmpty( idx_fields ) && !db.checkIndexExists( schemaTable, idx_fields ) ) {
   String indexname = "idx_" + tableName + "_lookup";
   sql += db.getCreateIndexStatement( schemaTable, indexname, idx_fields, false, false, false, true );
   if ( !db.checkIndexExists( schemaTable, idx_fields ) ) {
    String indexname = "idx_" + tableName + "_tk";
    sql += db.getCreateIndexStatement( schemaTable, indexname, idx_fields, true, false, true, true );
   if ( !db.checkSequenceExists( schemaName, sequenceName ) ) {
    sql += db.getCreateSequenceStatement( schemaName, sequenceName, 1L, 1L, -1L, true );
    .getMessage() );
 } finally {
  db.disconnect();

代码示例来源:origin: pentaho/pentaho-kettle

KettleDatabaseRepository.REP_STRING_LENGTH, 0 ) );
sql =
 database.getDDL(
  schemaTable, table, null, false, KettleDatabaseRepository.FIELD_REPOSITORY_LOG_ID_REPOSITORY_LOG,
  false );
   database.execStatements( sql );
   if ( log.isDetailed() ) {
    log.logDetailed( "Created/altered table " + schemaTable );
sql =
 database
  .getDDL( schemaTable, table, null, false, KettleDatabaseRepository.FIELD_VERSION_ID_VERSION, false );
boolean create = false;
if ( !Utils.isEmpty( sql ) ) {
   database.execStatements( sql );
   if ( log.isDetailed() ) {
    log.logDetailed( "Created/altered table " + schemaTable );
   Boolean.valueOf( upgrade ), };
 if ( dryrun ) {
  sql = database.getSQLOutput( null, KettleDatabaseRepository.TABLE_R_VERSION, table, data, null );
  statements.add( sql );
 } else {
  database.execStatement( "INSERT INTO "
   + databaseMeta.getQuotedSchemaTableCombination( null, KettleDatabaseRepository.TABLE_R_VERSION )
   + " VALUES(?, ?, ?, ?, ?)", table, data );

代码示例来源:origin: pentaho/pentaho-kettle

@Test
public void jobFail_columnNotExist() throws KettleException {
 // Arrange: table exists but the expected column does not.
 doReturn( db ).when( jobEntry ).getNewDatabaseFromMeta();
 doNothing().when( db ).connect( anyString(), anyString() );
 doReturn( true ).when( db ).checkTableExists( anyString(), anyString() );
 doReturn( false ).when( db ).checkColumnExists( anyString(), anyString(), anyString() );
 // Act.
 final Result result = jobEntry.execute( new Result(), 0 );
 // Assert: the missing column is reported as an error and the job entry fails,
 // but the connection is still closed.
 assertEquals( "Should be some errors", 1, result.getNrErrors() );
 assertFalse( "Result should be false", result.getResult() );
 verify( db, atLeastOnce() ).disconnect();
}

代码示例来源:origin: pentaho/pentaho-kettle

public void run( IProgressMonitor monitor ) throws InvocationTargetException, InterruptedException {
  // Determines the output fields of a SQL query; publishes via the outer `result` field.
  db = new Database( Spoon.loggingObject, dbMeta );
  try {
   db.connect();
   result = db.getQueryFields( sql, false );
   if ( monitor.isCanceled() ) {
    throw new InvocationTargetException( new Exception( "This operation was cancelled!" ) );
   }
  } catch ( Exception e ) {
   throw new InvocationTargetException( e, "Problem encountered determining query fields: " + e.toString() );
  } finally {
   // Always close the connection, including on cancellation.
   db.disconnect();
  }
 }
};

代码示例来源:origin: pentaho/pentaho-kettle

// Factory method for the Database handle; protected (not private) so tests
// can override it and inject a mock. Added for test purposes.
protected Database getDatabase() {
 // Added for test purposes
 return new Database( loggingObject, databaseMeta );
}

代码示例来源:origin: pentaho/pentaho-kettle

DatabaseMeta ci = transMeta.findDatabase( connectionName );
if ( ci != null ) {
 Database db = new Database( loggingObject, ci );
 try {
  db.connect();
  RowMetaInterface r = db.getTableFields( schemaTable );
  if ( null != r ) {
   String[] fieldNames = r.getFieldNames();

代码示例来源:origin: pentaho/pentaho-kettle

@Test
public void jobFail_tableNotExist() throws KettleException {
 // Arrange: the checked table does not exist in the target database.
 when( jobEntry.getNewDatabaseFromMeta() ).thenReturn( db );
 doNothing().when( db ).connect( anyString(), any() );
 doReturn( false ).when( db ).checkTableExists( anyString(), anyString() );
 // Act.
 final Result result = jobEntry.execute( new Result(), 0 );
 // Assert: missing table is an error, the entry fails, and the connection is closed.
 assertEquals( "Should be error", 1, result.getNrErrors() );
 assertFalse( "Result should be false", result.getResult() );
 verify( db, atLeastOnce() ).disconnect();
}

相关文章

微信公众号

最新文章

更多