本文整理了Java中org.pentaho.di.core.database.Database.shareVariablesWith()
方法的一些代码示例,展示了Database.shareVariablesWith()
的具体用法。这些代码示例主要来源于Github
/Stackoverflow
/Maven
等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。Database.shareVariablesWith()
方法的具体详情如下:
包路径:org.pentaho.di.core.database.Database
类名称:Database
方法名:shareVariablesWith
暂无
代码示例来源:origin: pentaho/pentaho-kettle
protected void checkConnection() throws KettleDatabaseException {
// Validate the connection settings by opening a connection and closing it right away.
Database db = null;
try {
db = new Database( this, connection );
db.shareVariablesWith( this );
db.connect( parentJob.getTransactionId(), null );
} finally {
// Only disconnect when the Database object was actually created.
if ( db != null ) {
db.disconnect();
}
}
}
代码示例来源:origin: pentaho/pentaho-kettle
/**
 * Construct a new Database Connection.
 *
 * @param parentObject the object initiating this connection; used as the logging parent
 *                     and, when it is a VariableSpace, also as a source of variables
 * @param databaseMeta The Database Connection Info to construct the connection with.
 */
public Database( LoggingObjectInterface parentObject, DatabaseMeta databaseMeta ) {
this.parentLoggingObject = parentObject;
this.databaseMeta = databaseMeta;
// Inherit variables from the connection metadata first, then from the parent (if it carries any).
shareVariablesWith( databaseMeta );
if ( parentObject instanceof VariableSpace ) {
shareVariablesWith( (VariableSpace) parentObject );
}
// Create the log channel under the parent so log lines are attributed to the initiator.
log = new LogChannel( this, parentObject );
this.containerObjectId = log.getContainerObjectId();
this.logLevel = log.getLogLevel();
if ( parentObject != null ) {
log.setGatheringMetrics( parentObject.isGatheringMetrics() );
}
// Reset statement/result-set state; the JDBC connection itself is opened later via connect().
pstmt = null;
rowMeta = null;
dbmd = null;
rowlimit = 0;
written = 0;
opened = copy = 0;
if ( log.isDetailed() ) {
log.logDetailed( "New database connection defined" );
}
}
代码示例来源:origin: pentaho/pentaho-kettle
/**
 * Construct a new Database Connection.
 *
 * @param databaseMeta The Database Connection Info to construct the connection with.
 * @deprecated Please specify the parent object so that we can see which object is initiating a database connection
 */
@Deprecated
public Database( DatabaseMeta databaseMeta ) {
this.parentLoggingObject = null;
this.databaseMeta = databaseMeta;
// Inherit variables from the connection metadata (no parent VariableSpace available here).
shareVariablesWith( databaseMeta );
// In this case we don't have the parent object, so we don't know which
// object makes the connection.
// We also don't know what log level to attach to it, so we have to stick to
// the default
// As such, this constructor is @deprecated.
//
log = new LogChannel( this );
logLevel = log.getLogLevel();
containerObjectId = log.getContainerObjectId();
// Reset statement/result-set state; the JDBC connection itself is opened later via connect().
pstmt = null;
rowMeta = null;
dbmd = null;
rowlimit = 0;
written = 0;
opened = copy = 0;
if ( log.isDetailed() ) {
log.logDetailed( "New database connection defined" );
}
}
代码示例来源:origin: pentaho/pentaho-kettle
/**
 * {@inheritDoc}
 *
 * <p>Looks up the target table's field metadata, unless a control file is used.
 * The database connection is now released in a finally block so it is no longer
 * leaked when the field lookup throws or when the table is not found.</p>
 *
 * @see org.pentaho.di.trans.step.BaseStepMeta#getRequiredFields(org.pentaho.di.core.variables.VariableSpace)
 */
@Override
public RowMetaInterface getRequiredFields( final VariableSpace space ) throws KettleException {
if ( !this.useControlFile.getValue() ) {
final Database database = connectToDatabase();
try {
database.shareVariablesWith( space );
RowMetaInterface fields =
database.getTableFieldsMeta(
StringUtils.EMPTY,
space.environmentSubstitute( this.targetTable.getValue() ) );
if ( fields == null ) {
throw new KettleException( MESSAGES.getString( "TeraFastMeta.Exception.TableNotFound" ) );
}
return fields;
} finally {
// Always release the connection, even when the lookup fails or no fields are found.
database.disconnect();
}
}
return null;
}
代码示例来源:origin: pentaho/pentaho-kettle
private void connectDatabase( Database database ) throws KettleDatabaseException {
database.shareVariablesWith( this );
// When unique connections are used, all steps share one transaction, so the
// connect call must be serialized on the transformation instance.
final boolean uniqueConnections = getTransMeta().isUsingUniqueConnections();
if ( uniqueConnections ) {
synchronized ( getTrans() ) {
database.connect( getTrans().getTransactionId(), getPartitionID() );
}
} else {
database.connect( getPartitionID() );
}
database.setCommit( 100 ); // we never get a commit, but it just turns off auto-commit.
if ( log.isDetailed() ) {
logDetailed( BaseMessages.getString( PKG, "DatabaseLookup.Log.ConnectedToDatabase" ) );
}
}
}
代码示例来源:origin: pentaho/pentaho-kettle
/**
 * Writes step information to a step logging table (if one has been configured).
 *
 * @throws KettleException if any errors occur during logging
 */
protected void writeStepLogInformation() throws KettleException {
final StepLogTable stepLogTable = getTransMeta().getStepLogTable();
Database db = null;
try {
db = createDataBase( stepLogTable.getDatabaseMeta() );
db.shareVariablesWith( this );
db.connect();
db.setCommit( logCommitSize );
// Record a START entry for every step in the transformation, then purge old rows.
for ( StepMetaDataCombi combi : getSteps() ) {
db.writeLogRecord( stepLogTable, LogStatus.START, combi, null );
}
db.cleanupLogRecords( stepLogTable );
} catch ( Exception e ) {
throw new KettleException( BaseMessages.getString( PKG,
"Trans.Exception.UnableToWriteStepInformationToLogTable" ), e );
} finally {
disconnectDb( db );
}
}
代码示例来源:origin: pentaho/pentaho-kettle
/**
 * Writes information to Job Log table. Cleans old records, in case job is finished.
 *
 * @param jobLogTable the job log table definition to write to
 * @param status      the job status being recorded; END additionally triggers cleanup
 * @throws KettleJobException      if the log record cannot be written
 * @throws KettleDatabaseException declared for callers; write errors are wrapped in
 *                                 KettleJobException in this method
 */
protected void writeLogTableInformation( JobLogTable jobLogTable, LogStatus status ) throws KettleJobException,
KettleDatabaseException {
// Old log records are only purged once the job has ended.
boolean cleanLogRecords = status.equals( LogStatus.END );
String tableName = jobLogTable.getActualTableName();
DatabaseMeta logcon = jobLogTable.getDatabaseMeta();
Database ldb = createDataBase( logcon );
ldb.shareVariablesWith( this );
try {
ldb.connect();
ldb.setCommit( logCommitSize );
ldb.writeLogRecord( jobLogTable, status, this, null );
if ( cleanLogRecords ) {
ldb.cleanupLogRecords( jobLogTable );
}
} catch ( KettleDatabaseException dbe ) {
addErrors( 1 );
throw new KettleJobException( "Unable to end processing by writing log record to table " + tableName, dbe );
} finally {
// NOTE(review): this finally block also runs when connect() failed, and commitLog()
// can itself throw from here, masking the original error — confirm Database tolerates
// commit/disconnect on an unconnected instance.
if ( !ldb.isAutoCommit() ) {
ldb.commitLog( true, jobLogTable );
}
ldb.disconnect();
}
}
代码示例来源:origin: pentaho/pentaho-kettle
/**
 * Builds the SQL needed to create the target sequence when it does not exist yet.
 * Returns a statement with null SQL when there is nothing to do, or with an error
 * message when no connection is defined or the database cannot be reached.
 */
@Override
public SQLStatement getSQLStatements( TransMeta transMeta, StepMeta stepMeta, RowMetaInterface prev,
Repository repository, IMetaStore metaStore ) {
SQLStatement retval = new SQLStatement( stepMeta.getName(), database, null ); // default: nothing to do!
if ( useDatabase ) {
if ( database == null ) {
retval.setError( BaseMessages.getString( PKG, "AddSequenceMeta.ErrorMessage.NoConnectionDefined" ) );
} else {
Database db = new Database( loggingObject, database );
db.shareVariablesWith( transMeta );
try {
db.connect();
if ( db.checkSequenceExists( schemaName, sequenceName ) ) {
retval.setSQL( null ); // null means: nothing to do
} else {
String createSql = db.getCreateSequenceStatement( sequenceName, startAt, incrementBy, maxValue, true );
retval.setSQL( createSql );
}
} catch ( KettleException e ) {
retval.setError( BaseMessages.getString( PKG, "AddSequenceMeta.ErrorMessage.UnableToConnectDB" )
+ Const.CR + e.getMessage() );
} finally {
db.disconnect();
}
}
}
return retval;
}
代码示例来源:origin: pentaho/pentaho-kettle
if ( !Utils.isEmpty( tableName ) ) {
Database db = new Database( loggingObject, databaseMeta );
db.shareVariablesWith( transMeta );
try {
db.connect();
代码示例来源:origin: pentaho/pentaho-kettle
if ( !Utils.isEmpty( tablename ) ) {
Database db = new Database( loggingObject, databaseMeta );
db.shareVariablesWith( transMeta );
try {
db.connect();
代码示例来源:origin: pentaho/pentaho-kettle
db.shareVariablesWith( transMeta );
try {
db.connect();
代码示例来源:origin: pentaho/pentaho-kettle
/**
 * Write job entry log information.
 *
 * <p>Records a START entry for every job entry copy in the job entry log table and
 * purges old records. The finally block now guards against {@code db} still being
 * null (e.g. when {@code createDataBase} throws), which previously caused a
 * NullPointerException that masked the original exception.</p>
 *
 * @throws KettleException the kettle exception
 */
protected void writeJobEntryLogInformation() throws KettleException {
Database db = null;
JobEntryLogTable jobEntryLogTable = getJobMeta().getJobEntryLogTable();
try {
db = createDataBase( jobEntryLogTable.getDatabaseMeta() );
db.shareVariablesWith( this );
db.connect();
db.setCommit( logCommitSize );
for ( JobEntryCopy copy : getJobMeta().getJobCopies() ) {
db.writeLogRecord( jobEntryLogTable, LogStatus.START, copy, this );
}
db.cleanupLogRecords( jobEntryLogTable );
} catch ( Exception e ) {
throw new KettleException( BaseMessages.getString( PKG, "Job.Exception.UnableToJobEntryInformationToLogTable" ),
e );
} finally {
// Guard: db is null when createDataBase() threw; an unguarded access here would
// raise an NPE and hide the exception that is propagating from the try block.
if ( db != null ) {
if ( !db.isAutoCommit() ) {
db.commitLog( true, jobEntryLogTable );
}
db.disconnect();
}
}
}
代码示例来源:origin: pentaho/pentaho-kettle
data.db.shareVariablesWith( this );
try {
if ( getTransMeta().isUsingUniqueConnections() ) {
代码示例来源:origin: pentaho/pentaho-kettle
if ( databaseMeta != null ) {
Database database = new Database( loggingObject, databaseMeta );
database.shareVariablesWith( jobMeta );
try {
database.connect();
代码示例来源:origin: pentaho/pentaho-kettle
if ( databaseMeta != null ) {
Database database = new Database( loggingObject, databaseMeta );
database.shareVariablesWith( transMeta );
try {
database.connect();
代码示例来源:origin: pentaho/pentaho-kettle
/**
 * Initializes the step: validates the connection metadata, opens the database
 * connection (serialized when unique connections are in use) and configures the
 * commit size. Returns true on success, false otherwise.
 */
public boolean init( StepMetaInterface smi, StepDataInterface sdi ) {
meta = (InsertUpdateMeta) smi;
data = (InsertUpdateData) sdi;
// Let the base step initialize first; bail out early if that fails.
if ( !super.init( smi, sdi ) ) {
return false;
}
try {
if ( meta.getDatabaseMeta() == null ) {
logError( BaseMessages.getString( PKG, "InsertUpdate.Init.ConnectionMissing", getStepname() ) );
return false;
}
data.db = new Database( this, meta.getDatabaseMeta() );
data.db.shareVariablesWith( this );
if ( getTransMeta().isUsingUniqueConnections() ) {
// Unique connections share one transaction; serialize connect on the transformation.
synchronized ( getTrans() ) {
data.db.connect( getTrans().getTransactionId(), getPartitionID() );
}
} else {
data.db.connect( getPartitionID() );
}
data.db.setCommit( meta.getCommitSize( this ) );
return true;
} catch ( KettleException ke ) {
logError( BaseMessages.getString( PKG, "InsertUpdate.Log.ErrorOccurredDuringStepInitialize" )
+ ke.getMessage() );
return false;
}
}
代码示例来源:origin: pentaho/pentaho-kettle
data.db.shareVariablesWith( this );
try {
if ( getTransMeta().isUsingUniqueConnections() ) {
代码示例来源:origin: pentaho/pentaho-kettle
data.db.shareVariablesWith( this );
try {
if ( getTransMeta().isUsingUniqueConnections() ) {
代码示例来源:origin: pentaho/pentaho-kettle
data.db.shareVariablesWith( this );
try {
if ( getTransMeta().isUsingUniqueConnections() ) {
代码示例来源:origin: pentaho/pentaho-kettle
data.db.shareVariablesWith( this );
if ( !Utils.isEmpty( meta.getSchemaname() ) ) {
data.realSchemaname = environmentSubstitute( meta.getSchemaname() );
内容来源于网络,如有侵权,请联系作者删除!