Usage of the org.pentaho.di.core.database.Database.setCommit() method, with code examples


This article collects a number of Java code examples for the org.pentaho.di.core.database.Database.setCommit() method, showing how Database.setCommit() is used in practice. The examples were extracted from selected open-source projects hosted on platforms such as GitHub, Stack Overflow and Maven, and should serve as a useful reference. Details of Database.setCommit() are as follows:
Package: org.pentaho.di.core.database
Class: Database
Method: setCommit

About Database.setCommit

Specify after how many rows a commit needs to occur when inserting or updating values.
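
Before the collected examples, here is a minimal standalone sketch of a typical setCommit() call. It is not taken from the project; the parent LoggingObjectInterface and the DatabaseMeta describing the connection are assumed to be supplied by the surrounding transformation or job, and the class and method names are placeholders.

import org.pentaho.di.core.database.Database;
import org.pentaho.di.core.database.DatabaseMeta;
import org.pentaho.di.core.exception.KettleDatabaseException;
import org.pentaho.di.core.logging.LoggingObjectInterface;

public class SetCommitSketch {
 // Hypothetical helper: 'parent' and 'databaseMeta' are placeholders that the
 // surrounding transformation or job would normally provide.
 public static void writeWithBatchedCommits( LoggingObjectInterface parent,
  DatabaseMeta databaseMeta ) throws KettleDatabaseException {
  Database db = new Database( parent, databaseMeta );
  try {
   db.connect();
   // Commit after every 500 inserted/updated rows; a non-zero value also
   // switches the underlying JDBC connection out of auto-commit mode.
   db.setCommit( 500 );
   // ... perform inserts/updates through 'db' here ...
  } finally {
   db.disconnect();
  }
 }
}

As the setAutoCommit() example below suggests, passing 0 leaves the connection in auto-commit mode, while a very large value effectively disables automatic commits altogether.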

Code examples

Code example source: pentaho/pentaho-kettle

public synchronized void setAutoCommit( boolean autocommit ) {
 if ( !autocommit ) {
  database.setCommit( 99999999 );
 } else {
  database.setCommit( 0 );
 }
}

Code example source: pentaho/pentaho-kettle

@Override
public Long getNextBatchId( DatabaseMeta dbm, Database ldb,
 String schemaName, String tableName, String fieldName ) throws KettleDatabaseException {
 // Always take off autocommit.
 ldb.setCommit( 10 );
 //
 // Temporary work-around to handle batch-id from extended options
 // Eventually want this promoted to proper dialogs and such
 //
 Map<String, String> connectionExtraOptions = this.getExtraOptions();
 String sequenceProp = this.getPluginId() + "." + SEQUENCE_FOR_BATCH_ID;
 String autoIncSQLProp = this.getPluginId() + "." + AUTOINCREMENT_SQL_FOR_BATCH_ID;
 if ( connectionExtraOptions != null ) {
  if ( this.supportsSequences() && connectionExtraOptions.containsKey( sequenceProp ) ) {
   return getNextBatchIdUsingSequence( connectionExtraOptions.get( sequenceProp ), schemaName, dbm, ldb );
  } else if ( this.supportsAutoInc() && connectionExtraOptions.containsKey( autoIncSQLProp ) ) {
   return getNextBatchIdUsingAutoIncSQL( connectionExtraOptions.get( autoIncSQLProp ), dbm, ldb );
  }
 }
 return getNextBatchIdUsingLockTables( dbm, ldb, schemaName, tableName, fieldName );
}

Code example source: pentaho/pentaho-kettle

db.shareVariablesWith( this );
db.connect();
db.setCommit( logCommitSize );

Code example source: pentaho/pentaho-kettle

ldb.shareVariablesWith( this );
ldb.connect();
ldb.setCommit( logCommitSize );

Code example source: pentaho/pentaho-kettle

private void connectDatabase( Database database ) throws KettleDatabaseException {
  database.shareVariablesWith( this );
  if ( getTransMeta().isUsingUniqueConnections() ) {
   synchronized ( getTrans() ) {
    database.connect( getTrans().getTransactionId(), getPartitionID() );
   }
  } else {
   database.connect( getPartitionID() );
  }

  database.setCommit( 100 ); // we never get a commit, but it just turns off auto-commit.

  if ( log.isDetailed() ) {
   logDetailed( BaseMessages.getString( PKG, "DatabaseLookup.Log.ConnectedToDatabase" ) );
  }
 }
}

Code example source: pentaho/pentaho-kettle

/**
 * Writes step information to a step logging table (if one has been configured).
 *
 * @throws KettleException if any errors occur during logging
 */
protected void writeStepLogInformation() throws KettleException {
 Database db = null;
 StepLogTable stepLogTable = getTransMeta().getStepLogTable();
 try {
  db = createDataBase( stepLogTable.getDatabaseMeta() );
  db.shareVariablesWith( this );
  db.connect();
  db.setCommit( logCommitSize );
  for ( StepMetaDataCombi combi : getSteps() ) {
   db.writeLogRecord( stepLogTable, LogStatus.START, combi, null );
  }
  db.cleanupLogRecords( stepLogTable );
 } catch ( Exception e ) {
  throw new KettleException( BaseMessages.getString( PKG,
   "Trans.Exception.UnableToWriteStepInformationToLogTable" ), e );
 } finally {
  disconnectDb( db );
 }
}

Code example source: pentaho/pentaho-kettle

/**
 * Writes information to Job Log table. Cleans old records, in case job is finished.
 */
protected void writeLogTableInformation( JobLogTable jobLogTable, LogStatus status ) throws KettleJobException,
 KettleDatabaseException {
 boolean cleanLogRecords = status.equals( LogStatus.END );
 String tableName = jobLogTable.getActualTableName();
 DatabaseMeta logcon = jobLogTable.getDatabaseMeta();
 Database ldb = createDataBase( logcon );
 ldb.shareVariablesWith( this );
 try {
  ldb.connect();
  ldb.setCommit( logCommitSize );
  ldb.writeLogRecord( jobLogTable, status, this, null );
  if ( cleanLogRecords ) {
   ldb.cleanupLogRecords( jobLogTable );
  }
 } catch ( KettleDatabaseException dbe ) {
  addErrors( 1 );
  throw new KettleJobException( "Unable to end processing by writing log record to table " + tableName, dbe );
 } finally {
  if ( !ldb.isAutoCommit() ) {
   ldb.commitLog( true, jobLogTable );
  }
  ldb.disconnect();
 }
}

Code example source: pentaho/pentaho-kettle

/**
 * Write job entry log information.
 *
 * @throws KettleException
 *           the kettle exception
 */
protected void writeJobEntryLogInformation() throws KettleException {
 Database db = null;
 JobEntryLogTable jobEntryLogTable = getJobMeta().getJobEntryLogTable();
 try {
  db = createDataBase( jobEntryLogTable.getDatabaseMeta() );
  db.shareVariablesWith( this );
  db.connect();
  db.setCommit( logCommitSize );
  for ( JobEntryCopy copy : getJobMeta().getJobCopies() ) {
   db.writeLogRecord( jobEntryLogTable, LogStatus.START, copy, this );
  }
  db.cleanupLogRecords( jobEntryLogTable );
 } catch ( Exception e ) {
  throw new KettleException( BaseMessages.getString( PKG, "Job.Exception.UnableToJobEntryInformationToLogTable" ),
    e );
 } finally {
  if ( !db.isAutoCommit() ) {
   db.commitLog( true, jobEntryLogTable );
  }
  db.disconnect();
 }
}

Code example source: pentaho/pentaho-kettle

logDetailed( BaseMessages.getString( PKG, "CombinationLookup.Log.ConnectedToDB" ) );
data.db.setCommit( meta.getCommitSize() );

Code example source: pentaho/pentaho-kettle

logDetailed( BaseMessages.getString( PKG, "DBProc.Log.AutoCommit" ) );
data.db.setCommit( 9999 );

Code example source: pentaho/pentaho-kettle

db.shareVariablesWith( this );
db.connect();
db.setCommit( logCommitSize );

Code example source: pentaho/pentaho-kettle

public boolean init( StepMetaInterface smi, StepDataInterface sdi ) {
 meta = (InsertUpdateMeta) smi;
 data = (InsertUpdateData) sdi;
 if ( super.init( smi, sdi ) ) {
  try {
   if ( meta.getDatabaseMeta() == null ) {
    logError( BaseMessages.getString( PKG, "InsertUpdate.Init.ConnectionMissing", getStepname() ) );
    return false;
   }
   data.db = new Database( this, meta.getDatabaseMeta() );
   data.db.shareVariablesWith( this );
   if ( getTransMeta().isUsingUniqueConnections() ) {
    synchronized ( getTrans() ) {
     data.db.connect( getTrans().getTransactionId(), getPartitionID() );
    }
   } else {
    data.db.connect( getPartitionID() );
   }
   data.db.setCommit( meta.getCommitSize( this ) );
   return true;
  } catch ( KettleException ke ) {
   logError( BaseMessages.getString( PKG, "InsertUpdate.Log.ErrorOccurredDuringStepInitialize" )
    + ke.getMessage() );
  }
 }
 return false;
}

Code example source: pentaho/pentaho-kettle

db.shareVariablesWith( this );
db.connect();
db.setCommit( logCommitSize );

Code example source: pentaho/pentaho-kettle

data.db.setCommit( 100 ); // we never get a commit, but it just turns off auto-commit.

Code example source: pentaho/pentaho-kettle

data.db.setCommit( meta.getCommitSize( this ) );

Code example source: pentaho/pentaho-kettle

data.db.setCommit( meta.getCommitSize( this ) );

Code example source: pentaho/pentaho-kettle

data.db.setCommit( 100 ); // needed for PGSQL it seems...

Code example source: pentaho/pentaho-kettle

data.db.setCommit( meta.getCommitSize() );

Code example source: pentaho/pentaho-kettle

data.db.connect( getPartitionID() );
data.db.setCommit( data.commitSize );

Code example source: pentaho/pentaho-kettle

logDetailed( BaseMessages.getString( PKG, "DimensionLookup.Log.ConnectedToDB" ) );
data.db.setCommit( meta.getCommitSize() );
