org.pentaho.di.core.database.Database.isAutoCommit()方法的使用及代码示例

x33g5p2x  于2022-01-18 转载在 其他  
字(7.8k)|赞(0)|评价(0)|浏览(186)

本文整理了Java中org.pentaho.di.core.database.Database.isAutoCommit()方法的一些代码示例,展示了Database.isAutoCommit()的具体用法。这些代码示例主要来源于Github/Stackoverflow/Maven等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。Database.isAutoCommit()方法的具体详情如下:
包路径:org.pentaho.di.core.database.Database
类名称:Database
方法名:isAutoCommit

Database.isAutoCommit介绍

暂无

代码示例

代码示例来源:origin: pentaho/pentaho-kettle

/**
 * Commits any pending work (only when the connection is transactional) and then
 * disconnects the given database. A {@code null} database is silently ignored.
 *
 * @param db the database to close, may be {@code null}
 * @throws KettleDatabaseException if the commit fails
 */
private void disconnectDb( Database db ) throws KettleDatabaseException {
 if ( db != null ) {
  if ( !db.isAutoCommit() ) {
   // Auto-commit connections have nothing pending; only transactional ones need this.
   db.commit( true );
  }
  db.disconnect();
 }
}

代码示例来源:origin: pentaho/pentaho-kettle

/**
 * Works around MySQL/InnoDB behaviour: a plain read in another session is treated as an
 * open transaction, which can block subsequent locks. Committing closes that implicit
 * read transaction. Only applies to MySQL variants on non-auto-commit connections.
 *
 * @throws KettleDatabaseException when the commit fails
 */
public void closeReadTransaction() throws KettleDatabaseException {
 if ( !databaseMeta.isMySQLVariant() ) {
  return;
 }
 if ( database.isAutoCommit() ) {
  return;
 }
 database.commit();
}

代码示例来源:origin: pentaho/pentaho-kettle

/**
 * Closes all cached prepared statements, commits pending work when the connection is
 * not auto-committing, marks the repository as disconnected, and always disconnects
 * the underlying database connection.
 * Errors are logged rather than rethrown; the finally block guarantees cleanup runs.
 */
public synchronized void disconnect() {
 try {
  repository.connectionDelegate.closeStepAttributeLookupPreparedStatement();
  repository.connectionDelegate.closeTransAttributeLookupPreparedStatement();
  repository.connectionDelegate.closeLookupJobEntryAttribute();
  // Close every cached statement; a failure on one is logged and does not stop the rest.
  for ( String sql : sqlMap.keySet() ) {
   PreparedStatement ps = sqlMap.get( sql );
   try {
    ps.close();
   } catch ( SQLException e ) {
    log.logError( "Error closing prepared statement: " + sql, e );
   }
  }
  // Flush pending changes only when the connection is transactional.
  if ( !database.isAutoCommit() ) {
   commit();
  }
  repository.setConnected( false );
 } catch ( KettleException dbe ) {
  log.logError( "Error disconnecting from database : " + dbe.getMessage() );
 } finally {
  // Always release the connection and drop the statement cache.
  database.disconnect();
  sqlMap.clear();
 }
}

代码示例来源:origin: pentaho/pentaho-kettle

if ( !isAutoCommit() ) {
 if ( useBatchInsert ) {
  debug = "insertRow add batch";
 if ( !isAutoCommit() && ( written % commitsize ) == 0 ) {
  if ( useBatchInsert ) {
   isBatchUpdate = true;

代码示例来源:origin: pentaho/pentaho-kettle

try {
 if ( ps != null ) {
  if ( !isAutoCommit() ) {

代码示例来源:origin: pentaho/pentaho-kettle

/**
 * Flushes the pending attribute-insert prepared statements and commits the underlying
 * repository connection (skipped when the connection is in auto-commit mode), then
 * clears the ID counters.
 *
 * @throws KettleException if closing the statements or committing fails
 */
public synchronized void commit() throws KettleException {
 try {
  closeJobAttributeInsertPreparedStatement();
  closeStepAttributeInsertPreparedStatement();
  closeTransAttributeInsertPreparedStatement();
  if ( !database.isAutoCommit() ) {
   database.commit();
  }
  // Also, clear the counters, reducing the risk of collisions!
  //
  Counters.getInstance().clear();
 } catch ( KettleException dbe ) {
  throw new KettleException( "Unable to commit repository connection", dbe );
 }
}

代码示例来源:origin: pentaho/pentaho-kettle

try {
 if ( ps != null ) {
  if ( !isAutoCommit() ) {

代码示例来源:origin: pentaho/pentaho-kettle

/**
 * Finishes the CombinationLookup step: on a transactional connection the pending work
 * is committed (clean run) or rolled back (errors occurred), and the database
 * connection is always released.
 */
public void dispose( StepMetaInterface smi, StepDataInterface sdi ) {
 meta = (CombinationLookupMeta) smi;
 data = (CombinationLookupData) sdi;
 Database db = data.db;
 if ( db != null ) {
  try {
   if ( !db.isAutoCommit() ) {
    // Commit on a clean run, roll back if any row errored out.
    if ( getErrors() == 0 ) {
     db.commit();
    } else {
     db.rollback();
    }
   }
  } catch ( KettleDatabaseException ex ) {
   logError( BaseMessages.getString( PKG, "CombinationLookup.Log.UnexpectedError" ) + " : " + ex.toString() );
  } finally {
   db.disconnect();
  }
 }
 super.dispose( smi, sdi );
}

代码示例来源:origin: pentaho/pentaho-kettle

@Override
public void dispose( StepMetaInterface smi, StepDataInterface sdi ) {
 // Wrap up the DimensionLookup step: commit or roll back pending transactional
 // work depending on whether errors occurred, then always disconnect.
 meta = (DimensionLookupMeta) smi;
 data = (DimensionLookupData) sdi;
 if ( data.db != null ) {
  try {
   if ( !data.db.isAutoCommit() ) {
    boolean cleanRun = getErrors() == 0;
    if ( cleanRun ) {
     data.db.commit();
    } else {
     data.db.rollback();
    }
   }
  } catch ( KettleDatabaseException dbe ) {
   logError( BaseMessages.getString( PKG, "DimensionLookup.Log.ErrorOccurredInProcessing" ) + dbe.getMessage() );
  } finally {
   data.db.disconnect();
  }
 }
 super.dispose( smi, sdi );
}
}

代码示例来源:origin: pentaho/pentaho-kettle

/**
 * Finishes the Delete step: commits (clean run) or rolls back (errors) pending work on
 * a transactional connection, closes the update statement, and always disconnects.
 * Any database failure is logged and counted as a step error.
 */
public void dispose( StepMetaInterface smi, StepDataInterface sdi ) {
 meta = (DeleteMeta) smi;
 data = (DeleteData) sdi;
 Database db = data.db;
 if ( db != null ) {
  try {
   if ( !db.isAutoCommit() ) {
    if ( getErrors() == 0 ) {
     db.commit();
    } else {
     db.rollback();
    }
   }
   db.closeUpdate();
  } catch ( KettleDatabaseException ex ) {
   logError( BaseMessages.getString( PKG, "Delete.Log.UnableToCommitUpdateConnection" )
    + db + "] :" + ex.toString() );
   setErrors( 1 );
  } finally {
   db.disconnect();
  }
 }
 super.dispose( smi, sdi );
}

代码示例来源:origin: pentaho/pentaho-kettle

/**
 * Finishes the InsertUpdate step: commits or rolls back pending transactional work
 * (depending on the error count), closes both prepared statements, and disconnects.
 * Database failures during cleanup are logged and counted as step errors.
 */
public void dispose( StepMetaInterface smi, StepDataInterface sdi ) {
 meta = (InsertUpdateMeta) smi;
 data = (InsertUpdateData) sdi;
 if ( data.db == null ) {
  // Nothing to clean up; still let the base class finish.
  super.dispose( smi, sdi );
  return;
 }
 try {
  if ( !data.db.isAutoCommit() ) {
   if ( getErrors() == 0 ) {
    data.db.commit();
   } else {
    data.db.rollback();
   }
  }
  data.db.closeUpdate();
  data.db.closeInsert();
 } catch ( KettleDatabaseException e ) {
  logError( BaseMessages.getString( PKG, "InsertUpdate.Log.UnableToCommitConnection" ) + e.toString() );
  setErrors( 1 );
 } finally {
  data.db.disconnect();
 }
 super.dispose( smi, sdi );
}

代码示例来源:origin: pentaho/pentaho-kettle

if ( getConnection().getAutoCommit() != isAutoCommit() ) {
 setAutoCommit( isAutoCommit() );

代码示例来源:origin: pentaho/pentaho-kettle

/**
 * Finishes the Update step: on a transactional connection either flushes the remaining
 * batch and commits (clean run) or rolls back (errors), then closes both prepared
 * statements and always disconnects.
 */
public void dispose( StepMetaInterface smi, StepDataInterface sdi ) {
 meta = (UpdateMeta) smi;
 data = (UpdateData) sdi;
 Database db = data.db;
 if ( db != null ) {
  try {
   if ( !db.isAutoCommit() ) {
    if ( getErrors() == 0 ) {
     // Flush any still-batched updates before committing.
     db.emptyAndCommit( data.prepStatementUpdate, meta.useBatchUpdate() );
    } else {
     db.rollback();
    }
   }
   db.closePreparedStatement( data.prepStatementUpdate );
   db.closePreparedStatement( data.prepStatementLookup );
  } catch ( KettleDatabaseException ex ) {
   logError( BaseMessages.getString( PKG, "Update.Log.UnableToCommitUpdateConnection" )
    + db + "] :" + ex.toString() );
   setErrors( 1 );
  } finally {
   db.disconnect();
  }
 }
 super.dispose( smi, sdi );
}

代码示例来源:origin: pentaho/pentaho-kettle

@Override
public void dispose( StepMetaInterface smi, StepDataInterface sdi ) {
 // Finish the ExecSQLRow step: commit or roll back pending transactional work
 // based on the error count, then always disconnect.
 meta = (ExecSQLRowMeta) smi;
 data = (ExecSQLRowData) sdi;
 if ( log.isBasic() ) {
  logBasic( BaseMessages.getString( PKG, "ExecSQLRow.Log.FinishingReadingQuery" ) );
 }
 Database db = data.db;
 if ( db != null ) {
  try {
   if ( !db.isAutoCommit() ) {
    if ( getErrors() == 0 ) {
     db.commit();
    } else {
     db.rollback();
    }
   }
  } catch ( KettleDatabaseException ex ) {
   // NOTE(review): the message key "Update.Log.UnableToCommitUpdateConnection" looks
   // copy-pasted from the Update step — kept as-is to preserve behavior.
   logError( BaseMessages.getString( PKG, "Update.Log.UnableToCommitUpdateConnection" )
    + db + "] :" + ex.toString() );
   setErrors( 1 );
  } finally {
   db.disconnect();
  }
 }
 super.dispose( smi, sdi );
}

代码示例来源:origin: pentaho/pentaho-kettle

/**
 * Writes a job log record to the job log table and, when the job has ended, cleans up
 * old log records. The log connection is always committed (if transactional) and
 * disconnected, even when writing fails.
 *
 * @param jobLogTable the job log table definition to write to
 * @param status the current log status; {@link LogStatus#END} triggers cleanup
 * @throws KettleJobException when the log record cannot be written
 * @throws KettleDatabaseException on commit/disconnect failures
 */
protected void writeLogTableInformation( JobLogTable jobLogTable, LogStatus status ) throws KettleJobException,
 KettleDatabaseException {
 boolean endOfJob = status.equals( LogStatus.END );
 String logTableName = jobLogTable.getActualTableName();
 DatabaseMeta logDbMeta = jobLogTable.getDatabaseMeta();
 Database logDb = createDataBase( logDbMeta );
 logDb.shareVariablesWith( this );
 try {
  logDb.connect();
  logDb.setCommit( logCommitSize );
  logDb.writeLogRecord( jobLogTable, status, this, null );
  if ( endOfJob ) {
   // The job is done: purge old entries from the log table.
   logDb.cleanupLogRecords( jobLogTable );
  }
 } catch ( KettleDatabaseException dbe ) {
  addErrors( 1 );
  throw new KettleJobException( "Unable to end processing by writing log record to table " + logTableName, dbe );
 } finally {
  if ( !logDb.isAutoCommit() ) {
   logDb.commitLog( true, jobLogTable );
  }
  logDb.disconnect();
 }
}

代码示例来源:origin: pentaho/pentaho-kettle

/**
 * Writes a START log record for every job entry copy to the job entry log table and
 * cleans up old records. The log connection is committed (when transactional) and
 * disconnected in all cases.
 *
 * @throws KettleException when connecting or writing the log records fails
 */
protected void writeJobEntryLogInformation() throws KettleException {
 Database db = null;
 JobEntryLogTable jobEntryLogTable = getJobMeta().getJobEntryLogTable();
 try {
  db = createDataBase( jobEntryLogTable.getDatabaseMeta() );
  db.shareVariablesWith( this );
  db.connect();
  db.setCommit( logCommitSize );
  for ( JobEntryCopy copy : getJobMeta().getJobCopies() ) {
   db.writeLogRecord( jobEntryLogTable, LogStatus.START, copy, this );
  }
  db.cleanupLogRecords( jobEntryLogTable );
 } catch ( Exception e ) {
  throw new KettleException( BaseMessages.getString( PKG, "Job.Exception.UnableToJobEntryInformationToLogTable" ),
    e );
 } finally {
  // BUGFIX: db remains null when createDataBase()/connect() throws; the previous code
  // then dereferenced it here, raising a NullPointerException from the finally block
  // that masked the original KettleException.
  if ( db != null ) {
   if ( !db.isAutoCommit() ) {
    db.commitLog( true, jobEntryLogTable );
   }
   db.disconnect();
  }
 }
}

代码示例来源:origin: pentaho/pentaho-kettle

"Trans.Exception.UnableToWriteLogChannelInformationToLogTable" ), e );
} finally {
 if ( !db.isAutoCommit() ) {
  db.commit( true );

代码示例来源:origin: pentaho/pentaho-kettle

if ( !ldb.isAutoCommit() ) {
 ldb.commitLog( true, transMeta.getTransLogTable() );

代码示例来源:origin: pentaho/pentaho-kettle

data.result = data.db.execStatements( data.sql );
if ( !data.db.isAutoCommit() ) {
 data.db.commit();

代码示例来源:origin: pentaho/pentaho-kettle

if ( !data.db.isAutoCommit() ) {
 if ( meta.getCommitSize() == 1 ) {
  data.db.commit();

相关文章

微信公众号

最新文章

更多