本文整理了Java中org.pentaho.di.core.database.Database.execStatement()
方法的一些代码示例,展示了Database.execStatement()
的具体用法。这些代码示例主要来源于Github
/Stackoverflow
/Maven
等平台,是从一些精选项目中提取出来的代码,具有较强的参考意义,能在一定程度上帮助到你。Database.execStatement()
方法的具体详情如下:
包路径:org.pentaho.di.core.database.Database
类名称:Database
方法名:execStatement
[英]Execute an SQL statement on the database connection (has to be open)
[中]在数据库连接上执行SQL语句(必须打开)
代码示例来源:origin: pentaho/pentaho-kettle
/**
 * Execute an SQL statement on the database connection (has to be open)
 * <p>
 * Convenience overload: delegates to the three-argument
 * {@code execStatement( sql, rowMeta, data )} with {@code null} metadata and
 * data, i.e. the statement runs without any bind parameters.
 *
 * @param sql The SQL to execute
 * @return a Result object indicating the number of lines read, deleted, inserted, updated, ...
 * @throws KettleDatabaseException in case anything goes wrong.
 */
public Result execStatement( String sql ) throws KettleDatabaseException {
return execStatement( sql, null, null );
}
代码示例来源:origin: pentaho/pentaho-kettle
data.result = data.db.execStatement( data.sql );
} else {
data.result = data.db.execStatements( data.sql );
代码示例来源:origin: pentaho/pentaho-kettle
// Runs the bulk-load command on a worker thread. Exceptions cannot propagate
// across threads, so any failure is stored in the 'ex' field for the spawning
// thread to inspect after this thread finishes.
@Override
public void run() {
try {
data.db.execStatement( loadCommand );
} catch ( Exception ex ) {
// NOTE(review): failure is only recorded, not logged here — assumes the
// owner checks 'this.ex' once the thread completes; confirm with caller.
this.ex = ex;
}
}
代码示例来源:origin: pentaho/pentaho-kettle
/**
 * Executes a single SQL statement on the repository's underlying database
 * connection (delegates to {@code Database.execStatement}).
 *
 * @param sql the SQL statement to execute
 * @throws KettleException if execution fails
 */
public void execStatement( String sql ) throws KettleException {
connectionDelegate.getDatabase().execStatement( sql );
}
代码示例来源:origin: pentaho/pentaho-kettle
/**
 * Empties the given table (optionally schema-qualified).
 * <p>
 * Outside a connection group the database-specific TRUNCATE statement is used;
 * inside a group a plain DELETE is issued instead — presumably because a
 * TRUNCATE may not participate in the shared transaction (confirm).
 *
 * @param schema    the schema of the table, may be {@code null}
 * @param tablename the table to empty
 * @throws KettleDatabaseException if the database does not support truncation
 *                                 or the statement fails
 */
public void truncateTable( String schema, String tablename ) throws KettleDatabaseException {
  if ( !Utils.isEmpty( connectionGroup ) ) {
    // Shared/grouped connection: delete all rows instead of truncating.
    execStatement( "DELETE FROM " + databaseMeta.getQuotedSchemaTableCombination( schema, tablename ) );
    return;
  }
  String sql = databaseMeta.getTruncateTableStatement( schema, tablename );
  if ( sql == null ) {
    throw new KettleDatabaseException( "Truncate table not supported by "
      + databaseMeta.getDatabaseInterface().getPluginName() );
  }
  execStatement( sql );
}
代码示例来源:origin: pentaho/pentaho-kettle
/**
 * Empties the given table (no schema qualification).
 * <p>
 * Outside a connection group the database-specific TRUNCATE statement is used;
 * inside a group a plain DELETE of all rows is issued instead.
 *
 * @param tablename the table to empty
 * @throws KettleDatabaseException if the database does not support truncation
 *                                 or the statement fails
 */
public void truncateTable( String tablename ) throws KettleDatabaseException {
  if ( !Utils.isEmpty( connectionGroup ) ) {
    // Shared/grouped connection: delete all rows instead of truncating.
    execStatement( "DELETE FROM " + databaseMeta.quoteField( tablename ) );
    return;
  }
  String sql = databaseMeta.getTruncateTableStatement( null, tablename );
  if ( sql == null ) {
    throw new KettleDatabaseException( "Truncate table not supported by "
      + databaseMeta.getDatabaseInterface().getPluginName() );
  }
  execStatement( sql );
}
代码示例来源:origin: pentaho/pentaho-kettle
/**
 * Obtains the next batch id by write-locking the id table: a sentinel row of
 * -1 is inserted to hold a real write lock (first-come-first-served), the next
 * value is looked up while other connections stall, then the sentinel is
 * removed and the table is unlocked.
 *
 * @param dbm        the database metadata used for quoting identifiers
 * @param ldb        the open database connection to lock and query on
 * @param schemaName the schema of the id table, may be {@code null}
 * @param tableName  the id table
 * @param fieldName  the id column
 * @return the next batch id, or {@code null} if the lookup produced none
 * @throws KettleDatabaseException if any statement fails
 */
public Long getNextBatchIdUsingLockTables( DatabaseMeta dbm, Database ldb, String schemaName, String tableName,
  String fieldName ) throws KettleDatabaseException {
  // The old way of doing things...
  Long rtn = null;
  // Make sure we lock that table to avoid concurrency issues
  String schemaAndTable = dbm.getQuotedSchemaTableCombination( schemaName, tableName );
  ldb.lockTables( new String[] { schemaAndTable, } );
  try {
    // Now insert value -1 to create a real write lock blocking the other
    // requests.. FCFS
    String sql = "INSERT INTO " + schemaAndTable + " (" + dbm.quoteField( fieldName ) + ") values (-1)";
    ldb.execStatement( sql );
    // Now this next lookup will stall on the other connections
    //
    rtn = ldb.getNextValue( null, schemaName, tableName, fieldName );
  } finally {
    try {
      // Remove the -1 record again...
      String sql = "DELETE FROM " + schemaAndTable + " WHERE " + dbm.quoteField( fieldName ) + "= -1";
      ldb.execStatement( sql );
    } finally {
      // BUGFIX: always release the lock, even when the cleanup DELETE throws;
      // previously a failed DELETE left the table locked for every other
      // connection.
      ldb.unlockTables( new String[] { schemaAndTable, } );
    }
  }
  return rtn;
}
代码示例来源:origin: pentaho/pentaho-kettle
/**
 * Unlock certain tables in the database for write operations.
 * <p>
 * Table names are schema-quoted first; if the database dialect provides no
 * unlock statement (returns {@code null}), this is a no-op.
 *
 * @param tableNames The tables to unlock
 * @throws KettleDatabaseException if executing the unlock statement fails
 */
public void unlockTables( String[] tableNames ) throws KettleDatabaseException {
  // Nothing to unlock.
  if ( Utils.isEmpty( tableNames ) ) {
    return;
  }
  // Quote table names too...
  int count = tableNames.length;
  String[] quoted = new String[ count ];
  for ( int idx = 0; idx < count; idx++ ) {
    quoted[ idx ] = databaseMeta.getQuotedSchemaTableCombination( null, tableNames[ idx ] );
  }
  // Get the SQL to unlock the (quoted) tables; null means the dialect
  // has no unlock statement and there is nothing to execute.
  String unlockSql = databaseMeta.getSQLUnlockTables( quoted );
  if ( unlockSql != null ) {
    execStatement( unlockSql );
  }
}
代码示例来源:origin: pentaho/pentaho-kettle
/**
 * Attempts to drop the configured output table.
 * <p>
 * The table name is prefixed with the schema when one is set. On a successful
 * drop (exit status 0) the step metadata is updated; on a database error the
 * failure is logged and {@code false} is returned.
 *
 * @return {@code true} if the table was dropped, {@code false} otherwise
 */
@Override
public boolean dropTable() {
  TableOutputMeta meta = getMeta();
  TableOutputData data = getData();
  String schema = meta.getSchemaName();
  String table = meta.getTableName();
  // Qualify with the schema when one is configured (isEmpty over equals("")).
  if ( schema != null && !schema.isEmpty() ) {
    table = schema + "." + table;
  }
  // NOTE(review): the table name is concatenated unquoted into the SQL; fine
  // for trusted step metadata, but consider databaseMeta quoting for names
  // containing special characters.
  String sql = "drop table " + table + ";";
  try {
    Result result = data.db.execStatement( sql );
    int status = result.getExitStatus();
    if ( status == 0 ) {
      util.updateMetadata( meta, -1 );
    }
    return status == 0;
  } catch ( KettleDatabaseException e ) {
    message = "Could not drop table: " + table;
    logError( message, e );
  }
  return false;
}
代码示例来源:origin: pentaho/pentaho-kettle
execStatement( sql, updateRowMeta, updateRowData );
代码示例来源:origin: pentaho/pentaho-kettle
/**
 * Renames a repository user via a parameterized UPDATE.
 *
 * @param id_user the id of the user row to rename
 * @param newname the new user name
 * @throws KettleException if the update fails
 */
public synchronized void renameUser( ObjectId id_user, String newname ) throws KettleException {
  // Bind order matches the statement: name first, then the id in WHERE.
  String sql =
    "UPDATE "
      + quoteTable( KettleDatabaseRepository.TABLE_R_USER ) + " SET "
      + quote( KettleDatabaseRepository.FIELD_USER_NAME ) + " = ? WHERE "
      + quote( KettleDatabaseRepository.FIELD_USER_ID_USER ) + " = ?";
  RowMetaAndData params = new RowMetaAndData();
  params.addValue( new ValueMetaString( KettleDatabaseRepository.FIELD_USER_NAME ), newname );
  params.addValue( new ValueMetaInteger( KettleDatabaseRepository.FIELD_USER_ID_USER ), id_user );
  repository.connectionDelegate.getDatabase().execStatement( sql, params.getRowMeta(), params.getData() );
}
}
代码示例来源:origin: pentaho/pentaho-kettle
execStatement( sql, row.getRowMeta(), row.getData() );
} catch ( Exception e ) {
DatabaseLogExceptionFactory.getExceptionStrategy( logTable )
代码示例来源:origin: pentaho/pentaho-kettle
/**
 * Moves a job to another repository directory via a parameterized UPDATE,
 * matching on both the job name and its current directory.
 *
 * @param jobname           the name of the job to move
 * @param id_directory_from the directory the job currently lives in
 * @param id_directory_to   the directory to move the job to
 * @throws KettleException if the update fails
 */
public synchronized void moveJob( String jobname, ObjectId id_directory_from, ObjectId id_directory_to ) throws KettleException {
  String sql =
    "UPDATE "
      + quoteTable( KettleDatabaseRepository.TABLE_R_JOB ) + " SET "
      + quote( KettleDatabaseRepository.FIELD_JOB_ID_DIRECTORY ) + " = ? WHERE "
      + quote( KettleDatabaseRepository.FIELD_JOB_NAME ) + " = ? AND "
      + quote( KettleDatabaseRepository.FIELD_JOB_ID_DIRECTORY ) + " = ?";
  // Bind order matches the statement: target directory, name, source directory.
  RowMetaAndData params = new RowMetaAndData();
  params.addValue( new ValueMetaInteger( KettleDatabaseRepository.FIELD_JOB_ID_DIRECTORY ), id_directory_to );
  params.addValue( new ValueMetaString( KettleDatabaseRepository.FIELD_JOB_NAME ), jobname );
  params.addValue( new ValueMetaInteger( KettleDatabaseRepository.FIELD_JOB_ID_DIRECTORY ), id_directory_from );
  repository.connectionDelegate.getDatabase().execStatement( sql, params.getRowMeta(), params.getData() );
}
代码示例来源:origin: pentaho/pentaho-kettle
param.addValue( valField, ValueMetaInterface.TYPE_INTEGER, Long.valueOf( maximum ) );
db.execStatement( sql, param.getRowMeta(), param.getData() );
代码示例来源:origin: pentaho/pentaho-kettle
.toString() ) );
repository.connectionDelegate.getDatabase().execStatement( sql, r.getRowMeta(), r.getData() );
代码示例来源:origin: pentaho/pentaho-kettle
/**
 * Moves a transformation to another repository directory via a parameterized
 * UPDATE, matching on both the transformation name and its current directory.
 *
 * @param transname         the name of the transformation to move
 * @param id_directory_from the directory the transformation currently lives in
 * @param id_directory_to   the directory to move the transformation to
 * @throws KettleException if the update fails
 */
public synchronized void moveTransformation( String transname, ObjectId id_directory_from,
  ObjectId id_directory_to ) throws KettleException {
  String nameField = quote( KettleDatabaseRepository.FIELD_TRANSFORMATION_NAME );
  String sql =
    "UPDATE "
      + quoteTable( KettleDatabaseRepository.TABLE_R_TRANSFORMATION ) + " SET "
      + quote( KettleDatabaseRepository.FIELD_TRANSFORMATION_ID_DIRECTORY ) + " = ? WHERE " + nameField
      + " = ? AND " + quote( KettleDatabaseRepository.FIELD_TRANSFORMATION_ID_DIRECTORY ) + " = ?";
  // Bind order matches the statement: target directory, name, source directory.
  RowMetaAndData params = new RowMetaAndData();
  params.addValue( new ValueMetaInteger( KettleDatabaseRepository.FIELD_TRANSFORMATION_ID_DIRECTORY ),
    id_directory_to );
  params.addValue( new ValueMetaString( KettleDatabaseRepository.FIELD_TRANSFORMATION_NAME ), transname );
  params.addValue( new ValueMetaInteger( KettleDatabaseRepository.FIELD_TRANSFORMATION_ID_DIRECTORY ),
    id_directory_from );
  repository.connectionDelegate.getDatabase().execStatement( sql, params.getRowMeta(), params.getData() );
}
代码示例来源:origin: pentaho/pentaho-kettle
try {
db.connect();
db.execStatement( "CALL VECTORWISE( COMBINE '" + data.schemaTable + " - " + data.schemaTable + "' )" );
db.execStatement( "CALL VECTORWISE( COMBINE '" + data.schemaTable + " - " + data.schemaTable + "' )" );
log.logDetailed( "Table " + data.schemaTable + " was truncated using a 'combine' statement." );
} catch ( Exception e ) {
代码示例来源:origin: pentaho/pentaho-kettle
/**
 * Renames a job and/or moves it to a new parent directory via a dynamically
 * assembled parameterized UPDATE. A no-op when both arguments are {@code null}.
 *
 * @param id_job       the id of the job row to update
 * @param newParentDir the new parent directory, or {@code null} to keep it
 * @param newname      the new job name, or {@code null} to keep it
 * @throws KettleException if the update fails
 */
public synchronized void renameJob( ObjectId id_job, RepositoryDirectoryInterface newParentDir, String newname ) throws KettleException {
  // Nothing to change when neither a new name nor a new directory was given.
  if ( newParentDir == null && newname == null ) {
    return;
  }
  RowMetaAndData params = new RowMetaAndData();
  StringBuilder sql = new StringBuilder( "UPDATE " )
    .append( quoteTable( KettleDatabaseRepository.TABLE_R_JOB ) ).append( " SET " );
  boolean needsComma = false;
  if ( newname != null ) {
    sql.append( quote( KettleDatabaseRepository.FIELD_JOB_NAME ) ).append( " = ? " );
    params.addValue( new ValueMetaString( KettleDatabaseRepository.FIELD_JOB_NAME ), newname );
    needsComma = true;
  }
  if ( newParentDir != null ) {
    if ( needsComma ) {
      sql.append( ", " );
    }
    sql.append( quote( KettleDatabaseRepository.FIELD_JOB_ID_DIRECTORY ) ).append( " = ? " );
    params.addValue( new ValueMetaInteger( KettleDatabaseRepository.FIELD_JOB_ID_DIRECTORY ),
      newParentDir.getObjectId() );
  }
  sql.append( "WHERE " ).append( quote( KettleDatabaseRepository.FIELD_JOB_ID_JOB ) ).append( " = ?" );
  params.addValue( new ValueMetaInteger( KettleDatabaseRepository.FIELD_JOB_ID_JOB ), id_job );
  log.logBasic( "sql = [" + sql + "]" );
  log.logBasic( "row = [" + params + "]" );
  repository.connectionDelegate.getDatabase().execStatement( sql.toString(), params.getRowMeta(), params.getData() );
}
代码示例来源:origin: pentaho/pentaho-kettle
data.db.execStatement( databaseMeta.stripCR( isql ) );
} catch ( KettleException e ) {
throw new KettleDatabaseException( "Error inserting 'unknown' row in dimension ["
代码示例来源:origin: pentaho/pentaho-kettle
log.logBasic( "row = [" + table + "]" );
repository.connectionDelegate.getDatabase().execStatement( sql, table.getRowMeta(), table.getData() );
repository.connectionDelegate.getDatabase().commit();
内容来源于网络,如有侵权,请联系作者删除!