java jdbc ibm-midrange jtopen

JDBC ResultSet has more data than selected


I am selecting data from different tables of an AS/400 database using a change indicator. I then export the data to a file and mark it as exported by updating this change indicator.

While this process is running it is possible that users enter new data, and for some reason this new data is added to our ResultSet and gets marked as exported, too. What could cause this behaviour?

Here are excerpts of the code:

stmt = conn.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_UPDATABLE);
extractedData = stmt.executeQuery(sql);

private void writeDataToFile(TableIdentifier table, String tableHeader, ResultSet extractedData,
        TableMetaData tableMetaData, Date currentTimestamp) {
    BufferedFileWriter bfw = null;
    boolean shouldUpdate = true;
    try {
        logger.info(Message.COLLECTOR_EXPORT_START, table.getName());
        ResultSetMetaData rsMetaData = extractedData.getMetaData();
        int numberOfColumns = rsMetaData.getColumnCount();
        for (ExportConfig config : tableQueueMap.get(table)) {
            expDir = WorkDir.getWorkDir(context, applicationDomain + File.separator + EXPDIR + File.separator
                    + (config.isSendComplete() ? "complete_" : "") + config.getSendQueue());
            String[] qualTable = table.getName().split("\\.");
            String tableName = table.getName();
            if (qualTable.length > 1) {
                tableName = qualTable[1];
            }
            File exportFile = new File(expDir, tableName);

            if (!exportFile.exists()) {
                bfw = new BufferedFileWriter(exportFile);
                writeHeader(tableHeader, bfw);
            } else {
                bfw = new BufferedFileWriter(exportFile, true);
            }
            writeLines(tableName, extractedData, bfw, numberOfColumns);
            setTimestamp(table.getName(), currentTimestamp, config);
            bfw.close();
            if (config.isSendComplete()) {
                shouldUpdate = false;
            }
        }
        if (tableMetaData.getUpdate() != null && shouldUpdate) {
            extractedData.beforeFirst();
            if (tableMetaData.getVersion() != null) {
                markDataAsExported(extractedData, tableMetaData);
            } else {
                markResultSetAsExported(extractedData, tableMetaData);
            }
        }
        tableQueueMap.remove(table);
        dataExported = true;
    } catch (IOException ex0) {
        logger.error(Message.COLLECTOR_EXPORT_FILE_ERROR, table.getName(), ex0.getMessage());
    } catch (SQLException ex1) {
        logger.error(Message.COLLECTOR_RESULTSET_ERROR, table.getName(), ex1.getMessage());
    } catch (WorkDirException ex2) {
        logger.error(Message.COLLECTOR_WORKDIR_ERROR, ex2.getMessage());
    } finally {
        if (bfw != null)
            try {
                bfw.close();
            } catch (IOException e) {
                // ignore
            }
    }
}
private void markResultSetAsExported(ResultSet extractedData, TableMetaData tableMetaData) throws SQLException {
    while (extractedData.next()) {
        if (tableMetaData.getUpdateColumnType().equals("N")) {
            extractedData.updateInt(tableMetaData.getUpdateColumn(),
                    Integer.parseInt(tableMetaData.getUpdateValue()));
        } else {
            extractedData.updateString(tableMetaData.getUpdateColumn(), tableMetaData.getUpdateValue());
        }
        extractedData.updateRow();
    }
}

When I debug the code I can see that, on extractedData.next() in the method markResultSetAsExported, the ResultSet has grown when new data has been added to the table.


Solution

  • It looks like you can't use TYPE_SCROLL_INSENSITIVE and CONCUR_UPDATABLE at the same time on AS/400. The documentation says:

    There is one situation where the application specifies a TYPE_SCROLL_INSENSITIVE, CONCUR_UPDATEABLE ResultSet. Insensitivity is implemented in the database engine by making a copy of the data. You are then not allowed to make updates through that copy to the underlying database. If you specify this combination, the driver changes the sensitivity to TYPE_SCROLL_SENSITIVE and creates a warning indicating that your request has been changed.
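
    One way to confirm this at runtime (a minimal sketch, not part of the original answer; the class and method names ResultSetTypeCheck / checkDowngrade are made up for illustration) is to ask the driver which result set type it actually used and to read any SQLWarning chained to the statement after executeQuery:

    import java.sql.ResultSet;
    import java.sql.SQLException;
    import java.sql.SQLWarning;
    import java.sql.Statement;

    // Hypothetical utility class, not from the original answer.
    public final class ResultSetTypeCheck {

        // Checks whether the requested TYPE_SCROLL_INSENSITIVE result set was
        // silently downgraded to TYPE_SCROLL_SENSITIVE by the driver.
        public static void checkDowngrade(Statement stmt, ResultSet rs) throws SQLException {
            // getType() reports the type the driver actually used, not the one requested.
            if (rs.getType() == ResultSet.TYPE_SCROLL_SENSITIVE) {
                System.out.println("ResultSet is SENSITIVE: it reflects rows inserted while it is open.");
            }
            // A change to the requested characteristics is reported as a chained SQLWarning.
            for (SQLWarning w = stmt.getWarnings(); w != null; w = w.getNextWarning()) {
                System.out.println("SQLWarning: " + w.getSQLState() + " - " + w.getMessage());
            }
        }
    }

    Calling this right after extractedData = stmt.executeQuery(sql) should show the downgrade. If the reported type is TYPE_SCROLL_SENSITIVE despite TYPE_SCROLL_INSENSITIVE having been requested, the behaviour from the question is expected: a sensitive, updatable cursor sees rows inserted while it is open, so they are picked up by markResultSetAsExported and marked as exported as well.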