Hi All,
We are implementing a flat file tool where users can upload flat files directly into HANA tables through a single INSERT statement. This part works as expected.
Where we are facing an issue is with the rollback: we call conn.rollback() in the catch block, but it has no effect.
For example: say the user uploads a flat file with 100 records, and record 90 is a duplicate that violates the primary key constraint. We catch the error at record 90, but the first 89 records still end up inserted in the table.
Our requirement is that as soon as an error/exception is caught, no record from the flat file should be inserted, i.e. the whole upload should be rolled back.
The documentation available on HANA XSJS scripting advises using the rollback() function for this, but it is not working for us. Do you have any advice or guidance on how we can achieve this?
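For reference, below is the transaction pattern as we understood it from the documentation. This is only a minimal sketch, assuming a $.db connection with auto-commit switched off; the table name "MY_TABLE" is just a placeholder:

var conn = $.db.getConnection();
conn.setAutoCommit(false); // keep all inserts inside one open transaction
try {
    var pstmt = conn.prepareStatement("INSERT INTO \"MY_TABLE\" VALUES (?)");
    pstmt.setString(1, "some value");
    pstmt.executeUpdate();
    // ... further inserts ...
    conn.commit(); // single commit, only after every row succeeded
} catch (err) {
    conn.rollback(); // undoes all uncommitted inserts
} finally {
    conn.close();
}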
Below is my upload function, where I am doing the rollback in the catch block.
function uploadFile() {
    try {
        // Query table metadata to get the data type of each column
        var pstmt = conn.prepareStatement("SELECT * FROM " + uploadtable + " LIMIT 1");
        var rs = pstmt.executeQuery();
        var rsm = rs.getMetaData();
        var colCount = rsm.getColumnCount();
        var startdt = Date.now();
        if (contents.length > 0) {
            var arrLines = contents.split(/\r\n|\n/);
            // Build a "?,?,...,?" placeholder list with one marker per column
            var placeholder = new Array(colCount + 1).join('?').split('').join(',');
            var insertStmnt = "INSERT INTO " + uploadtable + " VALUES (" + placeholder + ")";
            pstmt = conn.prepareStatement(insertStmnt);
            arrLines = checkForBadData(arrLines);
            if (batch === "on") {
                pstmt.setBatchSize(arrLines.length);
            }
            for (var i = 1; i < arrLines.length; i++) { // start at 1 to skip the header row
                var line = arrLines[i].split(";");
                var col = line.splice(0, colCount);
                if (JSON.stringify(arrLines[i]).length > 2) { // skip empty lines
                    for (var a = 1; a <= colCount; a++) {
                        var val = "";
                        if (typeof col[a - 1] === 'undefined') {
                            val = "";
                        } else {
                            val = col[a - 1].split("\"").join(""); // strip quote characters
                            val = val.replace("\\,", ","); // unescape commas
                        }
                        if (typeof val === "undefined"
                                || (val === "" && emptyisnull === "on")
                                || (val.toLowerCase() === "null" && emptyisnull === "on")) {
                            pstmt.setNull(a);
                        } else {
                            // Bind the value with the setter matching the column type
                            switch (rsm.getColumnType(a)) {
                                case $.db.types.VARCHAR:
                                case $.db.types.CHAR:
                                    pstmt.setString(a, val);
                                    break;
                                case $.db.types.NVARCHAR:
                                case $.db.types.NCHAR:
                                case $.db.types.SHORTTEXT:
                                    pstmt.setNString(a, val);
                                    break;
                                case $.db.types.TINYINT:
                                case $.db.types.SMALLINT:
                                case $.db.types.INT:
                                case $.db.types.BIGINT:
                                    pstmt.setInteger(a, parseInt(val, 10));
                                    break;
                                case $.db.types.DOUBLE:
                                    pstmt.setDouble(a, val);
                                    break;
                                case $.db.types.DECIMAL:
                                    pstmt.setDecimal(a, val);
                                    break;
                                case $.db.types.REAL:
                                    pstmt.setReal(a, val);
                                    break;
                                case $.db.types.NCLOB:
                                case $.db.types.TEXT:
                                    pstmt.setNClob(a, val);
                                    break;
                                case $.db.types.CLOB:
                                    pstmt.setClob(a, val);
                                    break;
                                case $.db.types.BLOB:
                                    pstmt.setBlob(a, val);
                                    break;
                                case $.db.types.DATE:
                                    pstmt.setDate(a, val);
                                    break;
                                case $.db.types.TIME:
                                    pstmt.setTime(a, val);
                                    break;
                                case $.db.types.TIMESTAMP:
                                    pstmt.setTimestamp(a, parseTimestamp(val)); // e.g. 20140522180910526000
                                    break;
                                case $.db.types.SECONDDATE:
                                    pstmt.setSeconddate(a, val);
                                    break;
                                default:
                                    pstmt.setString(a, val);
                                    break;
                            }
                        }
                    }
                    if (batch === "on") {
                        pstmt.addBatch();
                    } else {
                        pstmt.executeUpdate();
                        conn.commit(); // note: commits after every single row
                    }
                }
            }
            if (batch === "on") {
                pstmt.executeBatch(); // run all batched inserts at once
            }
            messages.push((arrLines.length - 1) + " Lines inserted into " + uploadtable);
            messages.push(((Date.now() - startdt) / 1000).toFixed(2) + " Seconds taken to complete");
        } else {
            messages.push("No data in the submitted file.");
        }
    } catch (err) {
        messages.push(err.message + ": Error on file line - " + i
            + ". Check the data in the preview below at the file line " + i + ".");
        conn.rollback();
    } finally {
        //conn.setAutoCommit(true);
        conn.commit(); // note: finally runs even after a rollback in the catch
        conn.close();
    }
}
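In batch mode (batch === "on") we assume the equivalent all-or-nothing flow would collect the binds and execute them together, roughly like the stripped-down sketch below (reusing uploadtable, placeholder and arrLines from the function above; the per-column binding is left out):

pstmt = conn.prepareStatement("INSERT INTO " + uploadtable + " VALUES (" + placeholder + ")");
pstmt.setBatchSize(arrLines.length);
for (var i = 1; i < arrLines.length; i++) {
    // ... bind each column with the matching setter, as in the switch above ...
    pstmt.addBatch();
}
pstmt.executeBatch(); // all batched rows execute together
conn.commit(); // one commit covering the whole file

Is this the right structure for conn.rollback() in the catch block to cover the entire file?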