...
```java
/**
 * @author <a href="mailto:jal@etc.to">Frits Jalvingh</a>
 * Created on 8-6-17.
 */
import java.sql.*;
import java.util.*;

public class PgInsertTest {
    public static void main(String[] args) {
        try {
            Class.forName("org.postgresql.Driver");
            String dbURL = "jdbc:postgresql://localhost:5432/datavault_12_tst";
            Properties parameters = new Properties();
            parameters.put("user", "jal");
            parameters.put("password", "jal");
            Connection conn = DriverManager.getConnection(dbURL, parameters);
            conn.setAutoCommit(false);              // Important: commit manually, not per statement

            String query = "insert into datavault.h_boekingdetail(id_h,source_system_id,organisation_id,load_dts,boekingdetailid) values(?,?,?,?,?)";

            long lStartTime = System.currentTimeMillis();
            int size = 0;
            int commitSize = 50_000;                // commit after this many rows
            int commitCount = 0;
            int batchSize = 0;                      // 0 = no batching; > 0 = rows per JDBC batch
            int batchCount = 0;

            try(PreparedStatement ps = conn.prepareStatement(query)) {
                long now = System.currentTimeMillis();
                for(int row = 0; row < 1_000_000; row++) {
                    ps.setInt(1, row + 1_000_000);
                    ps.setInt(2, 1);
                    ps.setInt(3, 12);
                    ps.setTimestamp(4, new Timestamp(now));
                    ps.setString(5, "boe" + row + "kingdetail" + row);
                    size++;
                    commitCount++;
                    batchCount++;

                    if(batchSize <= 0) {
                        // No batching: one round trip per row.
                        ps.executeUpdate();
                        if(commitCount > commitSize) {
                            commitCount = 0;
                            conn.commit();
                        }
                    } else {
                        // Batching: collect rows and send them in one round trip per batch.
                        ps.addBatch();
                        if(batchCount >= batchSize) {
                            ps.executeBatch();
                            batchCount = 0;
                            if(commitCount > commitSize) {
                                commitCount = 0;
                                conn.commit();
                            }
                        }
                    }
                }
                if(batchSize > 0 && batchCount > 0) {
                    ps.executeBatch();              // flush the final partial batch
                }
            }
            conn.commit();

            long lEndTime = System.currentTimeMillis();
            long difference = lEndTime - lStartTime;
            double rps = (double) size / (difference / 1000.0);    // fractional seconds for an accurate rate
            System.out.println("Inserted " + size + " rows in " + difference + " milliseconds, " + rps + " rows per second");
        } catch(Exception e) {
            e.printStackTrace();
        }
    }
}
```
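For completeness, the table the test inserts into can be created along these lines. The real DDL is not part of this test, so the column types, the varchar length, and the primary key on id_h are assumptions derived from the values the program binds:

```java
import java.sql.*;
import java.util.*;

public class CreateTestTable {
    public static void main(String[] args) throws Exception {
        Properties parameters = new Properties();
        parameters.put("user", "jal");
        parameters.put("password", "jal");
        try(Connection conn = DriverManager.getConnection("jdbc:postgresql://localhost:5432/datavault_12_tst", parameters);
            Statement st = conn.createStatement()) {
            // Assumed schema: types follow the setInt/setTimestamp/setString calls in the test,
            // and the primary key on id_h matches the "removing primary key" step in the results below.
            st.executeUpdate("create schema if not exists datavault");
            st.executeUpdate("create table if not exists datavault.h_boekingdetail("
                + "id_h integer primary key,"
                + "source_system_id integer,"
                + "organisation_id integer,"
                + "load_dts timestamp,"
                + "boekingdetailid varchar(64))");
        }
    }
}
```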
Numbers: without batching, commit size 50,000:
...
- Inserted 1000000 rows in 14322 milliseconds, 71428.57142857143 rows per second
- Inserted 1000000 rows in 14272 milliseconds, 71428.57142857143 rows per second
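The two runs print identical rows-per-second values even though their durations differ, because the printed rate was computed over whole seconds; both runs are effectively around 70,000 rows per second. A quick check of that arithmetic:

```java
public class RateCheck {
    public static void main(String[] args) {
        int size = 1_000_000;
        long difference = 14_322;   // milliseconds, first run above

        // Whole-second division: 14322 / 1000 == 14, and 14272 / 1000 is also 14,
        // which is why both runs above print the same 71428.57... rows per second.
        System.out.println((double) size / (difference / 1000));

        // Fractional seconds give the rate for the measured time (~69823 rows per second).
        System.out.println((double) size / (difference / 1000.0));
    }
}
```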
After removing the primary key and all indices from the table (see the sketch after these numbers), with commit size 50,000 and batch size 10,000:
- Inserted 1000000 rows in 7500 milliseconds, 142857.14285714287 rows per second
- Inserted 1000000 rows in 7410 milliseconds, 142857.14285714287 rows per second
A two-fold performance increase.
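Dropping the primary key and the indices is done in the database, outside the test program. A minimal sketch of what that could look like, with hypothetical constraint and index names since the actual ones are not listed here:

```java
import java.sql.*;
import java.util.*;

public class DropIndexes {
    public static void main(String[] args) throws Exception {
        Properties parameters = new Properties();
        parameters.put("user", "jal");
        parameters.put("password", "jal");
        try(Connection conn = DriverManager.getConnection("jdbc:postgresql://localhost:5432/datavault_12_tst", parameters);
            Statement st = conn.createStatement()) {
            // Hypothetical names: substitute the real constraint and index names of the table.
            st.executeUpdate("alter table datavault.h_boekingdetail drop constraint h_boekingdetail_pkey");
            st.executeUpdate("drop index if exists datavault.ix_h_boekingdetail_boekingdetailid");
        }
    }
}
```

Without the primary key, duplicate id_h values are no longer rejected, so this only makes sense for a dedicated bulk-load window.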
Same settings, but with the table set to UNLOGGED (see the sketch below):
- Inserted 1000000 rows in 7025 milliseconds, 142857.14285714287 rows per second
- Inserted 1000000 rows in 6940 milliseconds, 166666.66666666666 rows per second
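Switching an existing table to UNLOGGED is a single ALTER TABLE in PostgreSQL 9.5 or later; a minimal sketch, reusing the same connection settings as the test:

```java
import java.sql.*;
import java.util.*;

public class SetUnlogged {
    public static void main(String[] args) throws Exception {
        Properties parameters = new Properties();
        parameters.put("user", "jal");
        parameters.put("password", "jal");
        try(Connection conn = DriverManager.getConnection("jdbc:postgresql://localhost:5432/datavault_12_tst", parameters);
            Statement st = conn.createStatement()) {
            // UNLOGGED tables skip the write-ahead log: inserts are cheaper, but the table
            // is emptied after a crash and is not replicated to standbys.
            st.executeUpdate("alter table datavault.h_boekingdetail set unlogged");
            // "alter table ... set logged" switches it back once the load is done.
        }
    }
}
```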