Skip to content

Commit 6aecf2e

Browse files
committed
Review feedback
1 parent 2e3c765 commit 6aecf2e

File tree

2 files changed

+25
-6
lines changed

2 files changed

+25
-6
lines changed

firebase-firestore/src/androidTest/java/com/google/firebase/firestore/WriteBatchTest.java

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -259,6 +259,12 @@ public void testCanWriteTheSameDocumentMultipleTimes() {
259259

260260
@Test
261261
public void testCanWriteVeryLargeBatches() {
262+
// On Android, SQLite Cursors are limited to reading no more than 2 MB per row (despite being able
263+
// to write very large values). This test verifies that the SQLiteMutationQueue properly works
264+
// around this limitation.
265+
266+
// Create a map containing nearly 1 MB of data. Note that if you use 1024 below this will create
267+
// a document larger than 1 MB, which will be rejected by the backend as too large.
262268
String a = Character.toString('a');
263269
StringBuilder buf = new StringBuilder(1000);
264270
for (int i = 0; i < 1000; i++) {
@@ -273,6 +279,9 @@ public void testCanWriteVeryLargeBatches() {
273279
DocumentReference doc = testDocument();
274280
WriteBatch batch = doc.getFirestore().batch();
275281

282+
// Write a batch containing 3 copies of the data, creating a ~3 MB batch. Writing to the same
283+
// document in a batch is allowed, and so long as the net size of the document is under 1 MB the
284+
// batch is accepted.
276285
batch.set(doc, values);
277286
for (int i = 0; i < 2; i++) {
278287
batch.update(doc, values);

firebase-firestore/src/main/java/com/google/firebase/firestore/local/SQLiteMutationQueue.java

Lines changed: 16 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -44,6 +44,16 @@
4444
/** A mutation queue for a specific user, backed by SQLite. */
4545
final class SQLiteMutationQueue implements MutationQueue {
4646

47+
/**
48+
 * On Android, SQLite Cursors are limited to reading no more than 2 MB per row (despite being able to
49+
* write very large values). All reads of the mutations column in the mutations table need to read
50+
* in chunks with SUBSTR to avoid going over this limit.
51+
*
52+
* <p>The value here has to be 2 MB or smaller, while allowing for all possible other values that
53+
* might be selected out along with the mutations column in any given result set. Nearly 1 MB is
54+
* conservative, but allows all combinations of document paths and batch ids without needing to
55+
* figure out if the row has gotten too large.
56+
*/
4757
private static final int BLOB_MAX_INLINE_LENGTH = 1000000;
4858

4959
private final SQLitePersistence db;
@@ -216,9 +226,9 @@ public MutationBatch getNextMutationBatchAfterBatchId(int batchId) {
216226
int nextBatchId = batchId + 1;
217227

218228
return db.query(
219-
"SELECT m.batch_id, SUBSTR(m.mutations, 1, ?) FROM mutations m "
220-
+ "WHERE m.uid = ? AND m.batch_id >= ? "
221-
+ "ORDER BY m.batch_id ASC LIMIT 1")
229+
"SELECT batch_id, SUBSTR(mutations, 1, ?) FROM mutations "
230+
+ "WHERE uid = ? AND batch_id >= ? "
231+
+ "ORDER BY batch_id ASC LIMIT 1")
222232
.binding(BLOB_MAX_INLINE_LENGTH, uid, nextBatchId)
223233
.firstValue(row -> decodeInlineMutationBatch(row.getInt(0), row.getBlob(1)));
224234
}
@@ -227,8 +237,8 @@ public MutationBatch getNextMutationBatchAfterBatchId(int batchId) {
227237
public List<MutationBatch> getAllMutationBatches() {
228238
List<MutationBatch> result = new ArrayList<>();
229239
db.query(
230-
"SELECT m.batch_id, SUBSTR(m.mutations, 1, ?) "
231-
+ "FROM mutations m "
240+
"SELECT batch_id, SUBSTR(mutations, 1, ?) "
241+
+ "FROM mutations "
232242
+ "WHERE uid = ? ORDER BY batch_id ASC")
233243
.binding(BLOB_MAX_INLINE_LENGTH, uid)
234244
.forEach(row -> result.add(decodeInlineMutationBatch(row.getInt(0), row.getBlob(1))));
@@ -407,7 +417,7 @@ public void performConsistencyCheck() {
407417
}
408418

409419
/**
410-
* Decodes mutation batch bytes obtained via substring. If the blob is too smaller than
420+
* Decodes mutation batch bytes obtained via substring. If the blob is smaller than
411421
* BLOB_MAX_INLINE_LENGTH, executes additional queries to load the rest of the blob.
412422
*
413423
* @param batchId The batch ID of the row containing the bytes, for fallback lookup if the value

0 commit comments

Comments
 (0)