-
Notifications
You must be signed in to change notification settings - Fork 624
Add mutation batch id to index backfill #3202
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Closed
Closed
Changes from all commits
Commits
Show all changes
20 commits
Select commit
Hold shift + click to select a range
6d64d18
add mutation batch id to index backfill
c27c051
fix compareTo in FieldIndex
063c88b
removed submodule
b7fa22d
Update firebase-firestore/src/main/java/com/google/firebase/firestore…
7202745
Merge branch 'bc/mutation-id' of github.com:firebase/firebase-android…
c0457b6
Refactor document ordering into LocalDocumentsView
add5707
remove semantic_comparator unused
7e7164d
Merge branch 'master' into bc/mutation-id
aff17e3
filter out local mutations from getDocumentsMatchingQuery after maste…
0b9b735
fix crashlytics submodule
438592c
merge master resolve conflicts
a82bfbf
refactor LDV to handle returning next documents to index
4c830a4
refactor largest batch id into Mutation and largestBatchId/Mutation i…
c47695f
resolving simple comments, make overlay autovalue
9ea9b27
revert adding batch id to document
2ec7f09
revert batch id to applyToLocalView
1af2624
Merge branch 'master' into bc/mutation-id
273656b
Merge branch 'master' into bc/mutation-id
1345cf6
update tests after merging master
4b10ea0
fetch overlays by collection group
File filter
Filter by extension
Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
There are no files selected for viewing
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -17,6 +17,7 @@ | |
import static com.google.firebase.firestore.model.DocumentCollections.emptyDocumentMap; | ||
import static com.google.firebase.firestore.util.Assert.hardAssert; | ||
|
||
import android.util.Pair; | ||
import androidx.annotation.VisibleForTesting; | ||
import com.google.firebase.Timestamp; | ||
import com.google.firebase.database.collection.ImmutableSortedMap; | ||
|
@@ -29,9 +30,12 @@ | |
import com.google.firebase.firestore.model.mutation.FieldMask; | ||
import com.google.firebase.firestore.model.mutation.Mutation; | ||
import com.google.firebase.firestore.model.mutation.MutationBatch; | ||
import com.google.firebase.firestore.model.mutation.Overlay; | ||
import com.google.firebase.firestore.model.mutation.PatchMutation; | ||
import java.util.ArrayList; | ||
import java.util.HashMap; | ||
import java.util.HashSet; | ||
import java.util.Iterator; | ||
import java.util.List; | ||
import java.util.Map; | ||
import java.util.Set; | ||
|
@@ -88,14 +92,14 @@ IndexManager getIndexManager() { | |
* for it. | ||
*/ | ||
Document getDocument(DocumentKey key) { | ||
Mutation overlay = documentOverlayCache.getOverlay(key); | ||
Overlay overlay = documentOverlayCache.getOverlay(key); | ||
// Only read from remote document cache if overlay is a patch. | ||
MutableDocument document = | ||
(overlay == null || overlay instanceof PatchMutation) | ||
(overlay == null || overlay.getMutation() instanceof PatchMutation) | ||
? remoteDocumentCache.get(key) | ||
: MutableDocument.newInvalidDocument(key); | ||
if (overlay != null) { | ||
overlay.applyToLocalView(document, null, Timestamp.now()); | ||
overlay.getMutation().applyToLocalView(document, null, Timestamp.now()); | ||
} | ||
|
||
return document; | ||
|
@@ -125,18 +129,18 @@ ImmutableSortedMap<DocumentKey, Document> getLocalViewOfDocuments( | |
ImmutableSortedMap<DocumentKey, Document> results = emptyDocumentMap(); | ||
Map<DocumentKey, MutableDocument> recalculateDocuments = new HashMap<>(); | ||
for (Map.Entry<DocumentKey, MutableDocument> entry : docs.entrySet()) { | ||
Mutation overlay = documentOverlayCache.getOverlay(entry.getKey()); | ||
Overlay overlay = documentOverlayCache.getOverlay(entry.getKey()); | ||
// Recalculate an overlay if the document's existence state is changed due to a remote | ||
// event *and* the overlay is a PatchMutation. This is because document existence state | ||
// can change if some patch mutation's preconditions are met. | ||
// NOTE: we recalculate when `overlay` is null as well, because there might be a patch | ||
// mutation whose precondition does not match before the change (hence overlay==null), | ||
// but would now match. | ||
if (existenceStateChanged.contains(entry.getKey()) | ||
&& (overlay == null || overlay instanceof PatchMutation)) { | ||
&& (overlay == null || overlay.getMutation() instanceof PatchMutation)) { | ||
recalculateDocuments.put(entry.getKey(), docs.get(entry.getKey())); | ||
} else if (overlay != null) { | ||
overlay.applyToLocalView(entry.getValue(), null, Timestamp.now()); | ||
overlay.getMutation().applyToLocalView(entry.getValue(), null, Timestamp.now()); | ||
} | ||
} | ||
|
||
|
@@ -195,7 +199,10 @@ void recalculateAndSaveOverlays(Set<DocumentKey> documentKeys) { | |
recalculateAndSaveOverlays(docs); | ||
} | ||
|
||
/** Gets the local view of the next {@code count} documents based on their read time. */ | ||
/** | ||
* Gets the local view of the next {@code count} documents based on their read time. The documents | ||
* are ordered by read time and key. | ||
*/ | ||
ImmutableSortedMap<DocumentKey, Document> getDocuments( | ||
String collectionGroup, IndexOffset offset, int count) { | ||
Map<DocumentKey, MutableDocument> docs = | ||
|
@@ -255,16 +262,112 @@ private ImmutableSortedMap<DocumentKey, Document> getDocumentsMatchingCollection | |
return results; | ||
} | ||
|
||
/** Queries the remote documents and overlays by doing a full collection scan. */ | ||
/** | ||
* Given a collection group, returns the next documents that follow the provided offset, along | ||
* with an updated offset. | ||
* | ||
* <p>The documents returned by this method are ordered by remote version from the provided | ||
* offset. If there are no more remote documents after the provided offset, documents with | ||
* mutations in order of batch id from the offset are returned. Since all documents in a batch are | ||
* returned together, the total number of documents returned can exceed {@code count}. | ||
* | ||
* <p>If no documents are found, returns an empty map and an offset with the latest read time in | ||
* the remote document cache. | ||
* | ||
* @param collectionGroup The collection group for the documents. | ||
* @param offset The offset to index into. | ||
* @param count The number of documents to return | ||
* @return A pair containing the next offset that corresponds to the next documents and a map of | ||
* documents that follow the provided offset. | ||
*/ | ||
Pair<IndexOffset, ImmutableSortedMap<DocumentKey, Document>> getNextDocumentsAndOffset( | ||
String collectionGroup, IndexOffset offset, int count) { | ||
// First backfill based on offset read time. | ||
ImmutableSortedMap<DocumentKey, Document> returnedDocuments = | ||
getDocuments(collectionGroup, offset, count); | ||
IndexOffset newOffset = getNewOffset(returnedDocuments, offset); | ||
|
||
// Backfill based on offset batch id if there is still count remaining. | ||
// TODO: combine the read time and batch id fetches into single method | ||
if (returnedDocuments.size() < count) { | ||
int countRemaining = count - returnedDocuments.size(); | ||
Pair<IndexOffset, ImmutableSortedMap<DocumentKey, Document>> pair = | ||
getDocumentsFromOverlay(collectionGroup, newOffset, returnedDocuments, countRemaining); | ||
newOffset = pair.first; | ||
|
||
for (Map.Entry<DocumentKey, Document> entry : pair.second) { | ||
returnedDocuments = returnedDocuments.insert(entry.getKey(), entry.getValue()); | ||
} | ||
} | ||
|
||
return new Pair<>(newOffset, returnedDocuments); | ||
} | ||
|
||
/** | ||
* Returns the next documents that follow the provided offset's largest batch id, along with an | ||
* updated offset. | ||
* | ||
* @param collectionGroup The collection group for the documents. | ||
* @param offset The offset to index into. | ||
* @param processedDocuments Already processed documents that should not be returned again by this | ||
* method. | ||
* @param count The number of documents to return. | ||
* @return A pair containing the next offset that corresponds to the next documents and a map of | ||
* documents that follow the provided offset's batch id. | ||
*/ | ||
private Pair<IndexOffset, ImmutableSortedMap<DocumentKey, Document>> getDocumentsFromOverlay( | ||
String collectionGroup, | ||
IndexOffset offset, | ||
ImmutableSortedMap<DocumentKey, Document> processedDocuments, | ||
int count) { | ||
SQLiteDocumentOverlayCache cache = (SQLiteDocumentOverlayCache) documentOverlayCache; | ||
ImmutableSortedMap<DocumentKey, Document> returnedDocuments = emptyDocumentMap(); | ||
IndexOffset newOffset = offset; | ||
int newLargestBatchId = offset.getLargestBatchId(); | ||
int documentCount = 0; | ||
|
||
while (documentCount < count) { | ||
Map<DocumentKey, Overlay> overlays = | ||
cache.getNextOverlays(collectionGroup, newLargestBatchId); | ||
if (overlays.isEmpty()) { | ||
break; | ||
} | ||
|
||
// Prune documents that are already in processedDocuments. | ||
List<DocumentKey> documentsToFetch = new ArrayList<>(); | ||
for (Map.Entry<DocumentKey, Overlay> entry : overlays.entrySet()) { | ||
if (!processedDocuments.containsKey(entry.getKey())) { | ||
documentsToFetch.add(entry.getKey()); | ||
documentCount++; | ||
} | ||
newLargestBatchId = entry.getValue().getLargestBatchId(); | ||
} | ||
newOffset = | ||
IndexOffset.create( | ||
newOffset.getReadTime(), newOffset.getDocumentKey(), newLargestBatchId); | ||
|
||
// Fetch the remote documents and apply the mutations before adding them to the results map. | ||
Map<DocumentKey, MutableDocument> remoteDocuments = | ||
remoteDocumentCache.getAll(documentsToFetch); | ||
for (Map.Entry<DocumentKey, MutableDocument> entry : remoteDocuments.entrySet()) { | ||
Overlay overlay = overlays.get(entry.getKey()); | ||
overlay.getMutation().applyToLocalView(entry.getValue(), null, Timestamp.now()); | ||
|
||
returnedDocuments = returnedDocuments.insert(entry.getKey(), entry.getValue()); | ||
} | ||
} | ||
return new Pair<>(newOffset, returnedDocuments); | ||
} | ||
|
||
private ImmutableSortedMap<DocumentKey, Document> getDocumentsMatchingCollectionQuery( | ||
Query query, IndexOffset offset) { | ||
Map<DocumentKey, MutableDocument> remoteDocuments = | ||
remoteDocumentCache.getAll(query.getPath(), offset); | ||
Map<DocumentKey, Mutation> overlays = documentOverlayCache.getOverlays(query.getPath(), -1); | ||
Map<DocumentKey, Overlay> overlays = documentOverlayCache.getOverlays(query.getPath(), -1); | ||
|
||
// As documents might match the query because of their overlay we need to include documents | ||
// for all overlays in the initial document set. | ||
for (Map.Entry<DocumentKey, Mutation> entry : overlays.entrySet()) { | ||
for (Map.Entry<DocumentKey, Overlay> entry : overlays.entrySet()) { | ||
if (!remoteDocuments.containsKey(entry.getKey())) { | ||
remoteDocuments.put(entry.getKey(), MutableDocument.newInvalidDocument(entry.getKey())); | ||
} | ||
|
@@ -273,16 +376,38 @@ private ImmutableSortedMap<DocumentKey, Document> getDocumentsMatchingCollection | |
// Apply the overlays and match against the query. | ||
ImmutableSortedMap<DocumentKey, Document> results = emptyDocumentMap(); | ||
for (Map.Entry<DocumentKey, MutableDocument> docEntry : remoteDocuments.entrySet()) { | ||
Mutation overlay = overlays.get(docEntry.getKey()); | ||
Overlay overlay = overlays.get(docEntry.getKey()); | ||
if (overlay != null) { | ||
overlay.applyToLocalView(docEntry.getValue(), null, Timestamp.now()); | ||
overlay.getMutation().applyToLocalView(docEntry.getValue(), null, Timestamp.now()); | ||
} | ||
// Finally, insert the documents that still match the query | ||
if (query.matches(docEntry.getValue())) { | ||
results = results.insert(docEntry.getKey(), docEntry.getValue()); | ||
} | ||
} | ||
|
||
return results; | ||
} | ||
|
||
/** | ||
* Returns the offset for the index based on the newly indexed documents. | ||
* | ||
* <p>If there are no documents, returns an offset with the latest remote version. | ||
*/ | ||
private IndexOffset getNewOffset( | ||
ImmutableSortedMap<DocumentKey, Document> documents, IndexOffset currentOffset) { | ||
if (documents.isEmpty()) { | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. FWIW This should eventually live in the IndexBackfiller. There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Discussed offline, this probably does not and we need to leave it here. |
||
return IndexOffset.create( | ||
remoteDocumentCache.getLatestReadTime(), currentOffset.getLargestBatchId()); | ||
} else { | ||
IndexOffset latestOffset = currentOffset; | ||
Iterator<Map.Entry<DocumentKey, Document>> it = documents.iterator(); | ||
while (it.hasNext()) { | ||
IndexOffset newOffset = IndexOffset.fromDocument(it.next().getValue()); | ||
if (newOffset.compareTo(latestOffset) > 0) { | ||
latestOffset = newOffset; | ||
} | ||
} | ||
return latestOffset; | ||
} | ||
} | ||
} |
Oops, something went wrong.
Add this suggestion to a batch that can be applied as a single commit.
This suggestion is invalid because no changes were made to the code.
Suggestions cannot be applied while the pull request is closed.
Suggestions cannot be applied while viewing a subset of changes.
Only one suggestion per line can be applied in a batch.
Add this suggestion to a batch that can be applied as a single commit.
Applying suggestions on deleted lines is not supported.
You must change the existing code in this line in order to create a valid suggestion.
Outdated suggestions cannot be applied.
This suggestion has been applied or marked resolved.
Suggestions cannot be applied from pending reviews.
Suggestions cannot be applied on multi-line comments.
Suggestions cannot be applied while the pull request is queued to merge.
Suggestion cannot be applied right now. Please check back later.
Uh oh!
There was an error while loading. Please reload this page.