@@ -888,6 +888,18 @@ bool __folio_lock_or_retry(struct folio *folio, struct mm_struct *mm,
 void unlock_page(struct page *page);
 void folio_unlock(struct folio *folio);
 
+/**
+ * folio_trylock() - Attempt to lock a folio.
+ * @folio: The folio to attempt to lock.
+ *
+ * Sometimes it is undesirable to wait for a folio to be unlocked (eg
+ * when the locks are being taken in the wrong order, or if making
+ * progress through a batch of folios is more important than processing
+ * them in order). Usually folio_lock() is the correct function to call.
+ *
+ * Context: Any context.
+ * Return: Whether the lock was successfully acquired.
+ */
 static inline bool folio_trylock(struct folio *folio)
 {
         return likely(!test_and_set_bit_lock(PG_locked, folio_flags(folio, 0)));
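As a hedged illustration of the kernel-doc above (not part of this patch): a caller that values forward progress over strict ordering can skip folios it cannot lock immediately instead of sleeping. process_one() is a hypothetical per-folio helper, not an existing kernel function.

/* Hedged sketch, not part of the patch: skip folios we cannot lock right now. */
void process_one(struct folio *folio);	/* hypothetical helper */

static void process_batch(struct folio **folios, unsigned int nr)
{
        unsigned int i;

        for (i = 0; i < nr; i++) {
                if (!folio_trylock(folios[i]))
                        continue;	/* do not sleep; revisit this folio later */
                process_one(folios[i]);
                folio_unlock(folios[i]);
        }
}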
@@ -901,13 +913,46 @@ static inline int trylock_page(struct page *page)
         return folio_trylock(page_folio(page));
 }
 
+/**
+ * folio_lock() - Lock this folio.
+ * @folio: The folio to lock.
+ *
+ * The folio lock protects against many things, probably more than it
+ * should. It is primarily held while a folio is being brought uptodate,
+ * either from its backing file or from swap. It is also held while a
+ * folio is being truncated from its address_space, so holding the lock
+ * is sufficient to keep folio->mapping stable.
+ *
+ * The folio lock is also held while write() is modifying the page to
+ * provide POSIX atomicity guarantees (as long as the write does not
+ * cross a page boundary). Other modifications to the data in the folio
+ * do not hold the folio lock and can race with writes, eg DMA and stores
+ * to mapped pages.
+ *
+ * Context: May sleep. If you need to acquire the locks of two or
+ * more folios, they must be in order of ascending index, if they are
+ * in the same address_space. If they are in different address_spaces,
+ * acquire the lock of the folio which belongs to the address_space which
+ * has the lowest address in memory first.
+ */
 static inline void folio_lock(struct folio *folio)
 {
         might_sleep();
         if (!folio_trylock(folio))
                 __folio_lock(folio);
 }
 
+/**
+ * lock_page() - Lock the folio containing this page.
+ * @page: The page to lock.
+ *
+ * See folio_lock() for a description of what the lock protects.
+ * This is a legacy function and new code should probably use folio_lock()
+ * instead.
+ *
+ * Context: May sleep. Pages in the same folio share a lock, so do not
+ * attempt to lock two pages which share a folio.
+ */
 static inline void lock_page(struct page *page)
 {
         struct folio *folio;
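As a hedged sketch of the ordering rule documented for folio_lock() above (not part of this patch), locking two folios that belong to the same address_space might look like the following; swap() is the kernel's value-swap macro and folio->index is the folio's file index.

/* Hedged sketch, not part of the patch: ascending-index lock order. */
static void lock_two_folios_same_mapping(struct folio *a, struct folio *b)
{
        if (a->index > b->index)
                swap(a, b);	/* lock the lower index first */
        folio_lock(a);
        folio_lock(b);
}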
@@ -918,6 +963,16 @@ static inline void lock_page(struct page *page)
                 __folio_lock(folio);
 }
 
+/**
+ * folio_lock_killable() - Lock this folio, interruptible by a fatal signal.
+ * @folio: The folio to lock.
+ *
+ * Attempts to lock the folio, like folio_lock(), except that the sleep
+ * to acquire the lock is interruptible by a fatal signal.
+ *
+ * Context: May sleep; see folio_lock().
+ * Return: 0 if the lock was acquired; -EINTR if a fatal signal was received.
+ */
 static inline int folio_lock_killable(struct folio *folio)
 {
         might_sleep();
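A hedged sketch (not part of this patch) of how a caller might propagate the -EINTR return value documented above; do_work_on_folio() is a hypothetical wrapper.

/* Hedged sketch, not part of the patch: remain killable while waiting. */
static int do_work_on_folio(struct folio *folio)
{
        int err = folio_lock_killable(folio);

        if (err)
                return err;	/* -EINTR: a fatal signal arrived while sleeping */
        /* ... operate on the locked folio ... */
        folio_unlock(folio);
        return 0;
}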
@@ -964,8 +1019,8 @@ int folio_wait_bit_killable(struct folio *folio, int bit_nr);
  * Wait for a folio to be unlocked.
  *
  * This must be called with the caller "holding" the folio,
- * ie with increased "page->count" so that the folio won't
- * go away during the wait..
+ * ie with increased folio reference count so that the folio won't
+ * go away during the wait.
  */
 static inline void folio_wait_locked(struct folio *folio)
 {
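A hedged sketch (not part of this patch) of the reference-count requirement in the comment above, assuming the folio_get()/folio_put() reference helpers: the caller pins the folio before sleeping in folio_wait_locked() so it cannot be freed during the wait.

/* Hedged sketch, not part of the patch: hold a reference across the wait. */
static void wait_until_unlocked(struct folio *folio)
{
        folio_get(folio);
        folio_wait_locked(folio);
        folio_put(folio);
}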