@@ -743,8 +743,11 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 static struct {
 	struct list_head idle_ws;
 	spinlock_t ws_lock;
-	int num_ws;
-	atomic_t alloc_ws;
+	/* Number of free workspaces */
+	int free_ws;
+	/* Total number of allocated workspaces */
+	atomic_t total_ws;
+	/* Waiters for a free workspace */
 	wait_queue_head_t ws_wait;
 } btrfs_comp_ws[BTRFS_COMPRESS_TYPES];
 
@@ -760,7 +763,7 @@ void __init btrfs_init_compress(void)
 	for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
 		INIT_LIST_HEAD(&btrfs_comp_ws[i].idle_ws);
 		spin_lock_init(&btrfs_comp_ws[i].ws_lock);
-		atomic_set(&btrfs_comp_ws[i].alloc_ws, 0);
+		atomic_set(&btrfs_comp_ws[i].total_ws, 0);
 		init_waitqueue_head(&btrfs_comp_ws[i].ws_wait);
 	}
 }
@@ -777,35 +780,35 @@ static struct list_head *find_workspace(int type)
 
 	struct list_head *idle_ws	= &btrfs_comp_ws[idx].idle_ws;
 	spinlock_t *ws_lock		= &btrfs_comp_ws[idx].ws_lock;
-	atomic_t *alloc_ws		= &btrfs_comp_ws[idx].alloc_ws;
+	atomic_t *total_ws		= &btrfs_comp_ws[idx].total_ws;
 	wait_queue_head_t *ws_wait	= &btrfs_comp_ws[idx].ws_wait;
-	int *num_ws			= &btrfs_comp_ws[idx].num_ws;
+	int *free_ws			= &btrfs_comp_ws[idx].free_ws;
 again:
 	spin_lock(ws_lock);
 	if (!list_empty(idle_ws)) {
 		workspace = idle_ws->next;
 		list_del(workspace);
-		(*num_ws)--;
+		(*free_ws)--;
 		spin_unlock(ws_lock);
 		return workspace;
 
 	}
-	if (atomic_read(alloc_ws) > cpus) {
+	if (atomic_read(total_ws) > cpus) {
 		DEFINE_WAIT(wait);
 
 		spin_unlock(ws_lock);
 		prepare_to_wait(ws_wait, &wait, TASK_UNINTERRUPTIBLE);
-		if (atomic_read(alloc_ws) > cpus && !*num_ws)
+		if (atomic_read(total_ws) > cpus && !*free_ws)
 			schedule();
 		finish_wait(ws_wait, &wait);
 		goto again;
 	}
-	atomic_inc(alloc_ws);
+	atomic_inc(total_ws);
 	spin_unlock(ws_lock);
 
 	workspace = btrfs_compress_op[idx]->alloc_workspace();
 	if (IS_ERR(workspace)) {
-		atomic_dec(alloc_ws);
+		atomic_dec(total_ws);
 		wake_up(ws_wait);
 	}
 	return workspace;
@@ -820,21 +823,21 @@ static void free_workspace(int type, struct list_head *workspace)
 	int idx = type - 1;
 	struct list_head *idle_ws	= &btrfs_comp_ws[idx].idle_ws;
 	spinlock_t *ws_lock		= &btrfs_comp_ws[idx].ws_lock;
-	atomic_t *alloc_ws		= &btrfs_comp_ws[idx].alloc_ws;
+	atomic_t *total_ws		= &btrfs_comp_ws[idx].total_ws;
 	wait_queue_head_t *ws_wait	= &btrfs_comp_ws[idx].ws_wait;
-	int *num_ws			= &btrfs_comp_ws[idx].num_ws;
+	int *free_ws			= &btrfs_comp_ws[idx].free_ws;
 
 	spin_lock(ws_lock);
-	if (*num_ws < num_online_cpus()) {
+	if (*free_ws < num_online_cpus()) {
 		list_add(workspace, idle_ws);
-		(*num_ws)++;
+		(*free_ws)++;
 		spin_unlock(ws_lock);
 		goto wake;
 	}
 	spin_unlock(ws_lock);
 
 	btrfs_compress_op[idx]->free_workspace(workspace);
-	atomic_dec(alloc_ws);
+	atomic_dec(total_ws);
 wake:
 	/*
 	 * Make sure counter is updated before we wake up waiters.
@@ -857,7 +860,7 @@ static void free_workspaces(void)
 			workspace = btrfs_comp_ws[i].idle_ws.next;
 			list_del(workspace);
 			btrfs_compress_op[i]->free_workspace(workspace);
-			atomic_dec(&btrfs_comp_ws[i].alloc_ws);
+			atomic_dec(&btrfs_comp_ws[i].total_ws);
 		}
 	}
 }
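The hunks above only rename the per-type counters (alloc_ws -> total_ws, num_ws -> free_ws) and document them; the pool logic is unchanged. As a rough, self-contained illustration of that logic (not the kernel code: the pthread locking, the singly linked idle list, and every name below are simplified stand-ins for the spinlock/wait-queue machinery in fs/btrfs/compression.c), the two counters interact like this:

/*
 * Illustrative user-space sketch of the workspace pool, under the
 * assumptions stated above.  total_ws counts every workspace currently
 * allocated for the type; free_ws counts those sitting on the idle list.
 */
#include <pthread.h>
#include <stdlib.h>
#include <unistd.h>

struct workspace {
	struct workspace *next;		/* singly linked idle list */
};

static struct {
	struct workspace *idle_ws;	/* idle (free) workspaces */
	pthread_mutex_t ws_lock;
	int free_ws;			/* number of free workspaces */
	int total_ws;			/* total allocated workspaces */
	pthread_cond_t ws_wait;		/* waiters for a free workspace */
} comp_ws = {
	.ws_lock = PTHREAD_MUTEX_INITIALIZER,
	.ws_wait = PTHREAD_COND_INITIALIZER,
};

/* Take an idle workspace, or allocate a new one while under the CPU cap. */
static struct workspace *find_workspace(void)
{
	long cpus = sysconf(_SC_NPROCESSORS_ONLN);
	struct workspace *ws;

	pthread_mutex_lock(&comp_ws.ws_lock);
	for (;;) {
		if (comp_ws.idle_ws) {
			ws = comp_ws.idle_ws;
			comp_ws.idle_ws = ws->next;
			comp_ws.free_ws--;
			pthread_mutex_unlock(&comp_ws.ws_lock);
			return ws;
		}
		if (comp_ws.total_ws <= cpus)
			break;
		/* Over the cap and nothing idle: sleep until one is freed. */
		pthread_cond_wait(&comp_ws.ws_wait, &comp_ws.ws_lock);
	}
	comp_ws.total_ws++;
	pthread_mutex_unlock(&comp_ws.ws_lock);

	ws = calloc(1, sizeof(*ws));
	if (!ws) {
		/* Allocation failed: give the slot back and wake a waiter. */
		pthread_mutex_lock(&comp_ws.ws_lock);
		comp_ws.total_ws--;
		pthread_cond_signal(&comp_ws.ws_wait);
		pthread_mutex_unlock(&comp_ws.ws_lock);
	}
	return ws;
}

/* Return a workspace to the idle list, or release it if the list is full. */
static void free_workspace(struct workspace *ws)
{
	pthread_mutex_lock(&comp_ws.ws_lock);
	if (comp_ws.free_ws < sysconf(_SC_NPROCESSORS_ONLN)) {
		ws->next = comp_ws.idle_ws;
		comp_ws.idle_ws = ws;
		comp_ws.free_ws++;
	} else {
		comp_ws.total_ws--;
		free(ws);
	}
	pthread_cond_signal(&comp_ws.ws_wait);
	pthread_mutex_unlock(&comp_ws.ws_lock);
}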