Skip to content

Commit a88336d

Browse files
committed
Merge branch 'for-chris-4.7' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux into for-linus-4.7
2 parents 02da2d7 + 680834c commit a88336d

27 files changed

+2071
-1661
lines changed

fs/btrfs/backref.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1991,7 +1991,7 @@ struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
19911991

19921992
ifp = kmalloc(sizeof(*ifp), GFP_NOFS);
19931993
if (!ifp) {
1994-
kfree(fspath);
1994+
vfree(fspath);
19951995
return ERR_PTR(-ENOMEM);
19961996
}
19971997

fs/btrfs/compression.c

Lines changed: 61 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -743,8 +743,11 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
743743
static struct {
744744
struct list_head idle_ws;
745745
spinlock_t ws_lock;
746-
int num_ws;
747-
atomic_t alloc_ws;
746+
/* Number of free workspaces */
747+
int free_ws;
748+
/* Total number of allocated workspaces */
749+
atomic_t total_ws;
750+
/* Waiters for a free workspace */
748751
wait_queue_head_t ws_wait;
749752
} btrfs_comp_ws[BTRFS_COMPRESS_TYPES];
750753

@@ -758,16 +761,34 @@ void __init btrfs_init_compress(void)
758761
int i;
759762

760763
for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
764+
struct list_head *workspace;
765+
761766
INIT_LIST_HEAD(&btrfs_comp_ws[i].idle_ws);
762767
spin_lock_init(&btrfs_comp_ws[i].ws_lock);
763-
atomic_set(&btrfs_comp_ws[i].alloc_ws, 0);
768+
atomic_set(&btrfs_comp_ws[i].total_ws, 0);
764769
init_waitqueue_head(&btrfs_comp_ws[i].ws_wait);
770+
771+
/*
772+
* Preallocate one workspace for each compression type so
773+
* we can guarantee forward progress in the worst case
774+
*/
775+
workspace = btrfs_compress_op[i]->alloc_workspace();
776+
if (IS_ERR(workspace)) {
777+
printk(KERN_WARNING
778+
"BTRFS: cannot preallocate compression workspace, will try later");
779+
} else {
780+
atomic_set(&btrfs_comp_ws[i].total_ws, 1);
781+
btrfs_comp_ws[i].free_ws = 1;
782+
list_add(workspace, &btrfs_comp_ws[i].idle_ws);
783+
}
765784
}
766785
}
767786

768787
/*
769-
* this finds an available workspace or allocates a new one
770-
* ERR_PTR is returned if things go bad.
788+
* This finds an available workspace or allocates a new one.
789+
* If it's not possible to allocate a new one, waits until there's one.
790+
 * Preallocation makes a forward progress guarantee and we do not return
791+
* errors.
771792
*/
772793
static struct list_head *find_workspace(int type)
773794
{
@@ -777,36 +798,58 @@ static struct list_head *find_workspace(int type)
777798

778799
struct list_head *idle_ws = &btrfs_comp_ws[idx].idle_ws;
779800
spinlock_t *ws_lock = &btrfs_comp_ws[idx].ws_lock;
780-
atomic_t *alloc_ws = &btrfs_comp_ws[idx].alloc_ws;
801+
atomic_t *total_ws = &btrfs_comp_ws[idx].total_ws;
781802
wait_queue_head_t *ws_wait = &btrfs_comp_ws[idx].ws_wait;
782-
int *num_ws = &btrfs_comp_ws[idx].num_ws;
803+
int *free_ws = &btrfs_comp_ws[idx].free_ws;
783804
again:
784805
spin_lock(ws_lock);
785806
if (!list_empty(idle_ws)) {
786807
workspace = idle_ws->next;
787808
list_del(workspace);
788-
(*num_ws)--;
809+
(*free_ws)--;
789810
spin_unlock(ws_lock);
790811
return workspace;
791812

792813
}
793-
if (atomic_read(alloc_ws) > cpus) {
814+
if (atomic_read(total_ws) > cpus) {
794815
DEFINE_WAIT(wait);
795816

796817
spin_unlock(ws_lock);
797818
prepare_to_wait(ws_wait, &wait, TASK_UNINTERRUPTIBLE);
798-
if (atomic_read(alloc_ws) > cpus && !*num_ws)
819+
if (atomic_read(total_ws) > cpus && !*free_ws)
799820
schedule();
800821
finish_wait(ws_wait, &wait);
801822
goto again;
802823
}
803-
atomic_inc(alloc_ws);
824+
atomic_inc(total_ws);
804825
spin_unlock(ws_lock);
805826

806827
workspace = btrfs_compress_op[idx]->alloc_workspace();
807828
if (IS_ERR(workspace)) {
808-
atomic_dec(alloc_ws);
829+
atomic_dec(total_ws);
809830
wake_up(ws_wait);
831+
832+
/*
833+
* Do not return the error but go back to waiting. There's a
834+
* workspace preallocated for each type and the compression
835+
* time is bounded so we get to a workspace eventually. This
836+
* makes our caller's life easier.
837+
*
838+
* To prevent silent and low-probability deadlocks (when the
839+
* initial preallocation fails), check if there are any
840+
* workspaces at all.
841+
*/
842+
if (atomic_read(total_ws) == 0) {
843+
static DEFINE_RATELIMIT_STATE(_rs,
844+
/* once per minute */ 60 * HZ,
845+
/* no burst */ 1);
846+
847+
if (__ratelimit(&_rs)) {
848+
printk(KERN_WARNING
849+
"no compression workspaces, low memory, retrying");
850+
}
851+
}
852+
goto again;
810853
}
811854
return workspace;
812855
}
@@ -820,21 +863,21 @@ static void free_workspace(int type, struct list_head *workspace)
820863
int idx = type - 1;
821864
struct list_head *idle_ws = &btrfs_comp_ws[idx].idle_ws;
822865
spinlock_t *ws_lock = &btrfs_comp_ws[idx].ws_lock;
823-
atomic_t *alloc_ws = &btrfs_comp_ws[idx].alloc_ws;
866+
atomic_t *total_ws = &btrfs_comp_ws[idx].total_ws;
824867
wait_queue_head_t *ws_wait = &btrfs_comp_ws[idx].ws_wait;
825-
int *num_ws = &btrfs_comp_ws[idx].num_ws;
868+
int *free_ws = &btrfs_comp_ws[idx].free_ws;
826869

827870
spin_lock(ws_lock);
828-
if (*num_ws < num_online_cpus()) {
871+
if (*free_ws < num_online_cpus()) {
829872
list_add(workspace, idle_ws);
830-
(*num_ws)++;
873+
(*free_ws)++;
831874
spin_unlock(ws_lock);
832875
goto wake;
833876
}
834877
spin_unlock(ws_lock);
835878

836879
btrfs_compress_op[idx]->free_workspace(workspace);
837-
atomic_dec(alloc_ws);
880+
atomic_dec(total_ws);
838881
wake:
839882
/*
840883
* Make sure counter is updated before we wake up waiters.
@@ -857,7 +900,7 @@ static void free_workspaces(void)
857900
workspace = btrfs_comp_ws[i].idle_ws.next;
858901
list_del(workspace);
859902
btrfs_compress_op[i]->free_workspace(workspace);
860-
atomic_dec(&btrfs_comp_ws[i].alloc_ws);
903+
atomic_dec(&btrfs_comp_ws[i].total_ws);
861904
}
862905
}
863906
}
@@ -894,8 +937,6 @@ int btrfs_compress_pages(int type, struct address_space *mapping,
894937
int ret;
895938

896939
workspace = find_workspace(type);
897-
if (IS_ERR(workspace))
898-
return PTR_ERR(workspace);
899940

900941
ret = btrfs_compress_op[type-1]->compress_pages(workspace, mapping,
901942
start, len, pages,
@@ -930,8 +971,6 @@ static int btrfs_decompress_biovec(int type, struct page **pages_in,
930971
int ret;
931972

932973
workspace = find_workspace(type);
933-
if (IS_ERR(workspace))
934-
return PTR_ERR(workspace);
935974

936975
ret = btrfs_compress_op[type-1]->decompress_biovec(workspace, pages_in,
937976
disk_start,
@@ -952,8 +991,6 @@ int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
952991
int ret;
953992

954993
workspace = find_workspace(type);
955-
if (IS_ERR(workspace))
956-
return PTR_ERR(workspace);
957994

958995
ret = btrfs_compress_op[type-1]->decompress(workspace, data_in,
959996
dest_page, start_byte,

fs/btrfs/ctree.c

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1011,7 +1011,7 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
10111011
return ret;
10121012
if (refs == 0) {
10131013
ret = -EROFS;
1014-
btrfs_std_error(root->fs_info, ret, NULL);
1014+
btrfs_handle_fs_error(root->fs_info, ret, NULL);
10151015
return ret;
10161016
}
10171017
} else {
@@ -1928,7 +1928,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
19281928
child = read_node_slot(root, mid, 0);
19291929
if (!child) {
19301930
ret = -EROFS;
1931-
btrfs_std_error(root->fs_info, ret, NULL);
1931+
btrfs_handle_fs_error(root->fs_info, ret, NULL);
19321932
goto enospc;
19331933
}
19341934

@@ -2031,7 +2031,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
20312031
*/
20322032
if (!left) {
20332033
ret = -EROFS;
2034-
btrfs_std_error(root->fs_info, ret, NULL);
2034+
btrfs_handle_fs_error(root->fs_info, ret, NULL);
20352035
goto enospc;
20362036
}
20372037
wret = balance_node_right(trans, root, mid, left);

0 commit comments

Comments
 (0)