Skip to content

Commit b28cf57

Browse files
committed
Merge branch 'misc-cleanups-4.5' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux into for-linus-4.5
Signed-off-by: Chris Mason <[email protected]>
2 parents a305810 + a7ca422 commit b28cf57

24 files changed

+195
-305
lines changed

fs/btrfs/backref.c

Lines changed: 6 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -520,13 +520,10 @@ static inline int ref_for_same_block(struct __prelim_ref *ref1,
520520
static int __add_missing_keys(struct btrfs_fs_info *fs_info,
521521
struct list_head *head)
522522
{
523-
struct list_head *pos;
523+
struct __prelim_ref *ref;
524524
struct extent_buffer *eb;
525525

526-
list_for_each(pos, head) {
527-
struct __prelim_ref *ref;
528-
ref = list_entry(pos, struct __prelim_ref, list);
529-
526+
list_for_each_entry(ref, head, list) {
530527
if (ref->parent)
531528
continue;
532529
if (ref->key_for_search.type)
@@ -563,23 +560,15 @@ static int __add_missing_keys(struct btrfs_fs_info *fs_info,
563560
*/
564561
static void __merge_refs(struct list_head *head, int mode)
565562
{
566-
struct list_head *pos1;
563+
struct __prelim_ref *ref1;
567564

568-
list_for_each(pos1, head) {
569-
struct list_head *n2;
570-
struct list_head *pos2;
571-
struct __prelim_ref *ref1;
565+
list_for_each_entry(ref1, head, list) {
566+
struct __prelim_ref *ref2 = ref1, *tmp;
572567

573-
ref1 = list_entry(pos1, struct __prelim_ref, list);
574-
575-
for (pos2 = pos1->next, n2 = pos2->next; pos2 != head;
576-
pos2 = n2, n2 = pos2->next) {
577-
struct __prelim_ref *ref2;
568+
list_for_each_entry_safe_continue(ref2, tmp, head, list) {
578569
struct __prelim_ref *xchg;
579570
struct extent_inode_elem *eie;
580571

581-
ref2 = list_entry(pos2, struct __prelim_ref, list);
582-
583572
if (!ref_for_same_block(ref1, ref2))
584573
continue;
585574
if (mode == 1) {

fs/btrfs/check-integrity.c

Lines changed: 26 additions & 79 deletions
Original file line numberDiff line numberDiff line change
@@ -531,13 +531,9 @@ static struct btrfsic_block *btrfsic_block_hashtable_lookup(
531531
(((unsigned int)(dev_bytenr >> 16)) ^
532532
((unsigned int)((uintptr_t)bdev))) &
533533
(BTRFSIC_BLOCK_HASHTABLE_SIZE - 1);
534-
struct list_head *elem;
535-
536-
list_for_each(elem, h->table + hashval) {
537-
struct btrfsic_block *const b =
538-
list_entry(elem, struct btrfsic_block,
539-
collision_resolving_node);
534+
struct btrfsic_block *b;
540535

536+
list_for_each_entry(b, h->table + hashval, collision_resolving_node) {
541537
if (b->dev_state->bdev == bdev && b->dev_bytenr == dev_bytenr)
542538
return b;
543539
}
@@ -588,13 +584,9 @@ static struct btrfsic_block_link *btrfsic_block_link_hashtable_lookup(
588584
((unsigned int)((uintptr_t)bdev_ref_to)) ^
589585
((unsigned int)((uintptr_t)bdev_ref_from))) &
590586
(BTRFSIC_BLOCK_LINK_HASHTABLE_SIZE - 1);
591-
struct list_head *elem;
592-
593-
list_for_each(elem, h->table + hashval) {
594-
struct btrfsic_block_link *const l =
595-
list_entry(elem, struct btrfsic_block_link,
596-
collision_resolving_node);
587+
struct btrfsic_block_link *l;
597588

589+
list_for_each_entry(l, h->table + hashval, collision_resolving_node) {
598590
BUG_ON(NULL == l->block_ref_to);
599591
BUG_ON(NULL == l->block_ref_from);
600592
if (l->block_ref_to->dev_state->bdev == bdev_ref_to &&
@@ -639,13 +631,9 @@ static struct btrfsic_dev_state *btrfsic_dev_state_hashtable_lookup(
639631
const unsigned int hashval =
640632
(((unsigned int)((uintptr_t)bdev)) &
641633
(BTRFSIC_DEV2STATE_HASHTABLE_SIZE - 1));
642-
struct list_head *elem;
643-
644-
list_for_each(elem, h->table + hashval) {
645-
struct btrfsic_dev_state *const ds =
646-
list_entry(elem, struct btrfsic_dev_state,
647-
collision_resolving_node);
634+
struct btrfsic_dev_state *ds;
648635

636+
list_for_each_entry(ds, h->table + hashval, collision_resolving_node) {
649637
if (ds->bdev == bdev)
650638
return ds;
651639
}
@@ -1720,29 +1708,20 @@ static int btrfsic_read_block(struct btrfsic_state *state,
17201708

17211709
static void btrfsic_dump_database(struct btrfsic_state *state)
17221710
{
1723-
struct list_head *elem_all;
1711+
const struct btrfsic_block *b_all;
17241712

17251713
BUG_ON(NULL == state);
17261714

17271715
printk(KERN_INFO "all_blocks_list:\n");
1728-
list_for_each(elem_all, &state->all_blocks_list) {
1729-
const struct btrfsic_block *const b_all =
1730-
list_entry(elem_all, struct btrfsic_block,
1731-
all_blocks_node);
1732-
struct list_head *elem_ref_to;
1733-
struct list_head *elem_ref_from;
1716+
list_for_each_entry(b_all, &state->all_blocks_list, all_blocks_node) {
1717+
const struct btrfsic_block_link *l;
17341718

17351719
printk(KERN_INFO "%c-block @%llu (%s/%llu/%d)\n",
17361720
btrfsic_get_block_type(state, b_all),
17371721
b_all->logical_bytenr, b_all->dev_state->name,
17381722
b_all->dev_bytenr, b_all->mirror_num);
17391723

1740-
list_for_each(elem_ref_to, &b_all->ref_to_list) {
1741-
const struct btrfsic_block_link *const l =
1742-
list_entry(elem_ref_to,
1743-
struct btrfsic_block_link,
1744-
node_ref_to);
1745-
1724+
list_for_each_entry(l, &b_all->ref_to_list, node_ref_to) {
17461725
printk(KERN_INFO " %c @%llu (%s/%llu/%d)"
17471726
" refers %u* to"
17481727
" %c @%llu (%s/%llu/%d)\n",
@@ -1757,12 +1736,7 @@ static void btrfsic_dump_database(struct btrfsic_state *state)
17571736
l->block_ref_to->mirror_num);
17581737
}
17591738

1760-
list_for_each(elem_ref_from, &b_all->ref_from_list) {
1761-
const struct btrfsic_block_link *const l =
1762-
list_entry(elem_ref_from,
1763-
struct btrfsic_block_link,
1764-
node_ref_from);
1765-
1739+
list_for_each_entry(l, &b_all->ref_from_list, node_ref_from) {
17661740
printk(KERN_INFO " %c @%llu (%s/%llu/%d)"
17671741
" is ref %u* from"
17681742
" %c @%llu (%s/%llu/%d)\n",
@@ -1845,8 +1819,7 @@ static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state,
18451819
&state->block_hashtable);
18461820
if (NULL != block) {
18471821
u64 bytenr = 0;
1848-
struct list_head *elem_ref_to;
1849-
struct list_head *tmp_ref_to;
1822+
struct btrfsic_block_link *l, *tmp;
18501823

18511824
if (block->is_superblock) {
18521825
bytenr = btrfs_super_bytenr((struct btrfs_super_block *)
@@ -1967,13 +1940,8 @@ static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state,
19671940
* because it still carries valuable information
19681941
* like whether it was ever written and IO completed.
19691942
*/
1970-
list_for_each_safe(elem_ref_to, tmp_ref_to,
1971-
&block->ref_to_list) {
1972-
struct btrfsic_block_link *const l =
1973-
list_entry(elem_ref_to,
1974-
struct btrfsic_block_link,
1975-
node_ref_to);
1976-
1943+
list_for_each_entry_safe(l, tmp, &block->ref_to_list,
1944+
node_ref_to) {
19771945
if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
19781946
btrfsic_print_rem_link(state, l);
19791947
l->ref_cnt--;
@@ -2436,7 +2404,7 @@ static int btrfsic_check_all_ref_blocks(struct btrfsic_state *state,
24362404
struct btrfsic_block *const block,
24372405
int recursion_level)
24382406
{
2439-
struct list_head *elem_ref_to;
2407+
const struct btrfsic_block_link *l;
24402408
int ret = 0;
24412409

24422410
if (recursion_level >= 3 + BTRFS_MAX_LEVEL) {
@@ -2464,11 +2432,7 @@ static int btrfsic_check_all_ref_blocks(struct btrfsic_state *state,
24642432
* This algorithm is recursive because the amount of used stack
24652433
* space is very small and the max recursion depth is limited.
24662434
*/
2467-
list_for_each(elem_ref_to, &block->ref_to_list) {
2468-
const struct btrfsic_block_link *const l =
2469-
list_entry(elem_ref_to, struct btrfsic_block_link,
2470-
node_ref_to);
2471-
2435+
list_for_each_entry(l, &block->ref_to_list, node_ref_to) {
24722436
if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
24732437
printk(KERN_INFO
24742438
"rl=%d, %c @%llu (%s/%llu/%d)"
@@ -2561,7 +2525,7 @@ static int btrfsic_is_block_ref_by_superblock(
25612525
const struct btrfsic_block *block,
25622526
int recursion_level)
25632527
{
2564-
struct list_head *elem_ref_from;
2528+
const struct btrfsic_block_link *l;
25652529

25662530
if (recursion_level >= 3 + BTRFS_MAX_LEVEL) {
25672531
/* refer to comment at "abort cyclic linkage (case 1)" */
@@ -2576,11 +2540,7 @@ static int btrfsic_is_block_ref_by_superblock(
25762540
* This algorithm is recursive because the amount of used stack space
25772541
* is very small and the max recursion depth is limited.
25782542
*/
2579-
list_for_each(elem_ref_from, &block->ref_from_list) {
2580-
const struct btrfsic_block_link *const l =
2581-
list_entry(elem_ref_from, struct btrfsic_block_link,
2582-
node_ref_from);
2583-
2543+
list_for_each_entry(l, &block->ref_from_list, node_ref_from) {
25842544
if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
25852545
printk(KERN_INFO
25862546
"rl=%d, %c @%llu (%s/%llu/%d)"
@@ -2669,7 +2629,7 @@ static void btrfsic_dump_tree_sub(const struct btrfsic_state *state,
26692629
const struct btrfsic_block *block,
26702630
int indent_level)
26712631
{
2672-
struct list_head *elem_ref_to;
2632+
const struct btrfsic_block_link *l;
26732633
int indent_add;
26742634
static char buf[80];
26752635
int cursor_position;
@@ -2704,11 +2664,7 @@ static void btrfsic_dump_tree_sub(const struct btrfsic_state *state,
27042664
}
27052665

27062666
cursor_position = indent_level;
2707-
list_for_each(elem_ref_to, &block->ref_to_list) {
2708-
const struct btrfsic_block_link *const l =
2709-
list_entry(elem_ref_to, struct btrfsic_block_link,
2710-
node_ref_to);
2711-
2667+
list_for_each_entry(l, &block->ref_to_list, node_ref_to) {
27122668
while (cursor_position < indent_level) {
27132669
printk(" ");
27142670
cursor_position++;
@@ -3165,8 +3121,7 @@ int btrfsic_mount(struct btrfs_root *root,
31653121
void btrfsic_unmount(struct btrfs_root *root,
31663122
struct btrfs_fs_devices *fs_devices)
31673123
{
3168-
struct list_head *elem_all;
3169-
struct list_head *tmp_all;
3124+
struct btrfsic_block *b_all, *tmp_all;
31703125
struct btrfsic_state *state;
31713126
struct list_head *dev_head = &fs_devices->devices;
31723127
struct btrfs_device *device;
@@ -3206,20 +3161,12 @@ void btrfsic_unmount(struct btrfs_root *root,
32063161
* just free all memory that was allocated dynamically.
32073162
* Free the blocks and the block_links.
32083163
*/
3209-
list_for_each_safe(elem_all, tmp_all, &state->all_blocks_list) {
3210-
struct btrfsic_block *const b_all =
3211-
list_entry(elem_all, struct btrfsic_block,
3212-
all_blocks_node);
3213-
struct list_head *elem_ref_to;
3214-
struct list_head *tmp_ref_to;
3215-
3216-
list_for_each_safe(elem_ref_to, tmp_ref_to,
3217-
&b_all->ref_to_list) {
3218-
struct btrfsic_block_link *const l =
3219-
list_entry(elem_ref_to,
3220-
struct btrfsic_block_link,
3221-
node_ref_to);
3164+
list_for_each_entry_safe(b_all, tmp_all, &state->all_blocks_list,
3165+
all_blocks_node) {
3166+
struct btrfsic_block_link *l, *tmp;
32223167

3168+
list_for_each_entry_safe(l, tmp, &b_all->ref_to_list,
3169+
node_ref_to) {
32233170
if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
32243171
btrfsic_print_rem_link(state, l);
32253172

fs/btrfs/ctree.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1555,7 +1555,7 @@ noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
15551555
return 0;
15561556
}
15571557

1558-
search_start = buf->start & ~((u64)(1024 * 1024 * 1024) - 1);
1558+
search_start = buf->start & ~((u64)SZ_1G - 1);
15591559

15601560
if (parent)
15611561
btrfs_set_lock_blocking(parent);

fs/btrfs/ctree.h

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -35,6 +35,7 @@
3535
#include <linux/btrfs.h>
3636
#include <linux/workqueue.h>
3737
#include <linux/security.h>
38+
#include <linux/sizes.h>
3839
#include "extent_io.h"
3940
#include "extent_map.h"
4041
#include "async-thread.h"
@@ -199,9 +200,9 @@ static const int btrfs_csum_sizes[] = { 4 };
199200
/* ioprio of readahead is set to idle */
200201
#define BTRFS_IOPRIO_READA (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0))
201202

202-
#define BTRFS_DIRTY_METADATA_THRESH (32 * 1024 * 1024)
203+
#define BTRFS_DIRTY_METADATA_THRESH SZ_32M
203204

204-
#define BTRFS_MAX_EXTENT_SIZE (128 * 1024 * 1024)
205+
#define BTRFS_MAX_EXTENT_SIZE SZ_128M
205206

206207
/*
207208
* The key defines the order in the tree, and so it also defines (optimal)
@@ -4347,7 +4348,7 @@ static inline void __btrfs_clear_fs_incompat(struct btrfs_fs_info *fs_info,
43474348
#define btrfs_fs_incompat(fs_info, opt) \
43484349
__btrfs_fs_incompat((fs_info), BTRFS_FEATURE_INCOMPAT_##opt)
43494350

4350-
static inline int __btrfs_fs_incompat(struct btrfs_fs_info *fs_info, u64 flag)
4351+
static inline bool __btrfs_fs_incompat(struct btrfs_fs_info *fs_info, u64 flag)
43514352
{
43524353
struct btrfs_super_block *disk_super;
43534354
disk_super = fs_info->super_copy;

fs/btrfs/delayed-inode.c

Lines changed: 1 addition & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -54,16 +54,11 @@ static inline void btrfs_init_delayed_node(
5454
delayed_node->root = root;
5555
delayed_node->inode_id = inode_id;
5656
atomic_set(&delayed_node->refs, 0);
57-
delayed_node->count = 0;
58-
delayed_node->flags = 0;
5957
delayed_node->ins_root = RB_ROOT;
6058
delayed_node->del_root = RB_ROOT;
6159
mutex_init(&delayed_node->mutex);
62-
delayed_node->index_cnt = 0;
6360
INIT_LIST_HEAD(&delayed_node->n_list);
6461
INIT_LIST_HEAD(&delayed_node->p_list);
65-
delayed_node->bytes_reserved = 0;
66-
memset(&delayed_node->inode_item, 0, sizeof(delayed_node->inode_item));
6762
}
6863

6964
static inline int btrfs_is_continuous_delayed_item(
@@ -132,7 +127,7 @@ static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
132127
if (node)
133128
return node;
134129

135-
node = kmem_cache_alloc(delayed_node_cache, GFP_NOFS);
130+
node = kmem_cache_zalloc(delayed_node_cache, GFP_NOFS);
136131
if (!node)
137132
return ERR_PTR(-ENOMEM);
138133
btrfs_init_delayed_node(node, root, ino);

fs/btrfs/disk-io.c

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -2824,7 +2824,7 @@ int open_ctree(struct super_block *sb,
28242824

28252825
fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
28262826
fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
2827-
4 * 1024 * 1024 / PAGE_CACHE_SIZE);
2827+
SZ_4M / PAGE_CACHE_SIZE);
28282828

28292829
tree_root->nodesize = nodesize;
28302830
tree_root->sectorsize = sectorsize;
@@ -3996,7 +3996,6 @@ static void __btrfs_btree_balance_dirty(struct btrfs_root *root,
39963996
balance_dirty_pages_ratelimited(
39973997
root->fs_info->btree_inode->i_mapping);
39983998
}
3999-
return;
40003999
}
40014000

40024001
void btrfs_btree_balance_dirty(struct btrfs_root *root)

fs/btrfs/disk-io.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -19,7 +19,7 @@
1919
#ifndef __DISKIO__
2020
#define __DISKIO__
2121

22-
#define BTRFS_SUPER_INFO_OFFSET (64 * 1024)
22+
#define BTRFS_SUPER_INFO_OFFSET SZ_64K
2323
#define BTRFS_SUPER_INFO_SIZE 4096
2424

2525
#define BTRFS_SUPER_MIRROR_MAX 3
@@ -35,7 +35,7 @@ enum btrfs_wq_endio_type {
3535

3636
static inline u64 btrfs_sb_offset(int mirror)
3737
{
38-
u64 start = 16 * 1024;
38+
u64 start = SZ_16K;
3939
if (mirror)
4040
return start << (BTRFS_SUPER_MIRROR_SHIFT * mirror);
4141
return BTRFS_SUPER_INFO_OFFSET;

0 commit comments

Comments
 (0)