@@ -121,6 +121,8 @@ const struct xattr_handler *ext2_xattr_handlers[] = {
 	NULL
 };
 
+#define EA_BLOCK_CACHE(inode)	(EXT2_SB(inode->i_sb)->s_ea_block_cache)
+
 static inline const struct xattr_handler *
 ext2_xattr_handler(int name_index)
 {
@@ -150,7 +152,7 @@ ext2_xattr_get(struct inode *inode, int name_index, const char *name,
 	size_t name_len, size;
 	char *end;
 	int error;
-	struct mb_cache *ext2_mb_cache = EXT2_SB(inode->i_sb)->s_mb_cache;
+	struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode);
 
 	ea_idebug(inode, "name=%d.%s, buffer=%p, buffer_size=%ld",
 		  name_index, name, buffer, (long)buffer_size);
@@ -195,7 +197,7 @@ bad_block: ext2_error(inode->i_sb, "ext2_xattr_get",
 			goto found;
 		entry = next;
 	}
-	if (ext2_xattr_cache_insert(ext2_mb_cache, bh))
+	if (ext2_xattr_cache_insert(ea_block_cache, bh))
 		ea_idebug(inode, "cache insert failed");
 	error = -ENODATA;
 	goto cleanup;
@@ -208,7 +210,7 @@ bad_block: ext2_error(inode->i_sb, "ext2_xattr_get",
 	    le16_to_cpu(entry->e_value_offs) + size > inode->i_sb->s_blocksize)
 		goto bad_block;
 
-	if (ext2_xattr_cache_insert(ext2_mb_cache, bh))
+	if (ext2_xattr_cache_insert(ea_block_cache, bh))
 		ea_idebug(inode, "cache insert failed");
 	if (buffer) {
 		error = -ERANGE;
@@ -246,7 +248,7 @@ ext2_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
 	char *end;
 	size_t rest = buffer_size;
 	int error;
-	struct mb_cache *ext2_mb_cache = EXT2_SB(inode->i_sb)->s_mb_cache;
+	struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode);
 
 	ea_idebug(inode, "buffer=%p, buffer_size=%ld",
 		  buffer, (long)buffer_size);
@@ -281,7 +283,7 @@ bad_block: ext2_error(inode->i_sb, "ext2_xattr_list",
 			goto bad_block;
 		entry = next;
 	}
-	if (ext2_xattr_cache_insert(ext2_mb_cache, bh))
+	if (ext2_xattr_cache_insert(ea_block_cache, bh))
 		ea_idebug(inode, "cache insert failed");
 
 	/* list the attribute names */
@@ -493,7 +495,7 @@ bad_block: ext2_error(sb, "ext2_xattr_set",
 			 * This must happen under buffer lock for
 			 * ext2_xattr_set2() to reliably detect modified block
 			 */
-			mb_cache_entry_delete(EXT2_SB(sb)->s_mb_cache, hash,
+			mb_cache_entry_delete(EA_BLOCK_CACHE(inode), hash,
 					      bh->b_blocknr);
 
 			/* keep the buffer locked while modifying it. */
@@ -627,7 +629,7 @@ ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,
 	struct super_block *sb = inode->i_sb;
 	struct buffer_head *new_bh = NULL;
 	int error;
-	struct mb_cache *ext2_mb_cache = EXT2_SB(sb)->s_mb_cache;
+	struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode);
 
 	if (header) {
 		new_bh = ext2_xattr_cache_find(inode, header);
@@ -655,7 +657,7 @@ ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,
 			   don't need to change the reference count. */
 			new_bh = old_bh;
 			get_bh(new_bh);
-			ext2_xattr_cache_insert(ext2_mb_cache, new_bh);
+			ext2_xattr_cache_insert(ea_block_cache, new_bh);
 		} else {
 			/* We need to allocate a new block */
 			ext2_fsblk_t goal = ext2_group_first_block_no(sb,
@@ -676,7 +678,7 @@ ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,
 			memcpy(new_bh->b_data, header, new_bh->b_size);
 			set_buffer_uptodate(new_bh);
 			unlock_buffer(new_bh);
-			ext2_xattr_cache_insert(ext2_mb_cache, new_bh);
+			ext2_xattr_cache_insert(ea_block_cache, new_bh);
 
 			ext2_xattr_update_super_block(sb);
 		}
@@ -721,7 +723,7 @@ ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,
 		 * This must happen under buffer lock for
 		 * ext2_xattr_set2() to reliably detect freed block
 		 */
-		mb_cache_entry_delete(ext2_mb_cache, hash,
+		mb_cache_entry_delete(ea_block_cache, hash,
 				      old_bh->b_blocknr);
 		/* Free the old block. */
 		ea_bdebug(old_bh, "freeing");
@@ -795,7 +797,7 @@ ext2_xattr_delete_inode(struct inode *inode)
 	 * This must happen under buffer lock for ext2_xattr_set2() to
 	 * reliably detect freed block
 	 */
-	mb_cache_entry_delete(EXT2_SB(inode->i_sb)->s_mb_cache, hash,
+	mb_cache_entry_delete(EA_BLOCK_CACHE(inode), hash,
 			      bh->b_blocknr);
 	ext2_free_blocks(inode, EXT2_I(inode)->i_file_acl, 1);
 	get_bh(bh);
@@ -897,13 +899,13 @@ ext2_xattr_cache_find(struct inode *inode, struct ext2_xattr_header *header)
 {
 	__u32 hash = le32_to_cpu(header->h_hash);
 	struct mb_cache_entry *ce;
-	struct mb_cache *ext2_mb_cache = EXT2_SB(inode->i_sb)->s_mb_cache;
+	struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode);
 
 	if (!header->h_hash)
 		return NULL;  /* never share */
 	ea_idebug(inode, "looking for cached blocks [%x]", (int)hash);
 again:
-	ce = mb_cache_entry_find_first(ext2_mb_cache, hash);
+	ce = mb_cache_entry_find_first(ea_block_cache, hash);
 	while (ce) {
 		struct buffer_head *bh;
 
@@ -924,7 +926,7 @@ ext2_xattr_cache_find(struct inode *inode, struct ext2_xattr_header *header)
 			 * entry is still hashed is reliable.
 			 */
 			if (hlist_bl_unhashed(&ce->e_hash_list)) {
-				mb_cache_entry_put(ext2_mb_cache, ce);
+				mb_cache_entry_put(ea_block_cache, ce);
 				unlock_buffer(bh);
 				brelse(bh);
 				goto again;
@@ -937,14 +939,14 @@ ext2_xattr_cache_find(struct inode *inode, struct ext2_xattr_header *header)
 			} else if (!ext2_xattr_cmp(header, HDR(bh))) {
 				ea_bdebug(bh, "b_count=%d",
 					  atomic_read(&(bh->b_count)));
-				mb_cache_entry_touch(ext2_mb_cache, ce);
-				mb_cache_entry_put(ext2_mb_cache, ce);
+				mb_cache_entry_touch(ea_block_cache, ce);
+				mb_cache_entry_put(ea_block_cache, ce);
 				return bh;
 			}
 			unlock_buffer(bh);
 			brelse(bh);
 		}
-		ce = mb_cache_entry_find_next(ext2_mb_cache, ce);
+		ce = mb_cache_entry_find_next(ea_block_cache, ce);
 	}
 	return NULL;
 }
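
Note (not part of the diff): every hunk above performs the same mechanical substitution. The open-coded EXT2_SB(...)->s_mb_cache lookup becomes the new EA_BLOCK_CACHE(inode) helper, which reads an s_ea_block_cache field that is presumably renamed in fs/ext2/ext2.h by the same series. The standalone C sketch below only mirrors that pattern with simplified stand-in structs so it compiles on its own; none of it is the actual kernel code, and only the macro shape matches the hunks.

/* sketch.c -- illustrative stand-ins, not the kernel definitions */
#include <stdio.h>

struct mb_cache { int placeholder; };

struct ext2_sb_info {
	struct mb_cache *s_ea_block_cache;	/* per-fs xattr block cache */
};

struct super_block {
	struct ext2_sb_info *s_fs_info;
};

struct inode {
	struct super_block *i_sb;
};

/* Simplified accessor; in the kernel EXT2_SB() is a static inline. */
#define EXT2_SB(sb)		((sb)->s_fs_info)
/* The helper introduced by the patch: one definition names the cache. */
#define EA_BLOCK_CACHE(inode)	(EXT2_SB(inode->i_sb)->s_ea_block_cache)

int main(void)
{
	struct mb_cache cache = { 0 };
	struct ext2_sb_info sbi = { .s_ea_block_cache = &cache };
	struct super_block sb = { .s_fs_info = &sbi };
	struct inode ino = { .i_sb = &sb };
	struct inode *inode = &ino;

	/* Call sites now read like the "+" lines in the hunks above. */
	struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode);

	printf("cache resolved: %s\n", ea_block_cache == &cache ? "yes" : "no");
	return 0;
}

The point of the helper is simply to centralise the field access: every xattr path names the cache the same way, so a later change to where the cache lives touches one macro instead of a dozen call sites.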