@@ -2,6 +2,7 @@
 /*
  * Copyright (C) 2017-2018 HUAWEI, Inc.
  *             https://www.huawei.com/
+ * Copyright (C) 2021, Alibaba Cloud
  */
 #include "internal.h"
 #include <linux/prefetch.h>
@@ -36,13 +37,6 @@ static int erofs_map_blocks_flatmode(struct inode *inode,
 	nblocks = DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
 	lastblk = nblocks - tailendpacking;
 
-	if (offset >= inode->i_size) {
-		/* leave out-of-bound access unmapped */
-		map->m_flags = 0;
-		map->m_plen = 0;
-		goto out;
-	}
-
 	/* there is no hole in flatmode */
 	map->m_flags = EROFS_MAP_MAPPED;
 
@@ -77,14 +71,90 @@ static int erofs_map_blocks_flatmode(struct inode *inode,
 		goto err_out;
 	}
 
-out:
 	map->m_llen = map->m_plen;
-
 err_out:
 	trace_erofs_map_blocks_flatmode_exit(inode, map, flags, 0);
 	return err;
 }
 
+static int erofs_map_blocks(struct inode *inode,
+			    struct erofs_map_blocks *map, int flags)
+{
+	struct super_block *sb = inode->i_sb;
+	struct erofs_inode *vi = EROFS_I(inode);
+	struct erofs_inode_chunk_index *idx;
+	struct page *page;
+	u64 chunknr;
+	unsigned int unit;
+	erofs_off_t pos;
+	int err = 0;
+
+	if (map->m_la >= inode->i_size) {
+		/* leave out-of-bound access unmapped */
+		map->m_flags = 0;
+		map->m_plen = 0;
+		goto out;
+	}
+
+	if (vi->datalayout != EROFS_INODE_CHUNK_BASED)
+		return erofs_map_blocks_flatmode(inode, map, flags);
+
+	if (vi->chunkformat & EROFS_CHUNK_FORMAT_INDEXES)
+		unit = sizeof(*idx);			/* chunk index */
+	else
+		unit = EROFS_BLOCK_MAP_ENTRY_SIZE;	/* block map */
+
+	chunknr = map->m_la >> vi->chunkbits;
+	pos = ALIGN(iloc(EROFS_SB(sb), vi->nid) + vi->inode_isize +
+		    vi->xattr_isize, unit) + unit * chunknr;
+
+	page = erofs_get_meta_page(inode->i_sb, erofs_blknr(pos));
+	if (IS_ERR(page))
+		return PTR_ERR(page);
+
+	map->m_la = chunknr << vi->chunkbits;
+	map->m_plen = min_t(erofs_off_t, 1UL << vi->chunkbits,
+			    roundup(inode->i_size - map->m_la, EROFS_BLKSIZ));
+
+	/* handle block map */
+	if (!(vi->chunkformat & EROFS_CHUNK_FORMAT_INDEXES)) {
+		__le32 *blkaddr = page_address(page) + erofs_blkoff(pos);
+
+		if (le32_to_cpu(*blkaddr) == EROFS_NULL_ADDR) {
+			map->m_flags = 0;
+		} else {
+			map->m_pa = blknr_to_addr(le32_to_cpu(*blkaddr));
+			map->m_flags = EROFS_MAP_MAPPED;
+		}
+		goto out_unlock;
+	}
+	/* parse chunk indexes */
+	idx = page_address(page) + erofs_blkoff(pos);
+	switch (le32_to_cpu(idx->blkaddr)) {
+	case EROFS_NULL_ADDR:
+		map->m_flags = 0;
+		break;
+	default:
+		/* only one device is supported for now */
+		if (idx->device_id) {
+			erofs_err(sb, "invalid device id %u @ %llu for nid %llu",
+				  le16_to_cpu(idx->device_id),
+				  chunknr, vi->nid);
+			err = -EFSCORRUPTED;
+			goto out_unlock;
+		}
+		map->m_pa = blknr_to_addr(le32_to_cpu(idx->blkaddr));
+		map->m_flags = EROFS_MAP_MAPPED;
+		break;
+	}
+out_unlock:
+	unlock_page(page);
+	put_page(page);
+out:
+	map->m_llen = map->m_plen;
+	return err;
+}
+
 static int erofs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
 		unsigned int flags, struct iomap *iomap, struct iomap *srcmap)
 {
@@ -94,7 +164,7 @@ static int erofs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
 	map.m_la = offset;
 	map.m_llen = length;
 
-	ret = erofs_map_blocks_flatmode(inode, &map, EROFS_GET_BLOCKS_RAW);
+	ret = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
 	if (ret < 0)
 		return ret;
 
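For readers tracing the new chunk-based lookup path above: the byte position of a chunk's on-disk entry is plain index arithmetic, i.e. the per-chunk entry array starts right after the on-disk inode and its inline xattrs, aligned to the entry size, and is indexed by chunk number. Below is a minimal standalone userspace sketch of that calculation, not kernel code; all numeric values (inode offset, inode/xattr sizes, chunk shift, entry size) are made-up examples rather than real on-disk constants, and ALIGN_UP is a local helper, not a kernel macro.

/*
 * Standalone sketch of the chunk-entry position arithmetic used by
 * erofs_map_blocks():
 *   pos = ALIGN(iloc + inode_isize + xattr_isize, unit) + unit * chunknr
 * Example values only; not real EROFS on-disk constants.
 */
#include <stdint.h>
#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) / (a) * (a))

int main(void)
{
	uint64_t inode_base  = 4096;	/* assumed byte offset of the inode (iloc) */
	uint32_t inode_isize = 64;	/* assumed on-disk inode size */
	uint32_t xattr_isize = 0;	/* assumed inline xattr size */
	uint32_t chunkbits   = 20;	/* assumed 1 MiB chunks */
	uint32_t unit        = 4;	/* 4-byte block-map entry; larger for chunk indexes */

	uint64_t la      = 5 * 1048576 + 12345;	/* example logical file offset */
	uint64_t chunknr = la >> chunkbits;	/* which chunk the offset falls in */

	/* byte position of that chunk's entry in the metadata area */
	uint64_t pos = ALIGN_UP(inode_base + inode_isize + xattr_isize, unit) +
		       (uint64_t)unit * chunknr;

	printf("chunk %llu, entry at byte %llu\n",
	       (unsigned long long)chunknr, (unsigned long long)pos);
	return 0;
}

With these example numbers the entry for chunk 5 lands at byte 4180; the kernel code then reads that metadata block, decodes the little-endian block address, and reports the chunk as unmapped when it equals EROFS_NULL_ADDR.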