12
12
#include <crypto/scatterwalk.h>
13
13
#include <linux/cryptouser.h>
14
14
#include <linux/err.h>
15
+ #include <linux/highmem.h>
15
16
#include <linux/kernel.h>
16
17
#include <linux/module.h>
18
+ #include <linux/overflow.h>
17
19
#include <linux/scatterlist.h>
18
20
#include <linux/seq_file.h>
19
21
#include <linux/slab.h>
23
25
24
26
#include "compress.h"
25
27
28
/*
 * Size in bytes of the per-CPU vmalloc'ed destination bounce buffer
 * (struct scomp_scratch::dst); also caps dlen when that buffer is used.
 */
#define SCOMP_SCRATCH_SIZE 65400
26
30
/*
 * Per-CPU bounce buffers used when a request's source or destination
 * scatterlist cannot be mapped linearly in place.
 */
struct scomp_scratch {
	spinlock_t lock;	/* serialises use of the buffers below */
	union {
		/*
		 * Source bounce buffer: a single page from
		 * alloc_pages_node().  The union gives two views of the
		 * same allocation: @src for copying data in, @saddr so
		 * it can be released with free_page().
		 */
		void *src;
		unsigned long saddr;
	};
	void *dst;		/* destination bounce buffer, vmalloc'ed, SCOMP_SCRATCH_SIZE bytes */
};
31
38
@@ -66,7 +73,7 @@ static void crypto_scomp_free_scratches(void)
66
73
for_each_possible_cpu (i ) {
67
74
scratch = per_cpu_ptr (& scomp_scratch , i );
68
75
69
- vfree (scratch -> src );
76
+ free_page (scratch -> saddr );
70
77
vfree (scratch -> dst );
71
78
scratch -> src = NULL ;
72
79
scratch -> dst = NULL ;
@@ -79,14 +86,15 @@ static int crypto_scomp_alloc_scratches(void)
79
86
int i ;
80
87
81
88
for_each_possible_cpu (i ) {
89
+ struct page * page ;
82
90
void * mem ;
83
91
84
92
scratch = per_cpu_ptr (& scomp_scratch , i );
85
93
86
- mem = vmalloc_node ( SCOMP_SCRATCH_SIZE , cpu_to_node (i ));
87
- if (!mem )
94
+ page = alloc_pages_node ( cpu_to_node (i ), GFP_KERNEL , 0 );
95
+ if (!page )
88
96
goto error ;
89
- scratch -> src = mem ;
97
+ scratch -> src = page_address ( page ) ;
90
98
mem = vmalloc_node (SCOMP_SCRATCH_SIZE , cpu_to_node (i ));
91
99
if (!mem )
92
100
goto error ;
@@ -161,76 +169,88 @@ static int crypto_scomp_init_tfm(struct crypto_tfm *tfm)
161
169
162
170
static int scomp_acomp_comp_decomp (struct acomp_req * req , int dir )
163
171
{
172
+ struct scomp_scratch * scratch = raw_cpu_ptr (& scomp_scratch );
164
173
struct crypto_acomp * tfm = crypto_acomp_reqtfm (req );
165
- void * * tfm_ctx = acomp_tfm_ctx (tfm );
174
+ struct crypto_scomp * * tfm_ctx = acomp_tfm_ctx (tfm );
166
175
struct crypto_scomp * scomp = * tfm_ctx ;
167
176
struct crypto_acomp_stream * stream ;
168
- struct scomp_scratch * scratch ;
177
+ unsigned int slen = req -> slen ;
178
+ unsigned int dlen = req -> dlen ;
179
+ struct page * spage , * dpage ;
180
+ unsigned int soff , doff ;
169
181
void * src , * dst ;
170
- unsigned int dlen ;
182
+ unsigned int n ;
171
183
int ret ;
172
184
173
- if (!req -> src || !req -> slen || req -> slen > SCOMP_SCRATCH_SIZE )
185
+ if (!req -> src || !slen )
174
186
return - EINVAL ;
175
187
176
- if (req -> dst && ! req -> dlen )
188
+ if (! req -> dst || ! dlen )
177
189
return - EINVAL ;
178
190
179
- if (!req -> dlen || req -> dlen > SCOMP_SCRATCH_SIZE )
180
- req -> dlen = SCOMP_SCRATCH_SIZE ;
181
-
182
- dlen = req -> dlen ;
191
+ soff = req -> src -> offset ;
192
+ spage = nth_page (sg_page (req -> src ), soff / PAGE_SIZE );
193
+ soff = offset_in_page (soff );
183
194
184
- scratch = raw_cpu_ptr (& scomp_scratch );
185
- spin_lock_bh (& scratch -> lock );
186
-
187
- if (sg_nents (req -> src ) == 1 && !PageHighMem (sg_page (req -> src ))) {
188
- src = page_to_virt (sg_page (req -> src )) + req -> src -> offset ;
189
- } else {
190
- scatterwalk_map_and_copy (scratch -> src , req -> src , 0 ,
191
- req -> slen , 0 );
195
+ n = slen / PAGE_SIZE ;
196
+ n += (offset_in_page (slen ) + soff - 1 ) / PAGE_SIZE ;
197
+ if (slen <= req -> src -> length && (!PageHighMem (nth_page (spage , n )) ||
198
+ size_add (soff , slen ) <= PAGE_SIZE ))
199
+ src = kmap_local_page (spage ) + soff ;
200
+ else
192
201
src = scratch -> src ;
193
- }
194
202
195
- if (req -> dst && sg_nents (req -> dst ) == 1 && !PageHighMem (sg_page (req -> dst )))
196
- dst = page_to_virt (sg_page (req -> dst )) + req -> dst -> offset ;
197
- else
203
+ doff = req -> dst -> offset ;
204
+ dpage = nth_page (sg_page (req -> dst ), doff / PAGE_SIZE );
205
+ doff = offset_in_page (doff );
206
+
207
+ n = dlen / PAGE_SIZE ;
208
+ n += (offset_in_page (dlen ) + doff - 1 ) / PAGE_SIZE ;
209
+ if (dlen <= req -> dst -> length && (!PageHighMem (nth_page (dpage , n )) ||
210
+ size_add (doff , dlen ) <= PAGE_SIZE ))
211
+ dst = kmap_local_page (dpage ) + doff ;
212
+ else {
213
+ if (dlen > SCOMP_SCRATCH_SIZE )
214
+ dlen = SCOMP_SCRATCH_SIZE ;
198
215
dst = scratch -> dst ;
216
+ }
217
+
218
+ spin_lock_bh (& scratch -> lock );
219
+
220
+ if (src == scratch -> src )
221
+ memcpy_from_sglist (src , req -> src , 0 , slen );
199
222
200
223
stream = raw_cpu_ptr (crypto_scomp_alg (scomp )-> stream );
201
224
spin_lock (& stream -> lock );
202
225
if (dir )
203
- ret = crypto_scomp_compress (scomp , src , req -> slen ,
204
- dst , & req -> dlen , stream -> ctx );
226
+ ret = crypto_scomp_compress (scomp , src , slen ,
227
+ dst , & dlen , stream -> ctx );
205
228
else
206
- ret = crypto_scomp_decompress (scomp , src , req -> slen ,
207
- dst , & req -> dlen , stream -> ctx );
229
+ ret = crypto_scomp_decompress (scomp , src , slen ,
230
+ dst , & dlen , stream -> ctx );
231
+
232
+ if (dst == scratch -> dst )
233
+ memcpy_to_sglist (req -> dst , 0 , dst , dlen );
234
+
208
235
spin_unlock (& stream -> lock );
209
- if (!ret ) {
210
- if (!req -> dst ) {
211
- req -> dst = sgl_alloc (req -> dlen , GFP_ATOMIC , NULL );
212
- if (!req -> dst ) {
213
- ret = - ENOMEM ;
214
- goto out ;
215
- }
216
- } else if (req -> dlen > dlen ) {
217
- ret = - ENOSPC ;
218
- goto out ;
219
- }
220
- if (dst == scratch -> dst ) {
221
- scatterwalk_map_and_copy (scratch -> dst , req -> dst , 0 ,
222
- req -> dlen , 1 );
223
- } else {
224
- int nr_pages = DIV_ROUND_UP (req -> dst -> offset + req -> dlen , PAGE_SIZE );
225
- int i ;
226
- struct page * dst_page = sg_page (req -> dst );
227
-
228
- for (i = 0 ; i < nr_pages ; i ++ )
229
- flush_dcache_page (dst_page + i );
236
+ spin_unlock_bh (& scratch -> lock );
237
+
238
+ req -> dlen = dlen ;
239
+
240
+ if (dst != scratch -> dst ) {
241
+ kunmap_local (dst );
242
+ dlen += doff ;
243
+ for (;;) {
244
+ flush_dcache_page (dpage );
245
+ if (dlen <= PAGE_SIZE )
246
+ break ;
247
+ dlen -= PAGE_SIZE ;
248
+ dpage = nth_page (dpage , 1 );
230
249
}
231
250
}
232
- out :
233
- spin_unlock_bh (& scratch -> lock );
251
+ if (src != scratch -> src )
252
+ kunmap_local (src );
253
+
234
254
return ret ;
235
255
}
236
256
@@ -277,7 +297,6 @@ int crypto_init_scomp_ops_async(struct crypto_tfm *tfm)
277
297
278
298
crt -> compress = scomp_acomp_compress ;
279
299
crt -> decompress = scomp_acomp_decompress ;
280
- crt -> dst_free = sgl_free ;
281
300
282
301
return 0 ;
283
302
}
0 commit comments