@@ -242,109 +242,38 @@ unsigned long long poke_real_address_q(unsigned long long addr,
 #define phys_to_virt(address)	(__va(address))
 #endif
 
-/*
- * On 32-bit SH, we traditionally have the whole physical address space
- * mapped at all times (as MIPS does), so "ioremap()" and "iounmap()" do
- * not need to do anything but place the address in the proper segment.
- * This is true for P1 and P2 addresses, as well as some P3 ones.
- * However, most of the P3 addresses and newer cores using extended
- * addressing need to map through page tables, so the ioremap()
- * implementation becomes a bit more complicated.
- *
- * See arch/sh/mm/ioremap.c for additional notes on this.
- *
- * We cheat a bit and always return uncachable areas until we've fixed
- * the drivers to handle caching properly.
- *
- * On the SH-5 the concept of segmentation in the 1:1 PXSEG sense simply
- * doesn't exist, so everything must go through page tables.
- */
 #ifdef CONFIG_MMU
+void iounmap(void __iomem *addr);
 void __iomem *__ioremap_caller(phys_addr_t offset, unsigned long size,
 			       pgprot_t prot, void *caller);
-void iounmap(void __iomem *addr);
-
-static inline void __iomem *
-__ioremap(phys_addr_t offset, unsigned long size, pgprot_t prot)
-{
-	return __ioremap_caller(offset, size, prot, __builtin_return_address(0));
-}
-
-static inline void __iomem *
-__ioremap_29bit(phys_addr_t offset, unsigned long size, pgprot_t prot)
-{
-#ifdef CONFIG_29BIT
-	phys_addr_t last_addr = offset + size - 1;
-
-	/*
-	 * For P1 and P2 space this is trivial, as everything is already
-	 * mapped. Uncached access for P1 addresses are done through P2.
-	 * In the P3 case or for addresses outside of the 29-bit space,
-	 * mapping must be done by the PMB or by using page tables.
-	 */
-	if (likely(PXSEG(offset) < P3SEG && PXSEG(last_addr) < P3SEG)) {
-		u64 flags = pgprot_val(prot);
-
-		/*
-		 * Anything using the legacy PTEA space attributes needs
-		 * to be kicked down to page table mappings.
-		 */
-		if (unlikely(flags & _PAGE_PCC_MASK))
-			return NULL;
-		if (unlikely(flags & _PAGE_CACHABLE))
-			return (void __iomem *)P1SEGADDR(offset);
-
-		return (void __iomem *)P2SEGADDR(offset);
-	}
-
-	/* P4 above the store queues are always mapped. */
-	if (unlikely(offset >= P3_ADDR_MAX))
-		return (void __iomem *)P4SEGADDR(offset);
-#endif
-
-	return NULL;
-}
-
-static inline void __iomem *
-__ioremap_mode(phys_addr_t offset, unsigned long size, pgprot_t prot)
-{
-	void __iomem *ret;
-
-	ret = __ioremap_trapped(offset, size);
-	if (ret)
-		return ret;
-
-	ret = __ioremap_29bit(offset, size, prot);
-	if (ret)
-		return ret;
-
-	return __ioremap(offset, size, prot);
-}
-#else
-#define __ioremap(offset, size, prot)		((void __iomem *)(offset))
-#define __ioremap_mode(offset, size, prot)	((void __iomem *)(offset))
-static inline void iounmap(void __iomem *addr) {}
-#endif /* CONFIG_MMU */
 
 static inline void __iomem *ioremap(phys_addr_t offset, unsigned long size)
 {
-	return __ioremap_mode(offset, size, PAGE_KERNEL_NOCACHE);
+	return __ioremap_caller(offset, size, PAGE_KERNEL_NOCACHE,
+				__builtin_return_address(0));
 }
 
 static inline void __iomem *
 ioremap_cache(phys_addr_t offset, unsigned long size)
 {
-	return __ioremap_mode(offset, size, PAGE_KERNEL);
+	return __ioremap_caller(offset, size, PAGE_KERNEL,
+				__builtin_return_address(0));
 }
 #define ioremap_cache ioremap_cache
 
 #ifdef CONFIG_HAVE_IOREMAP_PROT
-static inline void __iomem *
-ioremap_prot(phys_addr_t offset, unsigned long size, unsigned long flags)
+static inline void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
+		unsigned long flags)
 {
-	return __ioremap_mode(offset, size, __pgprot(flags));
+	return __ioremap_caller(offset, size, __pgprot(flags),
+			__builtin_return_address(0));
 }
-#endif
+#endif /* CONFIG_HAVE_IOREMAP_PROT */
+
+#else /* CONFIG_MMU */
+#define iounmap(addr)		do { } while (0)
+#define ioremap(offset, size)	((void __iomem *)(unsigned long)(offset))
+#endif /* CONFIG_MMU */
 
 #define ioremap_uc	ioremap
 
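After this change, every MMU mapping request funnels through __ioremap_caller(), with each inline wrapper passing __builtin_return_address(0) so the mm code can attribute the mapping to the driver that created it; on !CONFIG_MMU kernels, ioremap() degenerates to a cast and iounmap() to a no-op. As a minimal sketch of how a driver consumes this interface (not part of the patch; MY_DEV_PHYS, MY_DEV_SIZE, and the mydev names are hypothetical placeholders):

#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>

#define MY_DEV_PHYS	0xfe400000UL	/* hypothetical register base */
#define MY_DEV_SIZE	0x100		/* hypothetical window size */

static void __iomem *regs;

static int __init mydev_init(void)
{
	/* On MMU kernels this now ends up in __ioremap_caller(). */
	regs = ioremap(MY_DEV_PHYS, MY_DEV_SIZE);
	if (!regs)
		return -ENOMEM;

	writel(0x1, regs);	/* e.g. hit a register at offset 0 */
	return 0;
}

static void __exit mydev_exit(void)
{
	iounmap(regs);		/* compiles away on !CONFIG_MMU */
}

module_init(mydev_init);
module_exit(mydev_exit);
MODULE_LICENSE("GPL");

The caller pointer only changes bookkeeping, not behavior: drivers keep calling the same ioremap()/iounmap() API, while the arch code gains an accurate record of who owns each mapping.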