
Commit 13f1fc8

Christoph Hellwig authored and Rich Felker committed
sh: move the ioremap implementation out of line
Move the internal implementation details of ioremap out of line, no need
to expose any of this to drivers for a slow path API.

Signed-off-by: Christoph Hellwig <[email protected]>
Signed-off-by: Rich Felker <[email protected]>
1 parent 3eef6b7 commit 13f1fc8
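For context, ioremap() is a setup-time (slow path) API: a driver typically maps its register window once at probe and then uses the returned cookie with the ordinary MMIO accessors. A minimal, hypothetical sketch of that usage pattern follows; the device base address, size, and register offset are invented for illustration, and only the ioremap()/iounmap()/writel() calls themselves are the real kernel API this commit is about.

/*
 * Hypothetical example: EXAMPLE_* values are made up for illustration.
 */
#include <linux/io.h>
#include <linux/errno.h>

#define EXAMPLE_MMIO_BASE       0xfe000000UL    /* invented device base */
#define EXAMPLE_MMIO_SIZE       0x100UL
#define EXAMPLE_REG_CTRL        0x00

static void __iomem *example_regs;

static int example_probe(void)
{
        /* Slow path: map the register window once at probe time. */
        example_regs = ioremap(EXAMPLE_MMIO_BASE, EXAMPLE_MMIO_SIZE);
        if (!example_regs)
                return -ENOMEM;

        /* Fast path: plain MMIO accessors on the mapped cookie. */
        writel(0x1, example_regs + EXAMPLE_REG_CTRL);
        return 0;
}

static void example_remove(void)
{
        iounmap(example_regs);
}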

File tree

arch/sh/include/asm/io.h
arch/sh/mm/ioremap.c

2 files changed: 68 additions, 86 deletions


arch/sh/include/asm/io.h

Lines changed: 15 additions & 86 deletions
@@ -242,109 +242,38 @@ unsigned long long poke_real_address_q(unsigned long long addr,
 #define phys_to_virt(address)	(__va(address))
 #endif
 
-/*
- * On 32-bit SH, we traditionally have the whole physical address space
- * mapped at all times (as MIPS does), so "ioremap()" and "iounmap()" do
- * not need to do anything but place the address in the proper segment.
- * This is true for P1 and P2 addresses, as well as some P3 ones.
- * However, most of the P3 addresses and newer cores using extended
- * addressing need to map through page tables, so the ioremap()
- * implementation becomes a bit more complicated.
- *
- * See arch/sh/mm/ioremap.c for additional notes on this.
- *
- * We cheat a bit and always return uncachable areas until we've fixed
- * the drivers to handle caching properly.
- *
- * On the SH-5 the concept of segmentation in the 1:1 PXSEG sense simply
- * doesn't exist, so everything must go through page tables.
- */
 #ifdef CONFIG_MMU
+void iounmap(void __iomem *addr);
 void __iomem *__ioremap_caller(phys_addr_t offset, unsigned long size,
 			       pgprot_t prot, void *caller);
-void iounmap(void __iomem *addr);
-
-static inline void __iomem *
-__ioremap(phys_addr_t offset, unsigned long size, pgprot_t prot)
-{
-	return __ioremap_caller(offset, size, prot, __builtin_return_address(0));
-}
-
-static inline void __iomem *
-__ioremap_29bit(phys_addr_t offset, unsigned long size, pgprot_t prot)
-{
-#ifdef CONFIG_29BIT
-	phys_addr_t last_addr = offset + size - 1;
-
-	/*
-	 * For P1 and P2 space this is trivial, as everything is already
-	 * mapped. Uncached access for P1 addresses are done through P2.
-	 * In the P3 case or for addresses outside of the 29-bit space,
-	 * mapping must be done by the PMB or by using page tables.
-	 */
-	if (likely(PXSEG(offset) < P3SEG && PXSEG(last_addr) < P3SEG)) {
-		u64 flags = pgprot_val(prot);
-
-		/*
-		 * Anything using the legacy PTEA space attributes needs
-		 * to be kicked down to page table mappings.
-		 */
-		if (unlikely(flags & _PAGE_PCC_MASK))
-			return NULL;
-		if (unlikely(flags & _PAGE_CACHABLE))
-			return (void __iomem *)P1SEGADDR(offset);
-
-		return (void __iomem *)P2SEGADDR(offset);
-	}
-
-	/* P4 above the store queues are always mapped. */
-	if (unlikely(offset >= P3_ADDR_MAX))
-		return (void __iomem *)P4SEGADDR(offset);
-#endif
-
-	return NULL;
-}
-
-static inline void __iomem *
-__ioremap_mode(phys_addr_t offset, unsigned long size, pgprot_t prot)
-{
-	void __iomem *ret;
-
-	ret = __ioremap_trapped(offset, size);
-	if (ret)
-		return ret;
-
-	ret = __ioremap_29bit(offset, size, prot);
-	if (ret)
-		return ret;
-
-	return __ioremap(offset, size, prot);
-}
-#else
-#define __ioremap(offset, size, prot)		((void __iomem *)(offset))
-#define __ioremap_mode(offset, size, prot)	((void __iomem *)(offset))
-static inline void iounmap(void __iomem *addr) {}
-#endif /* CONFIG_MMU */
 
 static inline void __iomem *ioremap(phys_addr_t offset, unsigned long size)
 {
-	return __ioremap_mode(offset, size, PAGE_KERNEL_NOCACHE);
+	return __ioremap_caller(offset, size, PAGE_KERNEL_NOCACHE,
+				__builtin_return_address(0));
 }
 
 static inline void __iomem *
 ioremap_cache(phys_addr_t offset, unsigned long size)
 {
-	return __ioremap_mode(offset, size, PAGE_KERNEL);
+	return __ioremap_caller(offset, size, PAGE_KERNEL,
+				__builtin_return_address(0));
 }
 #define ioremap_cache ioremap_cache
 
 #ifdef CONFIG_HAVE_IOREMAP_PROT
-static inline void __iomem *
-ioremap_prot(phys_addr_t offset, unsigned long size, unsigned long flags)
+static inline void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
+					 unsigned long flags)
 {
-	return __ioremap_mode(offset, size, __pgprot(flags));
+	return __ioremap_caller(offset, size, __pgprot(flags),
+				__builtin_return_address(0));
 }
-#endif
+#endif /* CONFIG_HAVE_IOREMAP_PROT */
+
+#else /* CONFIG_MMU */
+#define iounmap(addr)		do { } while (0)
+#define ioremap(offset, size)	((void __iomem *)(unsigned long)(offset))
+#endif /* CONFIG_MMU */
 
 #define ioremap_uc ioremap

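After this change every header-side wrapper above is a thin inline that forwards its own pgprot choice plus __builtin_return_address(0) to the out-of-line __ioremap_caller(). As a hedged illustration of the least common of those wrappers, ioremap_prot() (only built when CONFIG_HAVE_IOREMAP_PROT is set), here is a sketch with an invented base address; passing pgprot_val(PAGE_KERNEL_NOCACHE) simply reproduces the protection bits that plain ioremap() requests.

/* Sketch only: the physical base address below is invented. */
#include <linux/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>

static void __iomem *example_map_uncached(void)
{
        /* Same protection bits the plain ioremap() wrapper passes down. */
        return ioremap_prot(0xfe000000UL, PAGE_SIZE,
                            pgprot_val(PAGE_KERNEL_NOCACHE));
}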
arch/sh/mm/ioremap.c

Lines changed: 53 additions & 0 deletions
@@ -26,6 +26,51 @@
 #include <asm/mmu.h>
 #include "ioremap.h"
 
+/*
+ * On 32-bit SH, we traditionally have the whole physical address space mapped
+ * at all times (as MIPS does), so "ioremap()" and "iounmap()" do not need to do
+ * anything but place the address in the proper segment. This is true for P1
+ * and P2 addresses, as well as some P3 ones. However, most of the P3 addresses
+ * and newer cores using extended addressing need to map through page tables, so
+ * the ioremap() implementation becomes a bit more complicated.
+ */
+#ifdef CONFIG_29BIT
+static void __iomem *
+__ioremap_29bit(phys_addr_t offset, unsigned long size, pgprot_t prot)
+{
+	phys_addr_t last_addr = offset + size - 1;
+
+	/*
+	 * For P1 and P2 space this is trivial, as everything is already
+	 * mapped. Uncached access for P1 addresses are done through P2.
+	 * In the P3 case or for addresses outside of the 29-bit space,
+	 * mapping must be done by the PMB or by using page tables.
+	 */
+	if (likely(PXSEG(offset) < P3SEG && PXSEG(last_addr) < P3SEG)) {
+		u64 flags = pgprot_val(prot);
+
+		/*
+		 * Anything using the legacy PTEA space attributes needs
+		 * to be kicked down to page table mappings.
+		 */
+		if (unlikely(flags & _PAGE_PCC_MASK))
+			return NULL;
+		if (unlikely(flags & _PAGE_CACHABLE))
+			return (void __iomem *)P1SEGADDR(offset);
+
+		return (void __iomem *)P2SEGADDR(offset);
+	}
+
+	/* P4 above the store queues are always mapped. */
+	if (unlikely(offset >= P3_ADDR_MAX))
+		return (void __iomem *)P4SEGADDR(offset);
+
+	return NULL;
+}
+#else
+#define __ioremap_29bit(offset, size, prot)	NULL
+#endif /* CONFIG_29BIT */
+
 /*
  * Remap an arbitrary physical address space into the kernel virtual
  * address space. Needed when the kernel wants to access high addresses
@@ -43,6 +88,14 @@ __ioremap_caller(phys_addr_t phys_addr, unsigned long size,
 	unsigned long offset, last_addr, addr, orig_addr;
 	void __iomem *mapped;
 
+	mapped = __ioremap_trapped(phys_addr, size);
+	if (mapped)
+		return mapped;
+
+	mapped = __ioremap_29bit(phys_addr, size, pgprot);
+	if (mapped)
+		return mapped;
+
 	/* Don't allow wraparound or zero size */
 	last_addr = phys_addr + size - 1;
 	if (!size || last_addr < phys_addr)
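The 29-bit fast path moved above never touches page tables: for addresses below P3SEG it only re-bases the physical address into the fixed P1 (cached) or P2 (uncached) window. The following standalone, user-space sketch of that arithmetic assumes the conventional SH segment bases (0x80000000 for P1, 0xa0000000 for P2) and mirrors the idea behind the kernel's P1SEGADDR()/P2SEGADDR() helpers; it is an illustration, not kernel code.

/* Standalone illustration (not kernel code) of the 29-bit segment trick. */
#include <stdio.h>

#define P1SEG 0x80000000UL	/* fixed, cached window onto physical memory */
#define P2SEG 0xa0000000UL	/* fixed, uncached window onto physical memory */

/* Keep the low 29 bits of the physical address and place them in a segment. */
static unsigned long p1segaddr(unsigned long phys)
{
        return (phys & 0x1fffffffUL) | P1SEG;
}

static unsigned long p2segaddr(unsigned long phys)
{
        return (phys & 0x1fffffffUL) | P2SEG;
}

int main(void)
{
        unsigned long phys = 0x1f000000UL;	/* hypothetical device address */

        printf("phys %#lx -> cached P1 %#lx, uncached P2 %#lx\n",
               phys, p1segaddr(phys), p2segaddr(phys));
        return 0;
}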
