Commit 95ffa67

Merge branch 'parisc-5.9-1' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux
Pull parisc updates from Helge Deller:
 "The majority of the patches are reverts of previous commits regarding
  the parisc-specific low level spinlocking code and barrier handling,
  with which we tried to fix CPU stalls on our build servers.

  In the end John David Anglin found the culprit: We missed a define
  for atomic64_set_release(). This seems to have fixed our issues, so
  now it's good to remove the unnecessary code again.

  Other than that it's trivial stuff: Spelling fixes, constifications
  and such"

* 'parisc-5.9-1' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux:
  parisc: make the log level string for register dumps const
  parisc: Do not use an ordered store in pa_tlb_lock()
  Revert "parisc: Revert "Release spinlocks using ordered store""
  Revert "parisc: Use ldcw instruction for SMP spinlock release barrier"
  Revert "parisc: Drop LDCW barrier in CAS code when running UP"
  Revert "parisc: Improve interrupt handling in arch_spin_lock_flags()"
  parisc: Replace HTTP links with HTTPS ones
  parisc: elf.h: delete a duplicated word
  parisc: Report bad pages as HardwareCorrupted
  parisc: Convert to BIT_MASK() and BIT_WORD()

2 parents 4da9f33 + e2693ec commit 95ffa67
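
The commit message above attributes the original CPU stalls to a missing atomic64_set_release() definition rather than to the spinlock code being reverted here. That fix itself is not part of this merge; the sketch below is only an illustration of what such a define typically looks like on parisc, where 64-bit atomics are protected by a hashed array of spinlocks, so a plain atomic64_set() already carries the required ordering. The exact file and form of the real patch are assumptions, not quoted from it.

        /* Illustrative sketch only -- not the committed fix.  On parisc,
         * atomic64_set() takes and releases one of the hashed atomic
         * spinlocks, so mapping the _release variant onto it suffices: */
        #define atomic64_set_release(v, i)      atomic64_set((v), (i))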

File tree

9 files changed: +66 -105 lines


arch/parisc/Kconfig

Lines changed: 1 addition & 1 deletion
@@ -285,7 +285,7 @@ config SMP
           On a uniprocessor machine, the kernel will run faster if you say N.
 
           See also <file:Documentation/admin-guide/lockup-watchdogs.rst> and the SMP-HOWTO
-          available at <http://www.tldp.org/docs.html#howto>.
+          available at <https://www.tldp.org/docs.html#howto>.
 
           If you don't know what to do here, say N.
 

arch/parisc/include/asm/bitops.h

Lines changed: 13 additions & 28 deletions
@@ -12,21 +12,6 @@
 #include <asm/barrier.h>
 #include <linux/atomic.h>
 
-/*
- * HP-PARISC specific bit operations
- * for a detailed description of the functions please refer
- * to include/asm-i386/bitops.h or kerneldoc
- */
-
-#if __BITS_PER_LONG == 64
-#define SHIFT_PER_LONG 6
-#else
-#define SHIFT_PER_LONG 5
-#endif
-
-#define CHOP_SHIFTCOUNT(x) (((unsigned long) (x)) & (BITS_PER_LONG - 1))
-
-
 /* See http://marc.theaimsgroup.com/?t=108826637900003 for discussion
  * on use of volatile and __*_bit() (set/clear/change):
  * *_bit() want use of volatile.
@@ -35,45 +20,45 @@
 
 static __inline__ void set_bit(int nr, volatile unsigned long * addr)
 {
-        unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
+        unsigned long mask = BIT_MASK(nr);
         unsigned long flags;
 
-        addr += (nr >> SHIFT_PER_LONG);
+        addr += BIT_WORD(nr);
         _atomic_spin_lock_irqsave(addr, flags);
         *addr |= mask;
         _atomic_spin_unlock_irqrestore(addr, flags);
 }
 
 static __inline__ void clear_bit(int nr, volatile unsigned long * addr)
 {
-        unsigned long mask = ~(1UL << CHOP_SHIFTCOUNT(nr));
+        unsigned long mask = BIT_MASK(nr);
         unsigned long flags;
 
-        addr += (nr >> SHIFT_PER_LONG);
+        addr += BIT_WORD(nr);
         _atomic_spin_lock_irqsave(addr, flags);
-        *addr &= mask;
+        *addr &= ~mask;
         _atomic_spin_unlock_irqrestore(addr, flags);
 }
 
 static __inline__ void change_bit(int nr, volatile unsigned long * addr)
 {
-        unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
+        unsigned long mask = BIT_MASK(nr);
         unsigned long flags;
 
-        addr += (nr >> SHIFT_PER_LONG);
+        addr += BIT_WORD(nr);
         _atomic_spin_lock_irqsave(addr, flags);
         *addr ^= mask;
         _atomic_spin_unlock_irqrestore(addr, flags);
 }
 
 static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr)
 {
-        unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
+        unsigned long mask = BIT_MASK(nr);
         unsigned long old;
         unsigned long flags;
         int set;
 
-        addr += (nr >> SHIFT_PER_LONG);
+        addr += BIT_WORD(nr);
         _atomic_spin_lock_irqsave(addr, flags);
         old = *addr;
         set = (old & mask) ? 1 : 0;
@@ -86,12 +71,12 @@ static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr)
 
 static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr)
 {
-        unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
+        unsigned long mask = BIT_MASK(nr);
         unsigned long old;
         unsigned long flags;
         int set;
 
-        addr += (nr >> SHIFT_PER_LONG);
+        addr += BIT_WORD(nr);
         _atomic_spin_lock_irqsave(addr, flags);
         old = *addr;
         set = (old & mask) ? 1 : 0;
@@ -104,11 +89,11 @@ static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr)
 
 static __inline__ int test_and_change_bit(int nr, volatile unsigned long * addr)
 {
-        unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr);
+        unsigned long mask = BIT_MASK(nr);
         unsigned long oldbit;
         unsigned long flags;
 
-        addr += (nr >> SHIFT_PER_LONG);
+        addr += BIT_WORD(nr);
         _atomic_spin_lock_irqsave(addr, flags);
         oldbit = *addr;
         *addr = oldbit ^ mask;
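
The conversion above drops the parisc-private SHIFT_PER_LONG / CHOP_SHIFTCOUNT helpers in favour of the generic word-index and mask macros. For reference, the generic definitions (as found in include/linux/bits.h around this kernel version; quoted here only for illustration) compute the same values, since BITS_PER_LONG is a power of two and the modulo and division reduce to the mask and shift the removed macros open-coded:

        /* Generic helpers the patch switches to (illustrative copy): */
        #define BIT_MASK(nr)    (UL(1) << ((nr) % BITS_PER_LONG))
        #define BIT_WORD(nr)    ((nr) / BITS_PER_LONG)

Note the related change in clear_bit(): the mask is no longer stored pre-inverted, so the complement moves to the store (*addr &= ~mask).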

arch/parisc/include/asm/elf.h

Lines changed: 1 addition & 1 deletion
@@ -152,7 +152,7 @@
 /* The following are PA function descriptors
  *
  * addr:        the absolute address of the function
- * gp:          either the data pointer (r27) for non-PIC code or the
+ * gp:          either the data pointer (r27) for non-PIC code or
  *              the PLT pointer (r19) for PIC code */
 
 /* Format for the Elf32 Function descriptor */

arch/parisc/include/asm/spinlock.h

Lines changed: 10 additions & 23 deletions
@@ -10,34 +10,25 @@
 static inline int arch_spin_is_locked(arch_spinlock_t *x)
 {
         volatile unsigned int *a = __ldcw_align(x);
-        smp_mb();
         return *a == 0;
 }
 
-static inline void arch_spin_lock(arch_spinlock_t *x)
-{
-        volatile unsigned int *a;
-
-        a = __ldcw_align(x);
-        while (__ldcw(a) == 0)
-                while (*a == 0)
-                        cpu_relax();
-}
+#define arch_spin_lock(lock) arch_spin_lock_flags(lock, 0)
 
 static inline void arch_spin_lock_flags(arch_spinlock_t *x,
                                         unsigned long flags)
 {
         volatile unsigned int *a;
-        unsigned long flags_dis;
 
         a = __ldcw_align(x);
-        while (__ldcw(a) == 0) {
-                local_save_flags(flags_dis);
-                local_irq_restore(flags);
+        while (__ldcw(a) == 0)
                 while (*a == 0)
-                        cpu_relax();
-                local_irq_restore(flags_dis);
-        }
+                        if (flags & PSW_SM_I) {
+                                local_irq_enable();
+                                cpu_relax();
+                                local_irq_disable();
+                        } else
+                                cpu_relax();
 }
 #define arch_spin_lock_flags arch_spin_lock_flags
 
@@ -46,12 +37,8 @@ static inline void arch_spin_unlock(arch_spinlock_t *x)
         volatile unsigned int *a;
 
         a = __ldcw_align(x);
-#ifdef CONFIG_SMP
-        (void) __ldcw(a);
-#else
-        mb();
-#endif
-        *a = 1;
+        /* Release with ordered store. */
+        __asm__ __volatile__("stw,ma %0,0(%1)" : : "r"(1), "r"(a) : "memory");
 }
 
 static inline int arch_spin_trylock(arch_spinlock_t *x)
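
For readers unfamiliar with the parisc locking convention these helpers rely on: the lock word is 1 when free and 0 when held, and __ldcw() (PA-RISC "load and clear word") atomically reads the word and writes zero, so a non-zero return means the lock was taken. The sketch below merely restates that convention; it is illustrative only, is not part of the patch, and the example_* helper names are hypothetical. The stw,ma form with a zero displacement is used for the release because, on PA 2.0, it reportedly shares its encoding with the ordered store (stw,o), so earlier memory accesses complete before the lock word becomes 1.

        /* Illustrative sketch of the convention used above (not from the patch). */
        static inline int example_trylock(volatile unsigned int *a)
        {
                /* Old value 1 -> we cleared it and own the lock; 0 -> already held. */
                return __ldcw(a) != 0;
        }

        static inline void example_unlock(volatile unsigned int *a)
        {
                /* Ordered store: prior accesses become visible before freeing the lock. */
                __asm__ __volatile__("stw,ma %0,0(%1)" : : "r"(1), "r"(a) : "memory");
        }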

arch/parisc/kernel/entry.S

Lines changed: 25 additions & 23 deletions
@@ -454,7 +454,6 @@
         nop
         LDREG   0(\ptp),\pte
         bb,<,n  \pte,_PAGE_PRESENT_BIT,3f
-        LDCW    0(\tmp),\tmp1
         b       \fault
         stw     \spc,0(\tmp)
 99:     ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
@@ -464,23 +463,26 @@
 3:
         .endm
 
-        /* Release pa_tlb_lock lock without reloading lock address. */
-        .macro          tlb_unlock0     spc,tmp,tmp1
+        /* Release pa_tlb_lock lock without reloading lock address.
+           Note that the values in the register spc are limited to
+           NR_SPACE_IDS (262144). Thus, the stw instruction always
+           stores a nonzero value even when register spc is 64 bits.
+           We use an ordered store to ensure all prior accesses are
+           performed prior to releasing the lock. */
+        .macro          tlb_unlock0     spc,tmp
 #ifdef CONFIG_SMP
 98:     or,COND(=)      %r0,\spc,%r0
-        LDCW            0(\tmp),\tmp1
-        or,COND(=)      %r0,\spc,%r0
-        stw             \spc,0(\tmp)
+        stw,ma          \spc,0(\tmp)
 99:     ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
 #endif
         .endm
 
         /* Release pa_tlb_lock lock. */
-        .macro          tlb_unlock1     spc,tmp,tmp1
+        .macro          tlb_unlock1     spc,tmp
 #ifdef CONFIG_SMP
 98:     load_pa_tlb_lock \tmp
 99:     ALTERNATIVE(98b, 99b, ALT_COND_NO_SMP, INSN_NOP)
-        tlb_unlock0     \spc,\tmp,\tmp1
+        tlb_unlock0     \spc,\tmp
 #endif
         .endm
 
@@ -1163,7 +1165,7 @@ dtlb_miss_20w:
 
         idtlbt          pte,prot
 
-        tlb_unlock1     spc,t0,t1
+        tlb_unlock1     spc,t0
         rfir
         nop
 
@@ -1189,7 +1191,7 @@ nadtlb_miss_20w:
 
         idtlbt          pte,prot
 
-        tlb_unlock1     spc,t0,t1
+        tlb_unlock1     spc,t0
         rfir
         nop
 
@@ -1223,7 +1225,7 @@ dtlb_miss_11:
 
         mtsp            t1, %sr1        /* Restore sr1 */
 
-        tlb_unlock1     spc,t0,t1
+        tlb_unlock1     spc,t0
         rfir
         nop
 
@@ -1256,7 +1258,7 @@ nadtlb_miss_11:
 
         mtsp            t1, %sr1        /* Restore sr1 */
 
-        tlb_unlock1     spc,t0,t1
+        tlb_unlock1     spc,t0
         rfir
         nop
 
@@ -1285,7 +1287,7 @@ dtlb_miss_20:
 
         idtlbt          pte,prot
 
-        tlb_unlock1     spc,t0,t1
+        tlb_unlock1     spc,t0
         rfir
         nop
 
@@ -1313,7 +1315,7 @@ nadtlb_miss_20:
 
         idtlbt          pte,prot
 
-        tlb_unlock1     spc,t0,t1
+        tlb_unlock1     spc,t0
         rfir
         nop
 
@@ -1420,7 +1422,7 @@ itlb_miss_20w:
 
         iitlbt          pte,prot
 
-        tlb_unlock1     spc,t0,t1
+        tlb_unlock1     spc,t0
         rfir
         nop
 
@@ -1444,7 +1446,7 @@ naitlb_miss_20w:
 
         iitlbt          pte,prot
 
-        tlb_unlock1     spc,t0,t1
+        tlb_unlock1     spc,t0
         rfir
         nop
 
@@ -1478,7 +1480,7 @@ itlb_miss_11:
 
         mtsp            t1, %sr1        /* Restore sr1 */
 
-        tlb_unlock1     spc,t0,t1
+        tlb_unlock1     spc,t0
         rfir
         nop
 
@@ -1502,7 +1504,7 @@ naitlb_miss_11:
 
         mtsp            t1, %sr1        /* Restore sr1 */
 
-        tlb_unlock1     spc,t0,t1
+        tlb_unlock1     spc,t0
         rfir
         nop
 
@@ -1532,7 +1534,7 @@ itlb_miss_20:
 
         iitlbt          pte,prot
 
-        tlb_unlock1     spc,t0,t1
+        tlb_unlock1     spc,t0
         rfir
         nop
 
@@ -1552,7 +1554,7 @@ naitlb_miss_20:
 
         iitlbt          pte,prot
 
-        tlb_unlock1     spc,t0,t1
+        tlb_unlock1     spc,t0
         rfir
         nop
 
@@ -1582,7 +1584,7 @@ dbit_trap_20w:
 
         idtlbt          pte,prot
 
-        tlb_unlock0     spc,t0,t1
+        tlb_unlock0     spc,t0
         rfir
         nop
 #else
@@ -1608,7 +1610,7 @@ dbit_trap_11:
 
         mtsp            t1, %sr1        /* Restore sr1 */
 
-        tlb_unlock0     spc,t0,t1
+        tlb_unlock0     spc,t0
         rfir
         nop
 
@@ -1628,7 +1630,7 @@ dbit_trap_20:
 
         idtlbt          pte,prot
 
-        tlb_unlock0     spc,t0,t1
+        tlb_unlock0     spc,t0
         rfir
         nop
 #endif

arch/parisc/kernel/pdt.c

Lines changed: 3 additions & 0 deletions
@@ -18,6 +18,8 @@
 #include <linux/kthread.h>
 #include <linux/initrd.h>
 #include <linux/pgtable.h>
+#include <linux/swap.h>
+#include <linux/swapops.h>
 
 #include <asm/pdc.h>
 #include <asm/pdcpat.h>
@@ -230,6 +232,7 @@ void __init pdc_pdt_init(void)
 
                 /* mark memory page bad */
                 memblock_reserve(pdt_entry[i] & PAGE_MASK, PAGE_SIZE);
+                num_poisoned_pages_inc();
         }
 }
 