Commit 805de8f

Peter Zijlstra authored and KAGA-KOKO committed
atomic: Replace atomic_{set,clear}_mask() usage
Replace the deprecated atomic_{set,clear}_mask() usage with the now ubiquitous atomic_{or,andnot}() functions.

Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Signed-off-by: Thomas Gleixner <[email protected]>
1 parent de9e432 commit 805de8f
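The conversion is purely mechanical: every atomic_set_mask() call becomes atomic_or() and every atomic_clear_mask() call becomes atomic_andnot(), with the same arguments and the same semantics. As a rough, illustrative sketch (not part of this commit), the removed helpers can be thought of as thin wrappers over the new functions:

/* Illustrative only: approximate behaviour of the deprecated helpers,
 * showing why each call site in the diff below converts one-to-one. */
static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
	atomic_or(mask, v);		/* set the bits given in 'mask' */
}

static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
	atomic_andnot(mask, v);		/* clear the bits given in 'mask' */
}

For example, atomic_set_mask(CPUSTAT_WAIT, &cpuflags) in the s390 KVM code below simply becomes atomic_or(CPUSTAT_WAIT, &cpuflags).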

14 files changed: +97 -97 lines changed


arch/blackfin/mach-common/smp.c

Lines changed: 1 addition & 1 deletion
@@ -195,7 +195,7 @@ void send_ipi(const struct cpumask *cpumask, enum ipi_message_type msg)
 	local_irq_save(flags);
 	for_each_cpu(cpu, cpumask) {
 		bfin_ipi_data = &per_cpu(bfin_ipi, cpu);
-		atomic_set_mask((1 << msg), &bfin_ipi_data->bits);
+		atomic_or((1 << msg), &bfin_ipi_data->bits);
 		atomic_inc(&bfin_ipi_data->count);
 	}
 	local_irq_restore(flags);

arch/m32r/kernel/smp.c

Lines changed: 2 additions & 2 deletions
@@ -156,7 +156,7 @@ void smp_flush_cache_all(void)
 	cpumask_clear_cpu(smp_processor_id(), &cpumask);
 	spin_lock(&flushcache_lock);
 	mask=cpumask_bits(&cpumask);
-	atomic_set_mask(*mask, (atomic_t *)&flushcache_cpumask);
+	atomic_or(*mask, (atomic_t *)&flushcache_cpumask);
 	send_IPI_mask(&cpumask, INVALIDATE_CACHE_IPI, 0);
 	_flush_cache_copyback_all();
 	while (flushcache_cpumask)
@@ -407,7 +407,7 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
 	flush_vma = vma;
 	flush_va = va;
 	mask=cpumask_bits(&cpumask);
-	atomic_set_mask(*mask, (atomic_t *)&flush_cpumask);
+	atomic_or(*mask, (atomic_t *)&flush_cpumask);
 
 	/*
 	 * We have to send the IPI only to

arch/mn10300/mm/tlb-smp.c

Lines changed: 1 addition & 1 deletion
@@ -119,7 +119,7 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
 	flush_mm = mm;
 	flush_va = va;
 #if NR_CPUS <= BITS_PER_LONG
-	atomic_set_mask(cpumask.bits[0], &flush_cpumask.bits[0]);
+	atomic_or(cpumask.bits[0], (atomic_t *)&flush_cpumask.bits[0]);
 #else
 #error Not supported.
 #endif

arch/s390/kernel/time.c

Lines changed: 2 additions & 2 deletions
@@ -381,7 +381,7 @@ static void disable_sync_clock(void *dummy)
 	 * increase the "sequence" counter to avoid the race of an
 	 * etr event and the complete recovery against get_sync_clock.
 	 */
-	atomic_clear_mask(0x80000000, sw_ptr);
+	atomic_andnot(0x80000000, sw_ptr);
 	atomic_inc(sw_ptr);
 }
 
@@ -392,7 +392,7 @@ static void disable_sync_clock(void *dummy)
 static void enable_sync_clock(void)
 {
 	atomic_t *sw_ptr = this_cpu_ptr(&clock_sync_word);
-	atomic_set_mask(0x80000000, sw_ptr);
+	atomic_or(0x80000000, sw_ptr);
 }
 
 /*

arch/s390/kvm/interrupt.c

Lines changed: 15 additions & 15 deletions
@@ -170,20 +170,20 @@ static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
 
 static void __set_cpu_idle(struct kvm_vcpu *vcpu)
 {
-	atomic_set_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
+	atomic_or(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
 	set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
 }
 
 static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
 {
-	atomic_clear_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
+	atomic_andnot(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
 	clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
 }
 
 static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
 {
-	atomic_clear_mask(CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
-			  &vcpu->arch.sie_block->cpuflags);
+	atomic_andnot(CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
+		      &vcpu->arch.sie_block->cpuflags);
 	vcpu->arch.sie_block->lctl = 0x0000;
 	vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT);
 
@@ -196,7 +196,7 @@ static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
 
 static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
 {
-	atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags);
+	atomic_or(flag, &vcpu->arch.sie_block->cpuflags);
 }
 
 static void set_intercept_indicators_io(struct kvm_vcpu *vcpu)
@@ -919,7 +919,7 @@ void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
 	spin_unlock(&li->lock);
 
 	/* clear pending external calls set by sigp interpretation facility */
-	atomic_clear_mask(CPUSTAT_ECALL_PEND, li->cpuflags);
+	atomic_andnot(CPUSTAT_ECALL_PEND, li->cpuflags);
 	vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl = 0;
 }
 
@@ -1020,7 +1020,7 @@ static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
 
 	li->irq.ext = irq->u.ext;
 	set_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
-	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
 	return 0;
 }
 
@@ -1035,7 +1035,7 @@ static int __inject_extcall_sigpif(struct kvm_vcpu *vcpu, uint16_t src_id)
 		/* another external call is pending */
 		return -EBUSY;
 	}
-	atomic_set_mask(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
+	atomic_or(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
 	return 0;
 }
 
@@ -1061,7 +1061,7 @@ static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
 	if (test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
 		return -EBUSY;
 	*extcall = irq->u.extcall;
-	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
 	return 0;
 }
 
@@ -1133,7 +1133,7 @@ static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
 
 	set_bit(irq->u.emerg.code, li->sigp_emerg_pending);
 	set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
-	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
 	return 0;
 }
 
@@ -1177,7 +1177,7 @@ static int __inject_ckc(struct kvm_vcpu *vcpu)
 				   0, 0, 2);
 
 	set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
-	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
 	return 0;
 }
 
@@ -1190,7 +1190,7 @@ static int __inject_cpu_timer(struct kvm_vcpu *vcpu)
 				   0, 0, 2);
 
 	set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
-	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
 	return 0;
 }
 
@@ -1369,13 +1369,13 @@ static void __floating_irq_kick(struct kvm *kvm, u64 type)
 	spin_lock(&li->lock);
 	switch (type) {
 	case KVM_S390_MCHK:
-		atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
+		atomic_or(CPUSTAT_STOP_INT, li->cpuflags);
 		break;
 	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
-		atomic_set_mask(CPUSTAT_IO_INT, li->cpuflags);
+		atomic_or(CPUSTAT_IO_INT, li->cpuflags);
 		break;
 	default:
-		atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
+		atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
 		break;
 	}
 	spin_unlock(&li->lock);

arch/s390/kvm/kvm-s390.c

Lines changed: 16 additions & 16 deletions
@@ -1215,12 +1215,12 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	}
 	restore_access_regs(vcpu->run->s.regs.acrs);
 	gmap_enable(vcpu->arch.gmap);
-	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
+	atomic_or(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
-	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
+	atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
 	gmap_disable(vcpu->arch.gmap);
 	if (test_kvm_facility(vcpu->kvm, 129)) {
 		save_fp_ctl(&vcpu->run->s.regs.fpc);
@@ -1320,9 +1320,9 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 						  CPUSTAT_STOPPED);
 
 	if (test_kvm_facility(vcpu->kvm, 78))
-		atomic_set_mask(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
+		atomic_or(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
 	else if (test_kvm_facility(vcpu->kvm, 8))
-		atomic_set_mask(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);
+		atomic_or(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);
 
 	kvm_s390_vcpu_setup_model(vcpu);
 
@@ -1422,24 +1422,24 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 
 void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
 {
-	atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
+	atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
 	exit_sie(vcpu);
 }
 
 void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
 {
-	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
+	atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
 }
 
 static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
 {
-	atomic_set_mask(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
+	atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
 	exit_sie(vcpu);
 }
 
 static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
 {
-	atomic_clear_mask(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
+	atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
 }
 
 /*
@@ -1448,7 +1448,7 @@ static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
  * return immediately. */
 void exit_sie(struct kvm_vcpu *vcpu)
 {
-	atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
+	atomic_or(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
 	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
 		cpu_relax();
 }
@@ -1672,19 +1672,19 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
 	if (dbg->control & KVM_GUESTDBG_ENABLE) {
 		vcpu->guest_debug = dbg->control;
 		/* enforce guest PER */
-		atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
+		atomic_or(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
 
 		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
 			rc = kvm_s390_import_bp_data(vcpu, dbg);
 	} else {
-		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
+		atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
 		vcpu->arch.guestdbg.last_bp = 0;
 	}
 
 	if (rc) {
 		vcpu->guest_debug = 0;
 		kvm_s390_clear_bp_data(vcpu);
-		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
+		atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
 	}
 
 	return rc;
@@ -1771,7 +1771,7 @@ static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
 	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
 		if (!ibs_enabled(vcpu)) {
 			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
-			atomic_set_mask(CPUSTAT_IBS,
+			atomic_or(CPUSTAT_IBS,
 					&vcpu->arch.sie_block->cpuflags);
 		}
 		goto retry;
@@ -1780,7 +1780,7 @@ static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
 	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
 		if (ibs_enabled(vcpu)) {
 			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
-			atomic_clear_mask(CPUSTAT_IBS,
+			atomic_andnot(CPUSTAT_IBS,
 					&vcpu->arch.sie_block->cpuflags);
 		}
 		goto retry;
@@ -2280,7 +2280,7 @@ void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
 		__disable_ibs_on_all_vcpus(vcpu->kvm);
 	}
 
-	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
+	atomic_andnot(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
 	/*
 	 * Another VCPU might have used IBS while we were offline.
 	 * Let's play safe and flush the VCPU at startup.
@@ -2306,7 +2306,7 @@ void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
 	/* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
 	kvm_s390_clear_stop_irq(vcpu);
 
-	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
+	atomic_or(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
 	__disable_ibs_on_vcpu(vcpu);
 
 	for (i = 0; i < online_vcpus; i++) {

drivers/gpu/drm/i915/i915_drv.c

Lines changed: 1 addition & 1 deletion
@@ -748,7 +748,7 @@ static int i915_drm_resume(struct drm_device *dev)
 	mutex_lock(&dev->struct_mutex);
 	if (i915_gem_init_hw(dev)) {
 		DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
-		atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
+		atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
 	}
 	mutex_unlock(&dev->struct_mutex);
 
drivers/gpu/drm/i915/i915_gem.c

Lines changed: 1 addition & 1 deletion
@@ -5091,7 +5091,7 @@ int i915_gem_init(struct drm_device *dev)
 		 * for all other failure, such as an allocation failure, bail.
 		 */
 		DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
-		atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
+		atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
 		ret = 0;
 	}
 

drivers/gpu/drm/i915/i915_irq.c

Lines changed: 2 additions & 2 deletions
@@ -2446,7 +2446,7 @@ static void i915_reset_and_wakeup(struct drm_device *dev)
 		kobject_uevent_env(&dev->primary->kdev->kobj,
 				   KOBJ_CHANGE, reset_done_event);
 	} else {
-		atomic_set_mask(I915_WEDGED, &error->reset_counter);
+		atomic_or(I915_WEDGED, &error->reset_counter);
 	}
 
 	/*
@@ -2574,7 +2574,7 @@ void i915_handle_error(struct drm_device *dev, bool wedged,
 	i915_report_and_clear_eir(dev);
 
 	if (wedged) {
-		atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
+		atomic_or(I915_RESET_IN_PROGRESS_FLAG,
 				&dev_priv->gpu_error.reset_counter);
 
 		/*

drivers/s390/scsi/zfcp_aux.c

Lines changed: 1 addition & 1 deletion
@@ -529,7 +529,7 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
 	list_add_tail(&port->list, &adapter->port_list);
 	write_unlock_irq(&adapter->port_list_lock);
 
-	atomic_set_mask(status | ZFCP_STATUS_COMMON_RUNNING, &port->status);
+	atomic_or(status | ZFCP_STATUS_COMMON_RUNNING, &port->status);
 
 	return port;