@@ -48,6 +48,7 @@ struct rfkill {
 	bool			persistent;
 	bool			polling_paused;
 	bool			suspended;
+	bool			need_sync;
 
 	const struct rfkill_ops	*ops;
 	void			*data;
@@ -368,6 +369,17 @@ static void rfkill_set_block(struct rfkill *rfkill, bool blocked)
 		rfkill_event(rfkill);
 }
 
+static void rfkill_sync(struct rfkill *rfkill)
+{
+	lockdep_assert_held(&rfkill_global_mutex);
+
+	if (!rfkill->need_sync)
+		return;
+
+	rfkill_set_block(rfkill, rfkill_global_states[rfkill->type].cur);
+	rfkill->need_sync = false;
+}
+
 static void rfkill_update_global_state(enum rfkill_type type, bool blocked)
 {
 	int i;
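
rfkill_sync() is a flag-guarded, one-shot lazy sync: it must be called with rfkill_global_mutex held, does nothing unless need_sync is set, applies the global state for the device's type, and then clears the flag. The following is a minimal userspace sketch of that same pattern, not kernel code: pthread_mutex_t stands in for rfkill_global_mutex, and struct device, device_sync, device_show, and global_blocked are invented names used purely for illustration.

/*
 * Userspace sketch of the flag-guarded lazy-sync pattern: state is synced
 * at most once, only under the lock, and every reader path performs the
 * sync before reporting state. All names are illustrative stand-ins.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t global_mutex = PTHREAD_MUTEX_INITIALIZER;
static bool global_blocked = true;	/* stand-in for rfkill_global_states[].cur */

struct device {
	bool blocked;
	bool need_sync;
};

/* Must be called with global_mutex held (mirrors lockdep_assert_held()). */
static void device_sync(struct device *dev)
{
	if (!dev->need_sync)
		return;

	dev->blocked = global_blocked;	/* mirrors rfkill_set_block(..., cur) */
	dev->need_sync = false;
}

/* Reader path: sync under the lock before reporting, like soft_show(). */
static int device_show(struct device *dev)
{
	int blocked;

	pthread_mutex_lock(&global_mutex);
	device_sync(dev);
	blocked = dev->blocked;
	pthread_mutex_unlock(&global_mutex);

	return blocked;
}

int main(void)
{
	struct device dev = { .blocked = false, .need_sync = true };

	/* First read observes the synced (global) state, not the raw default. */
	printf("blocked=%d\n", device_show(&dev));
	return 0;
}
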
@@ -730,6 +742,10 @@ static ssize_t soft_show(struct device *dev, struct device_attribute *attr,
 {
 	struct rfkill *rfkill = to_rfkill(dev);
 
+	mutex_lock(&rfkill_global_mutex);
+	rfkill_sync(rfkill);
+	mutex_unlock(&rfkill_global_mutex);
+
 	return sysfs_emit(buf, "%d\n", (rfkill->state & RFKILL_BLOCK_SW) ? 1 : 0);
 }
 
@@ -751,6 +767,7 @@ static ssize_t soft_store(struct device *dev, struct device_attribute *attr,
 		return -EINVAL;
 
 	mutex_lock(&rfkill_global_mutex);
+	rfkill_sync(rfkill);
 	rfkill_set_block(rfkill, state);
 	mutex_unlock(&rfkill_global_mutex);
 
@@ -783,6 +800,10 @@ static ssize_t state_show(struct device *dev, struct device_attribute *attr,
 {
 	struct rfkill *rfkill = to_rfkill(dev);
 
+	mutex_lock(&rfkill_global_mutex);
+	rfkill_sync(rfkill);
+	mutex_unlock(&rfkill_global_mutex);
+
 	return sysfs_emit(buf, "%d\n", user_state_from_blocked(rfkill->state));
 }
 
@@ -805,6 +826,7 @@ static ssize_t state_store(struct device *dev, struct device_attribute *attr,
 		return -EINVAL;
 
 	mutex_lock(&rfkill_global_mutex);
+	rfkill_sync(rfkill);
 	rfkill_set_block(rfkill, state == RFKILL_USER_STATE_SOFT_BLOCKED);
 	mutex_unlock(&rfkill_global_mutex);
 
@@ -1032,14 +1054,10 @@ static void rfkill_uevent_work(struct work_struct *work)
 
 static void rfkill_sync_work(struct work_struct *work)
 {
-	struct rfkill *rfkill;
-	bool cur;
-
-	rfkill = container_of(work, struct rfkill, sync_work);
+	struct rfkill *rfkill = container_of(work, struct rfkill, sync_work);
 
 	mutex_lock(&rfkill_global_mutex);
-	cur = rfkill_global_states[rfkill->type].cur;
-	rfkill_set_block(rfkill, cur);
+	rfkill_sync(rfkill);
 	mutex_unlock(&rfkill_global_mutex);
 }
 
@@ -1087,6 +1105,7 @@ int __must_check rfkill_register(struct rfkill *rfkill)
 				   round_jiffies_relative(POLL_INTERVAL));
 
 	if (!rfkill->persistent || rfkill_epo_lock_active) {
+		rfkill->need_sync = true;
 		schedule_work(&rfkill->sync_work);
 	} else {
 #ifdef CONFIG_RFKILL_INPUT
@@ -1171,6 +1190,7 @@ static int rfkill_fop_open(struct inode *inode, struct file *file)
 		ev = kzalloc(sizeof(*ev), GFP_KERNEL);
 		if (!ev)
 			goto free;
+		rfkill_sync(rfkill);
 		rfkill_fill_event(&ev->ev, rfkill, RFKILL_OP_ADD);
 		list_add_tail(&ev->list, &data->events);
 	}
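
Taken together, the pattern in this diff is: rfkill_register() only marks the device with need_sync and schedules sync_work, while every path that exposes or changes state from userspace (soft_show/soft_store, state_show/state_store, rfkill_fop_open, and sync_work itself) calls rfkill_sync() under rfkill_global_mutex first. A plausible reading is that this ensures userspace never observes or modifies a device's state before it has been reconciled with the global rfkill state, even if the scheduled sync_work has not yet run.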