@@ -566,6 +566,17 @@ void hci_cmd_sync_init(struct hci_dev *hdev)
 	INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
 }
 
+static void _hci_cmd_sync_cancel_entry(struct hci_dev *hdev,
+				       struct hci_cmd_sync_work_entry *entry,
+				       int err)
+{
+	if (entry->destroy)
+		entry->destroy(hdev, entry->data, err);
+
+	list_del(&entry->list);
+	kfree(entry);
+}
+
 void hci_cmd_sync_clear(struct hci_dev *hdev)
 {
 	struct hci_cmd_sync_work_entry *entry, *tmp;
@@ -574,13 +585,8 @@ void hci_cmd_sync_clear(struct hci_dev *hdev)
 	cancel_work_sync(&hdev->reenable_adv_work);
 
 	mutex_lock(&hdev->cmd_sync_work_lock);
-	list_for_each_entry_safe(entry, tmp, &hdev->cmd_sync_work_list, list) {
-		if (entry->destroy)
-			entry->destroy(hdev, entry->data, -ECANCELED);
-
-		list_del(&entry->list);
-		kfree(entry);
-	}
+	list_for_each_entry_safe(entry, tmp, &hdev->cmd_sync_work_list, list)
+		_hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED);
 	mutex_unlock(&hdev->cmd_sync_work_lock);
 }
 
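Note on the two hunks above: the per-entry teardown (run the destroy callback, unlink, free) now lives in _hci_cmd_sync_cancel_entry(), which expects cmd_sync_work_lock to be held by the caller since it unlinks the entry from cmd_sync_work_list. The clear path keeps using list_for_each_entry_safe() because entries are freed mid-walk. For readers unfamiliar with that idiom, here is a minimal userspace sketch of the same pattern, with illustrative names only (not from the patch):

#include <stdio.h>
#include <stdlib.h>

struct entry {
	int id;
	struct entry *next;
};

/* Free every node while walking the list. 'tmp' caches the successor
 * before the current node is freed -- the same reason the kernel code
 * uses list_for_each_entry_safe() instead of list_for_each_entry(). */
static void clear_all(struct entry **head)
{
	struct entry *entry, *tmp;

	for (entry = *head; entry; entry = tmp) {
		tmp = entry->next;
		printf("cancelling entry %d\n", entry->id);
		free(entry);
	}
	*head = NULL;
}

int main(void)
{
	struct entry *head = NULL;

	for (int i = 0; i < 3; i++) {
		struct entry *e = malloc(sizeof(*e));

		e->id = i;
		e->next = head;
		head = e;
	}
	clear_all(&head);
	return 0;
}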
@@ -669,6 +675,115 @@ int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
 }
 EXPORT_SYMBOL(hci_cmd_sync_queue);
 
+static struct hci_cmd_sync_work_entry *
+_hci_cmd_sync_lookup_entry(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
+			   void *data, hci_cmd_sync_work_destroy_t destroy)
+{
+	struct hci_cmd_sync_work_entry *entry, *tmp;
+
+	list_for_each_entry_safe(entry, tmp, &hdev->cmd_sync_work_list, list) {
+		if (func && entry->func != func)
+			continue;
+
+		if (data && entry->data != data)
+			continue;
+
+		if (destroy && entry->destroy != destroy)
+			continue;
+
+		return entry;
+	}
+
+	return NULL;
+}
+
+/* Queue HCI command entry once:
+ *
+ * - Look up if an entry already exists and only if it doesn't create a new
+ *   entry and queue it.
+ */
+int hci_cmd_sync_queue_once(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
+			    void *data, hci_cmd_sync_work_destroy_t destroy)
+{
+	if (hci_cmd_sync_lookup_entry(hdev, func, data, destroy))
+		return 0;
+
+	return hci_cmd_sync_queue(hdev, func, data, destroy);
+}
+EXPORT_SYMBOL(hci_cmd_sync_queue_once);
+
+/* Lookup HCI command entry:
+ *
+ * - Return the first entry that matches by function callback or data or
+ *   destroy callback.
+ */
+struct hci_cmd_sync_work_entry *
+hci_cmd_sync_lookup_entry(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
+			  void *data, hci_cmd_sync_work_destroy_t destroy)
+{
+	struct hci_cmd_sync_work_entry *entry;
+
+	mutex_lock(&hdev->cmd_sync_work_lock);
+	entry = _hci_cmd_sync_lookup_entry(hdev, func, data, destroy);
+	mutex_unlock(&hdev->cmd_sync_work_lock);
+
+	return entry;
+}
+EXPORT_SYMBOL(hci_cmd_sync_lookup_entry);
+
+/* Cancel HCI command entry */
+void hci_cmd_sync_cancel_entry(struct hci_dev *hdev,
+			       struct hci_cmd_sync_work_entry *entry)
+{
+	mutex_lock(&hdev->cmd_sync_work_lock);
+	_hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED);
+	mutex_unlock(&hdev->cmd_sync_work_lock);
+}
+EXPORT_SYMBOL(hci_cmd_sync_cancel_entry);
+
+/* Dequeue one HCI command entry:
+ *
+ * - Look up and cancel the first entry that matches.
+ */
+bool hci_cmd_sync_dequeue_once(struct hci_dev *hdev,
+			       hci_cmd_sync_work_func_t func,
+			       void *data, hci_cmd_sync_work_destroy_t destroy)
+{
+	struct hci_cmd_sync_work_entry *entry;
+
+	entry = hci_cmd_sync_lookup_entry(hdev, func, data, destroy);
+	if (!entry)
+		return false;
+
+	hci_cmd_sync_cancel_entry(hdev, entry);
+
+	return true;
+}
+EXPORT_SYMBOL(hci_cmd_sync_dequeue_once);
+
+/* Dequeue HCI command entry:
+ *
+ * - Look up and cancel any entry that matches by function callback or data or
+ *   destroy callback.
+ */
+bool hci_cmd_sync_dequeue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
+			  void *data, hci_cmd_sync_work_destroy_t destroy)
+{
+	struct hci_cmd_sync_work_entry *entry;
+	bool ret = false;
+
+	mutex_lock(&hdev->cmd_sync_work_lock);
+	while ((entry = _hci_cmd_sync_lookup_entry(hdev, func, data,
+						   destroy))) {
+		_hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED);
+		ret = true;
+	}
+	mutex_unlock(&hdev->cmd_sync_work_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL(hci_cmd_sync_dequeue);
+
 int hci_update_eir_sync(struct hci_dev *hdev)
 {
 	struct hci_cp_write_eir cp;
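The hunk above is the whole of the new API: an unlocked lookup core plus locked wrappers for lookup, queue-once, single-entry cancellation, and one-or-all dequeue. A NULL func, data or destroy acts as a wildcard, so callers can match on any subset of the triple. A hedged sketch of intended usage follows; foo_sync(), foo_destroy() and their callers are hypothetical, not part of this patch:

/* Hypothetical worker and cleanup callback, illustrative only. */
static int foo_sync(struct hci_dev *hdev, void *data)
{
	/* ...issue synchronous HCI commands... */
	return 0;
}

static void foo_destroy(struct hci_dev *hdev, void *data, int err)
{
	kfree(data);	/* runs after foo_sync() completes, or on cancel */
}

static int foo_trigger(struct hci_dev *hdev, void *data)
{
	/* Queue at most one instance: if a pending entry already matches
	 * the (func, data, destroy) triple, return 0 without queueing. */
	return hci_cmd_sync_queue_once(hdev, foo_sync, data, foo_destroy);
}

static void foo_abort(struct hci_dev *hdev, void *data)
{
	/* Cancel the first pending match; its destroy callback is invoked
	 * with -ECANCELED before the entry is unlinked and freed. */
	hci_cmd_sync_dequeue_once(hdev, foo_sync, data, foo_destroy);
}

hci_cmd_sync_dequeue(), by contrast, loops until no match remains, which also clears any duplicates queued before the caller switched to the queue-once helper.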
@@ -2881,7 +2996,8 @@ int hci_update_passive_scan(struct hci_dev *hdev)
 	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
 		return 0;
 
-	return hci_cmd_sync_queue(hdev, update_passive_scan_sync, NULL, NULL);
+	return hci_cmd_sync_queue_once(hdev, update_passive_scan_sync, NULL,
+				       NULL);
 }
 
 int hci_write_sc_support_sync(struct hci_dev *hdev, u8 val)
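The final hunk is the first user of the new helper: hci_update_passive_scan() fires on many events (connections, disconnections, scan-parameter changes), and each call previously queued its own update_passive_scan_sync entry even when one was already pending. With data and destroy both NULL, the lookup matches on the function pointer alone, so a burst of calls collapses into a single queued update. Illustrative call sequence (hedged, not from the patch):

hci_update_passive_scan(hdev);	/* queues update_passive_scan_sync */
hci_update_passive_scan(hdev);	/* entry still pending: returns 0 */
/* ...cmd_sync work runs, entry is freed... */
hci_update_passive_scan(hdev);	/* queues a fresh entry */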