@@ -727,70 +727,6 @@ static int nvt_ir_raw_set_wakeup_filter(struct rc_dev *dev,
 	return ret;
 }
 
-/*
- * nvt_tx_ir
- *
- * 1) clean TX fifo first (handled by AP)
- * 2) copy data from user space
- * 3) disable RX interrupts, enable TX interrupts: TTR & TFU
- * 4) send 9 packets to TX FIFO to open TTR
- * in interrupt_handler:
- * 5) send all data out
- * go back to write():
- * 6) disable TX interrupts, re-enable RX interupts
- *
- * The key problem of this function is user space data may larger than
- * driver's data buf length. So nvt_tx_ir() will only copy TX_BUF_LEN data to
- * buf, and keep current copied data buf num in cur_buf_num. But driver's buf
- * number may larger than TXFCONT (0xff). So in interrupt_handler, it has to
- * set TXFCONT as 0xff, until buf_count less than 0xff.
- */
-static int nvt_tx_ir(struct rc_dev *dev, unsigned *txbuf, unsigned n)
-{
-	struct nvt_dev *nvt = dev->priv;
-	unsigned long flags;
-	unsigned int i;
-	u8 iren;
-	int ret;
-
-	spin_lock_irqsave(&nvt->lock, flags);
-
-	ret = min((unsigned)(TX_BUF_LEN / sizeof(unsigned)), n);
-	nvt->tx.buf_count = (ret * sizeof(unsigned));
-
-	memcpy(nvt->tx.buf, txbuf, nvt->tx.buf_count);
-
-	nvt->tx.cur_buf_num = 0;
-
-	/* save currently enabled interrupts */
-	iren = nvt_cir_reg_read(nvt, CIR_IREN);
-
-	/* now disable all interrupts, save TFU & TTR */
-	nvt_cir_reg_write(nvt, CIR_IREN_TFU | CIR_IREN_TTR, CIR_IREN);
-
-	nvt->tx.tx_state = ST_TX_REPLY;
-
-	nvt_cir_reg_write(nvt, CIR_FIFOCON_TX_TRIGGER_LEV_8 |
-			  CIR_FIFOCON_RXFIFOCLR, CIR_FIFOCON);
-
-	/* trigger TTR interrupt by writing out ones, (yes, it's ugly) */
-	for (i = 0; i < 9; i++)
-		nvt_cir_reg_write(nvt, 0x01, CIR_STXFIFO);
-
-	spin_unlock_irqrestore(&nvt->lock, flags);
-
-	wait_event(nvt->tx.queue, nvt->tx.tx_state == ST_TX_REQUEST);
-
-	spin_lock_irqsave(&nvt->lock, flags);
-	nvt->tx.tx_state = ST_TX_NONE;
-	spin_unlock_irqrestore(&nvt->lock, flags);
-
-	/* restore enabled interrupts to prior state */
-	nvt_cir_reg_write(nvt, iren, CIR_IREN);
-
-	return ret;
-}
-
 /* dump contents of the last rx buffer we got from the hw rx fifo */
 static void nvt_dump_rx_buf(struct nvt_dev *nvt)
 {
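For context on what the deleted path did: nvt_tx_ir() copied the sample buffer, armed the TTR/TFU interrupts, primed the TX FIFO, and then slept on nvt->tx.queue until the ISR saw a TX FIFO underrun (all samples sent) and flipped tx_state from ST_TX_REPLY to ST_TX_REQUEST. The sketch below is a minimal userspace analogue of that wait_event()/wake_up() handshake, using a pthread condition variable in place of the kernel wait queue; all names and helpers here are illustrative, not part of the driver or of rc-core.

/* Illustrative userspace analogue of the removed write()/ISR handshake.
 * main() plays the role of nvt_tx_ir(), irq_thread() plays the role of
 * the TFU branch of nvt_cir_isr(). Build with: cc -pthread handshake.c */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

enum tx_state { ST_TX_NONE, ST_TX_REPLY, ST_TX_REQUEST };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t queue = PTHREAD_COND_INITIALIZER;   /* ~ nvt->tx.queue */
static enum tx_state tx_state = ST_TX_NONE;                /* ~ nvt->tx.tx_state */

static void *irq_thread(void *arg)
{
	sleep(1);                       /* pretend the hardware drains the FIFO */
	pthread_mutex_lock(&lock);
	if (tx_state == ST_TX_REPLY) {  /* "TX FIFO underrun": everything sent */
		tx_state = ST_TX_REQUEST;
		pthread_cond_signal(&queue);    /* ~ wake_up(&nvt->tx.queue) */
	}
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t irq;

	pthread_mutex_lock(&lock);
	tx_state = ST_TX_REPLY;         /* buffer queued, TX interrupts armed */
	pthread_mutex_unlock(&lock);

	pthread_create(&irq, NULL, irq_thread, NULL);

	/* ~ wait_event(nvt->tx.queue, tx_state == ST_TX_REQUEST) */
	pthread_mutex_lock(&lock);
	while (tx_state != ST_TX_REQUEST)
		pthread_cond_wait(&queue, &lock);
	tx_state = ST_TX_NONE;          /* transmission finished */
	pthread_mutex_unlock(&lock);

	pthread_join(irq, NULL);
	printf("tx complete\n");
	return 0;
}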
@@ -895,11 +831,6 @@ static void nvt_cir_log_irqs(u8 status, u8 iren)
 			 CIR_IRSTS_TFU | CIR_IRSTS_GH) ? " ?" : "");
 }
 
-static bool nvt_cir_tx_inactive(struct nvt_dev *nvt)
-{
-	return nvt->tx.tx_state == ST_TX_NONE;
-}
-
 /* interrupt service routine for incoming and outgoing CIR data */
 static irqreturn_t nvt_cir_isr(int irq, void *data)
 {
@@ -952,40 +883,8 @@ static irqreturn_t nvt_cir_isr(int irq, void *data)
 
 	if (status & CIR_IRSTS_RFO)
 		nvt_handle_rx_fifo_overrun(nvt);
-
-	else if (status & (CIR_IRSTS_RTR | CIR_IRSTS_PE)) {
-		/* We only do rx if not tx'ing */
-		if (nvt_cir_tx_inactive(nvt))
-			nvt_get_rx_ir_data(nvt);
-	}
-
-	if (status & CIR_IRSTS_TE)
-		nvt_clear_tx_fifo(nvt);
-
-	if (status & CIR_IRSTS_TTR) {
-		unsigned int pos, count;
-		u8 tmp;
-
-		pos = nvt->tx.cur_buf_num;
-		count = nvt->tx.buf_count;
-
-		/* Write data into the hardware tx fifo while pos < count */
-		if (pos < count) {
-			nvt_cir_reg_write(nvt, nvt->tx.buf[pos], CIR_STXFIFO);
-			nvt->tx.cur_buf_num++;
-		/* Disable TX FIFO Trigger Level Reach (TTR) interrupt */
-		} else {
-			tmp = nvt_cir_reg_read(nvt, CIR_IREN);
-			nvt_cir_reg_write(nvt, tmp & ~CIR_IREN_TTR, CIR_IREN);
-		}
-	}
-
-	if (status & CIR_IRSTS_TFU) {
-		if (nvt->tx.tx_state == ST_TX_REPLY) {
-			nvt->tx.tx_state = ST_TX_REQUEST;
-			wake_up(&nvt->tx.queue);
-		}
-	}
+	else if (status & (CIR_IRSTS_RTR | CIR_IRSTS_PE))
+		nvt_get_rx_ir_data(nvt);
 
 	spin_unlock(&nvt->lock);
 
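The deleted TTR branch implemented a common interrupt-driven FIFO feed: each time the hardware raised a "trigger level reached" interrupt, the handler pushed exactly one queued sample into CIR_STXFIFO, and once the whole buffer had been consumed it cleared the TTR bit in CIR_IREN so the interrupt stopped firing. The following is a minimal, self-contained simulation of that pattern under assumed names; fifo_write() and the buf/cur_buf_num/buf_count variables stand in for the register accessor and the driver's tx bookkeeping.

/* Minimal simulation of the removed TTR feed loop: one sample per
 * trigger-level interrupt, then mask the interrupt when the buffer is
 * exhausted. All names are illustrative stand-ins, not driver code. */
#include <stdbool.h>
#include <stdio.h>

static unsigned buf[] = { 500, 500, 1000, 500 }; /* queued pulse/space samples */
static unsigned cur_buf_num;                     /* ~ nvt->tx.cur_buf_num */
static unsigned buf_count = 4;                   /* ~ nvt->tx.buf_count */
static bool ttr_enabled = true;                  /* ~ CIR_IREN_TTR bit */

static void fifo_write(unsigned sample)          /* stand-in for a CIR_STXFIFO write */
{
	printf("STXFIFO <- %u\n", sample);
}

static void ttr_irq(void)                        /* ~ the CIR_IRSTS_TTR branch */
{
	if (cur_buf_num < buf_count) {
		fifo_write(buf[cur_buf_num]);
		cur_buf_num++;
	} else {
		ttr_enabled = false;             /* mask TTR: nothing left to send */
	}
}

int main(void)
{
	while (ttr_enabled)                      /* hardware keeps raising TTR */
		ttr_irq();
	printf("TTR masked after %u samples\n", cur_buf_num);
	return 0;
}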
@@ -1062,7 +961,7 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
 	if (!nvt)
 		return -ENOMEM;
 
-	/* input device for IR remote (and tx) */
+	/* input device for IR remote */
 	nvt->rdev = devm_rc_allocate_device(&pdev->dev, RC_DRIVER_IR_RAW);
 	if (!nvt->rdev)
 		return -ENOMEM;
@@ -1105,8 +1004,6 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
 
 	pnp_set_drvdata(pdev, nvt);
 
-	init_waitqueue_head(&nvt->tx.queue);
-
 	ret = nvt_hw_detect(nvt);
 	if (ret)
 		return ret;
@@ -1131,7 +1028,6 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
 	rdev->encode_wakeup = true;
 	rdev->open = nvt_open;
 	rdev->close = nvt_close;
-	rdev->tx_ir = nvt_tx_ir;
 	rdev->s_tx_carrier = nvt_set_tx_carrier;
 	rdev->s_wakeup_filter = nvt_ir_raw_set_wakeup_filter;
 	rdev->device_name = "Nuvoton w836x7hg Infrared Remote Transceiver";
@@ -1148,8 +1044,6 @@ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
 #if 0
 	rdev->min_timeout = XYZ;
 	rdev->max_timeout = XYZ;
-	/* tx bits */
-	rdev->tx_resolution = XYZ;
 #endif
 	ret = devm_rc_register_device(&pdev->dev, rdev);
 	if (ret)
@@ -1205,8 +1099,6 @@ static int nvt_suspend(struct pnp_dev *pdev, pm_message_t state)
 
 	spin_lock_irqsave(&nvt->lock, flags);
 
-	nvt->tx.tx_state = ST_TX_NONE;
-
 	/* disable all CIR interrupts */
 	nvt_cir_reg_write(nvt, 0, CIR_IREN);
 