@@ -1549,6 +1549,80 @@ static void spi_idle_runtime_pm(struct spi_controller *ctlr)
 	}
 }
 
+static int __spi_pump_transfer_message(struct spi_controller *ctlr,
+				       struct spi_message *msg, bool was_busy)
+{
+	struct spi_transfer *xfer;
+	int ret;
+
+	if (!was_busy && ctlr->auto_runtime_pm) {
+		ret = pm_runtime_get_sync(ctlr->dev.parent);
+		if (ret < 0) {
+			pm_runtime_put_noidle(ctlr->dev.parent);
+			dev_err(&ctlr->dev, "Failed to power device: %d\n",
+				ret);
+			return ret;
+		}
+	}
+
+	if (!was_busy)
+		trace_spi_controller_busy(ctlr);
+
+	if (!was_busy && ctlr->prepare_transfer_hardware) {
+		ret = ctlr->prepare_transfer_hardware(ctlr);
+		if (ret) {
+			dev_err(&ctlr->dev,
+				"failed to prepare transfer hardware: %d\n",
+				ret);
+
+			if (ctlr->auto_runtime_pm)
+				pm_runtime_put(ctlr->dev.parent);
+
+			msg->status = ret;
+			spi_finalize_current_message(ctlr);
+
+			return ret;
+		}
+	}
+
+	trace_spi_message_start(msg);
+
+	if (ctlr->prepare_message) {
+		ret = ctlr->prepare_message(ctlr, msg);
+		if (ret) {
+			dev_err(&ctlr->dev, "failed to prepare message: %d\n",
+				ret);
+			msg->status = ret;
+			spi_finalize_current_message(ctlr);
+			return ret;
+		}
+		msg->prepared = true;
+	}
+
+	ret = spi_map_msg(ctlr, msg);
+	if (ret) {
+		msg->status = ret;
+		spi_finalize_current_message(ctlr);
+		return ret;
+	}
+
+	if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
+		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+			xfer->ptp_sts_word_pre = 0;
+			ptp_read_system_prets(xfer->ptp_sts);
+		}
+	}
+
+	ret = ctlr->transfer_one_message(ctlr, msg);
+	if (ret) {
+		dev_err(&ctlr->dev,
+			"failed to transfer one message from queue\n");
+		return ret;
+	}
+
+	return 0;
+}
+
 /**
  * __spi_pump_messages - function which processes spi message queue
  * @ctlr: controller to process queue for
@@ -1564,7 +1638,6 @@ static void spi_idle_runtime_pm(struct spi_controller *ctlr)
  */
 static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
 {
-	struct spi_transfer *xfer;
 	struct spi_message *msg;
 	bool was_busy = false;
 	unsigned long flags;
@@ -1599,6 +1672,7 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
 			    !ctlr->unprepare_transfer_hardware) {
 				spi_idle_runtime_pm(ctlr);
 				ctlr->busy = false;
+				ctlr->queue_empty = true;
 				trace_spi_controller_idle(ctlr);
 			} else {
 				kthread_queue_work(ctlr->kworker,
@@ -1625,6 +1699,7 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
 
 		spin_lock_irqsave(&ctlr->queue_lock, flags);
 		ctlr->idling = false;
+		ctlr->queue_empty = true;
 		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
 		return;
 	}
@@ -1641,75 +1716,7 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
 	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
 
 	mutex_lock(&ctlr->io_mutex);
-
-	if (!was_busy && ctlr->auto_runtime_pm) {
-		ret = pm_runtime_resume_and_get(ctlr->dev.parent);
-		if (ret < 0) {
-			dev_err(&ctlr->dev, "Failed to power device: %d\n",
-				ret);
-			mutex_unlock(&ctlr->io_mutex);
-			return;
-		}
-	}
-
-	if (!was_busy)
-		trace_spi_controller_busy(ctlr);
-
-	if (!was_busy && ctlr->prepare_transfer_hardware) {
-		ret = ctlr->prepare_transfer_hardware(ctlr);
-		if (ret) {
-			dev_err(&ctlr->dev,
-				"failed to prepare transfer hardware: %d\n",
-				ret);
-
-			if (ctlr->auto_runtime_pm)
-				pm_runtime_put(ctlr->dev.parent);
-
-			msg->status = ret;
-			spi_finalize_current_message(ctlr);
-
-			mutex_unlock(&ctlr->io_mutex);
-			return;
-		}
-	}
-
-	trace_spi_message_start(msg);
-
-	if (ctlr->prepare_message) {
-		ret = ctlr->prepare_message(ctlr, msg);
-		if (ret) {
-			dev_err(&ctlr->dev, "failed to prepare message: %d\n",
-				ret);
-			msg->status = ret;
-			spi_finalize_current_message(ctlr);
-			goto out;
-		}
-		msg->prepared = true;
-	}
-
-	ret = spi_map_msg(ctlr, msg);
-	if (ret) {
-		msg->status = ret;
-		spi_finalize_current_message(ctlr);
-		goto out;
-	}
-
-	if (!ctlr->ptp_sts_supported && !ctlr->transfer_one) {
-		list_for_each_entry(xfer, &msg->transfers, transfer_list) {
-			xfer->ptp_sts_word_pre = 0;
-			ptp_read_system_prets(xfer->ptp_sts);
-		}
-	}
-
-	ret = ctlr->transfer_one_message(ctlr, msg);
-	if (ret) {
-		dev_err(&ctlr->dev,
-			"failed to transfer one message from queue: %d\n",
-			ret);
-		goto out;
-	}
-
-out:
+	ret = __spi_pump_transfer_message(ctlr, msg, was_busy);
 	mutex_unlock(&ctlr->io_mutex);
 
 	/* Prod the scheduler in case transfer_one() was busy waiting */
@@ -1839,6 +1846,7 @@ static int spi_init_queue(struct spi_controller *ctlr)
 {
 	ctlr->running = false;
 	ctlr->busy = false;
+	ctlr->queue_empty = true;
 
 	ctlr->kworker = kthread_create_worker(0, dev_name(&ctlr->dev));
 	if (IS_ERR(ctlr->kworker)) {
@@ -1936,11 +1944,20 @@ void spi_finalize_current_message(struct spi_controller *ctlr)
 
 	mesg->prepared = false;
 
-	spin_lock_irqsave(&ctlr->queue_lock, flags);
-	ctlr->cur_msg = NULL;
-	ctlr->fallback = false;
-	kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
-	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
+	if (!mesg->sync) {
+		/*
+		 * This message was sent via the async message queue. Handle
+		 * the queue and kick the worker thread to do the
+		 * idling/shutdown or send the next message if needed.
+		 */
+		spin_lock_irqsave(&ctlr->queue_lock, flags);
+		WARN(ctlr->cur_msg != mesg,
+		     "Finalizing queued message that is not the current head of queue!");
+		ctlr->cur_msg = NULL;
+		ctlr->fallback = false;
+		kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
+		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
+	}
 
 	trace_spi_message_done(mesg);
 
@@ -2043,6 +2060,7 @@ static int __spi_queued_transfer(struct spi_device *spi,
 	msg->status = -EINPROGRESS;
 
 	list_add_tail(&msg->queue, &ctlr->queue);
+	ctlr->queue_empty = false;
 	if (!ctlr->busy && need_pump)
 		kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
 
@@ -3938,6 +3956,39 @@ static int spi_async_locked(struct spi_device *spi, struct spi_message *message)
 
 }
 
+static void __spi_transfer_message_noqueue(struct spi_controller *ctlr, struct spi_message *msg)
+{
+	bool was_busy;
+	int ret;
+
+	mutex_lock(&ctlr->io_mutex);
+
+	/* If another context is idling the device then wait */
+	while (ctlr->idling)
+		usleep_range(10000, 11000);
+
+	was_busy = READ_ONCE(ctlr->busy);
+
+	ret = __spi_pump_transfer_message(ctlr, msg, was_busy);
+	if (ret)
+		goto out;
+
+	if (!was_busy) {
+		kfree(ctlr->dummy_rx);
+		ctlr->dummy_rx = NULL;
+		kfree(ctlr->dummy_tx);
+		ctlr->dummy_tx = NULL;
+		if (ctlr->unprepare_transfer_hardware &&
+		    ctlr->unprepare_transfer_hardware(ctlr))
+			dev_err(&ctlr->dev,
+				"failed to unprepare transfer hardware\n");
+		spi_idle_runtime_pm(ctlr);
+	}
+
+out:
+	mutex_unlock(&ctlr->io_mutex);
+}
+
 /*-------------------------------------------------------------------------*/
 
 /*
@@ -3956,51 +4007,52 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message)
 	DECLARE_COMPLETION_ONSTACK(done);
 	int status;
 	struct spi_controller *ctlr = spi->controller;
-	unsigned long flags;
 
 	status = __spi_validate(spi, message);
 	if (status != 0)
 		return status;
 
-	message->complete = spi_complete;
-	message->context = &done;
 	message->spi = spi;
 
 	SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_sync);
 	SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_sync);
 
 	/*
-	 * If we're not using the legacy transfer method then we will
-	 * try to transfer in the calling context so special case.
-	 * This code would be less tricky if we could remove the
-	 * support for driver implemented message queues.
+	 * Checking queue_empty here only guarantees async/sync message
+	 * ordering when coming from the same context. It does not need to
+	 * guard against reentrancy from a different context. The io_mutex
+	 * will catch those cases.
 	 */
-	if (ctlr->transfer == spi_queued_transfer) {
-		spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
+	if (READ_ONCE(ctlr->queue_empty)) {
+		message->sync = true;
+		message->actual_length = 0;
+		message->status = -EINPROGRESS;
 
 		trace_spi_message_submit(message);
 
-		status = __spi_queued_transfer(spi, message, false);
+		SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_sync_immediate);
+		SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics, spi_sync_immediate);
 
-		spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
-	} else {
-		status = spi_async_locked(spi, message);
+		__spi_transfer_message_noqueue(ctlr, message);
+
+		return message->status;
 	}
 
+	/*
+	 * There are messages in the async queue that could have originated
+	 * from the same context, so we need to preserve ordering.
+	 * Therefore we send the message to the async queue and wait until they
+	 * are completed.
+	 */
+	message->complete = spi_complete;
+	message->context = &done;
+	status = spi_async_locked(spi, message);
 	if (status == 0) {
-		/* Push out the messages in the calling context if we can */
-		if (ctlr->transfer == spi_queued_transfer) {
-			SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics,
-						       spi_sync_immediate);
-			SPI_STATISTICS_INCREMENT_FIELD(spi->pcpu_statistics,
-						       spi_sync_immediate);
-			__spi_pump_messages(ctlr, false);
-		}
-
 		wait_for_completion(&done);
 		status = message->status;
 	}
 	message->context = NULL;
+
 	return status;
 }
 
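Note on the ordering comment in the rewritten __spi_sync() above: the queue_empty check only has to preserve async/sync ordering for submissions coming from the same context; races with other contexts are handled by ctlr->io_mutex. A hedged illustration of the case being protected, assuming a caller that mixes spi_async() and spi_sync() (demo_mixed_io() is hypothetical and not part of the patch):

#include <linux/spi/spi.h>

/* Hypothetical caller mixing async and sync submissions, for illustration only. */
static int demo_mixed_io(struct spi_device *spi,
			 struct spi_message *async_msg,
			 struct spi_message *sync_msg)
{
	int ret;

	/* Lands on ctlr->queue; __spi_queued_transfer() clears queue_empty. */
	ret = spi_async(spi, async_msg);
	if (ret)
		return ret;

	/*
	 * queue_empty is now false, so __spi_sync() takes the queued path and
	 * sync_msg is sent behind async_msg, preserving submission order.
	 * Only when the queue is empty does spi_sync() pump the message
	 * directly in the calling context.
	 */
	return spi_sync(spi, sync_msg);
}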
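For the common case of a lone synchronous transfer while the queue is empty, spi_sync() now pumps the message entirely in the calling context under io_mutex, with no round trip through the controller's kworker. A minimal client-side sketch of that fast path (demo_read_reg() and its register layout are illustrative, not from the patch; cmd and val are assumed to be DMA-safe buffers):

#include <linux/spi/spi.h>

/* Hypothetical helper: read one register over SPI using a sync message. */
static int demo_read_reg(struct spi_device *spi, u8 *cmd, u8 *val)
{
	struct spi_transfer xfers[] = {
		{ .tx_buf = cmd, .len = 1 },
		{ .rx_buf = val, .len = 1 },
	};
	struct spi_message msg;

	spi_message_init_with_transfers(&msg, xfers, ARRAY_SIZE(xfers));

	/*
	 * If no async messages are pending (ctlr->queue_empty), spi_sync()
	 * marks the message as sync and hands it to
	 * __spi_transfer_message_noqueue(), so the transfer runs right here
	 * instead of being queued for the message pump thread.
	 */
	return spi_sync(spi, &msg);
}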