10
10
/* Packed size, in bytes, of the reset control register command buffer */
#define SJA1105_SIZE_RESET_CMD 4
/* Packed size, in bytes, of one SPI message header (access type,
 * register address and, for reads, the word count)
 */
#define SJA1105_SIZE_SPI_MSG_HEADER 4
/* Largest payload of a single SPI message: 64 32-bit words.
 * Longer buffers must be split into chunks of at most this size.
 */
#define SJA1105_SIZE_SPI_MSG_MAXLEN (64 * 4)
13
- #define SJA1105_SIZE_SPI_TRANSFER_MAX \
14
- (SJA1105_SIZE_SPI_MSG_HEADER + SJA1105_SIZE_SPI_MSG_MAXLEN)
15
13
16
- static int sja1105_spi_transfer (const struct sja1105_private * priv ,
17
- const void * tx , void * rx , int size )
18
- {
19
- struct spi_device * spi = priv -> spidev ;
20
- struct spi_transfer transfer = {
21
- .tx_buf = tx ,
22
- .rx_buf = rx ,
23
- .len = size ,
24
- };
25
- struct spi_message msg ;
26
- int rc ;
27
-
28
- if (size > SJA1105_SIZE_SPI_TRANSFER_MAX ) {
29
- dev_err (& spi -> dev , "SPI message (%d) longer than max of %d\n" ,
30
- size , SJA1105_SIZE_SPI_TRANSFER_MAX );
31
- return - EMSGSIZE ;
32
- }
33
-
34
- spi_message_init (& msg );
35
- spi_message_add_tail (& transfer , & msg );
36
-
37
- rc = spi_sync (spi , & msg );
38
- if (rc < 0 ) {
39
- dev_err (& spi -> dev , "SPI transfer failed: %d\n" , rc );
40
- return rc ;
41
- }
42
-
43
- return rc ;
44
- }
14
/* Bookkeeping for one slice (at most SJA1105_SIZE_SPI_MSG_MAXLEN bytes)
 * of a larger packed buffer being transferred over SPI: where the
 * payload lives, how long it is, and which absolute register address
 * it targets.
 */
struct sja1105_chunk {
	u8	*buf;		/* Cursor into the caller's packed buffer */
	size_t	len;		/* Payload length of this chunk, in bytes */
	u64	reg_addr;	/* Absolute register address for this chunk */
};
45
19
46
20
static void
47
21
sja1105_spi_message_pack (void * buf , const struct sja1105_spi_message * msg )
@@ -55,49 +29,98 @@ sja1105_spi_message_pack(void *buf, const struct sja1105_spi_message *msg)
55
29
sja1105_pack (buf , & msg -> address , 24 , 4 , size );
56
30
}
57
31
32
/* A transfer of num_chunks chunks is laid out as one flat spi_transfer
 * array of 2 * num_chunks entries: the header transfer for chunk i sits
 * at index 2*i, and its payload transfer immediately after, at 2*i + 1.
 */
#define sja1105_hdr_xfer(xfers, chunk) \
	((xfers) + 2 * (chunk))
#define sja1105_chunk_xfer(xfers, chunk) \
	((xfers) + 2 * (chunk) + 1)
/* The packed message headers live back-to-back in a single allocation;
 * this selects the SJA1105_SIZE_SPI_MSG_HEADER-byte slot of chunk i.
 */
#define sja1105_hdr_buf(hdr_bufs, chunk) \
	((hdr_bufs) + (chunk) * SJA1105_SIZE_SPI_MSG_HEADER)
38
+
58
39
/* If @rw is:
 * - SPI_WRITE: creates and sends an SPI write message at absolute
 *   address reg_addr, taking @len bytes from *buf
 * - SPI_READ: creates and sends an SPI read message from absolute
 *   address reg_addr, writing @len bytes into *buf
 *
 * The buffer is split into chunks of at most SJA1105_SIZE_SPI_MSG_MAXLEN
 * bytes. Each chunk becomes a (header, payload) pair of spi_transfers,
 * and all pairs are submitted to the SPI core in one spi_sync_transfer()
 * call, with the chip select de-asserted between chunks.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or the negative
 * error code reported by spi_sync_transfer().
 */
int sja1105_xfer_buf(const struct sja1105_private *priv,
		     sja1105_spi_rw_mode_t rw, u64 reg_addr,
		     u8 *buf, size_t len)
{
	struct sja1105_chunk chunk = {
		/* The first chunk covers as much of @buf as one message allows */
		.len = min_t(size_t, len, SJA1105_SIZE_SPI_MSG_MAXLEN),
		.reg_addr = reg_addr,
		.buf = buf,
	};
	struct spi_device *spi = priv->spidev;
	struct spi_transfer *xfers;
	int num_chunks;
	int rc, i = 0;
	u8 *hdr_bufs;

	num_chunks = DIV_ROUND_UP(len, SJA1105_SIZE_SPI_MSG_MAXLEN);

	/* One transfer for each message header, one for each message
	 * payload (chunk).
	 */
	xfers = kcalloc(2 * num_chunks, sizeof(struct spi_transfer),
			GFP_KERNEL);
	if (!xfers)
		return -ENOMEM;

	/* Packed buffers for the num_chunks SPI message headers,
	 * stored as a contiguous array
	 */
	hdr_bufs = kcalloc(num_chunks, SJA1105_SIZE_SPI_MSG_HEADER,
			   GFP_KERNEL);
	if (!hdr_bufs) {
		kfree(xfers);
		return -ENOMEM;
	}

	for (i = 0; i < num_chunks; i++) {
		struct spi_transfer *chunk_xfer = sja1105_chunk_xfer(xfers, i);
		struct spi_transfer *hdr_xfer = sja1105_hdr_xfer(xfers, i);
		u8 *hdr_buf = sja1105_hdr_buf(hdr_bufs, i);
		struct sja1105_spi_message msg;

		/* Populate the transfer's header buffer */
		msg.address = chunk.reg_addr;
		msg.access = rw;
		if (rw == SPI_READ)
			/* read_count is expressed in 32-bit words */
			msg.read_count = chunk.len / 4;
		else
			/* Ignored */
			msg.read_count = 0;
		sja1105_spi_message_pack(hdr_buf, &msg);
		hdr_xfer->tx_buf = hdr_buf;
		hdr_xfer->len = SJA1105_SIZE_SPI_MSG_HEADER;

		/* Populate the transfer's data buffer */
		if (rw == SPI_READ)
			chunk_xfer->rx_buf = chunk.buf;
		else
			chunk_xfer->tx_buf = chunk.buf;
		chunk_xfer->len = chunk.len;

		/* Calculate next chunk */
		chunk.buf += chunk.len;
		/* Register addresses advance in 32-bit words, not bytes */
		chunk.reg_addr += chunk.len / 4;
		/* Remaining bytes, capped to one message's worth; 0 on the
		 * last iteration, which terminates the cs_change chain below.
		 */
		chunk.len = min_t(size_t, (ptrdiff_t)(buf + len - chunk.buf),
				  SJA1105_SIZE_SPI_MSG_MAXLEN);

		/* De-assert the chip select after each chunk. */
		if (chunk.len)
			chunk_xfer->cs_change = 1;
	}

	rc = spi_sync_transfer(spi, xfers, 2 * num_chunks);
	if (rc < 0)
		dev_err(&spi->dev, "SPI transfer failed: %d\n", rc);

	kfree(hdr_bufs);
	kfree(xfers);

	return rc;
}
102
125
103
126
/* If @rw is:
@@ -152,43 +175,6 @@ int sja1105_xfer_u32(const struct sja1105_private *priv,
152
175
return rc ;
153
176
}
154
177
155
- /* Should be used if a @packed_buf larger than SJA1105_SIZE_SPI_MSG_MAXLEN
156
- * must be sent/received. Splitting the buffer into chunks and assembling
157
- * those into SPI messages is done automatically by this function.
158
- */
159
- static int sja1105_xfer_long_buf (const struct sja1105_private * priv ,
160
- sja1105_spi_rw_mode_t rw , u64 base_addr ,
161
- void * packed_buf , u64 buf_len )
162
- {
163
- struct chunk {
164
- void * buf_ptr ;
165
- int len ;
166
- u64 spi_address ;
167
- } chunk ;
168
- int distance_to_end ;
169
- int rc ;
170
-
171
- /* Initialize chunk */
172
- chunk .buf_ptr = packed_buf ;
173
- chunk .spi_address = base_addr ;
174
- chunk .len = min_t (int , buf_len , SJA1105_SIZE_SPI_MSG_MAXLEN );
175
-
176
- while (chunk .len ) {
177
- rc = sja1105_xfer_buf (priv , rw , chunk .spi_address ,
178
- chunk .buf_ptr , chunk .len );
179
- if (rc < 0 )
180
- return rc ;
181
-
182
- chunk .buf_ptr += chunk .len ;
183
- chunk .spi_address += chunk .len / 4 ;
184
- distance_to_end = (uintptr_t )(packed_buf + buf_len -
185
- chunk .buf_ptr );
186
- chunk .len = min (distance_to_end , SJA1105_SIZE_SPI_MSG_MAXLEN );
187
- }
188
-
189
- return 0 ;
190
- }
191
-
192
178
/* Back-ported structure from UM11040 Table 112.
193
179
* Reset control register (addr. 100440h)
194
180
* In the SJA1105 E/T, only warm_rst and cold_rst are
@@ -451,8 +437,8 @@ int sja1105_static_config_upload(struct sja1105_private *priv)
451
437
/* Wait for the switch to come out of reset */
452
438
usleep_range (1000 , 5000 );
453
439
/* Upload the static config to the device */
454
- rc = sja1105_xfer_long_buf (priv , SPI_WRITE , regs -> config ,
455
- config_buf , buf_len );
440
+ rc = sja1105_xfer_buf (priv , SPI_WRITE , regs -> config ,
441
+ config_buf , buf_len );
456
442
if (rc < 0 ) {
457
443
dev_err (dev , "Failed to upload config, retrying...\n" );
458
444
continue ;
0 commit comments