@@ -22,7 +22,8 @@ module_param(timesync_delay_ms, uint, 0600);
22
22
MODULE_PARM_DESC (timesync_delay_ms , "Delay in ms between two consecutive timesync operations" );
23
23
24
24
/* Types of messages exchanged with the device over the timesync channel. */
enum qts_msg_type {
	QAIC_TS_CMD_TO_HOST,	/* device asks the host to send its current time */
	QAIC_TS_SYNC_REQ,	/* host pushes its wall-clock time to the device */
	QAIC_TS_ACK_TO_HOST,	/* device acknowledges a sync request */
	QAIC_TS_MSG_TYPE_MAX
};
@@ -83,6 +84,16 @@ struct mqts_dev {
83
84
struct qts_host_time_sync_msg_data * sync_msg ;
84
85
};
85
86
87
+ struct qts_resp_msg {
88
+ struct qts_hdr hdr ;
89
+ } __packed ;
90
+
91
+ struct qts_resp {
92
+ struct qts_resp_msg data ;
93
+ struct work_struct work ;
94
+ struct qaic_device * qdev ;
95
+ };
96
+
86
97
#ifdef readq
87
98
static u64 read_qtimer (const volatile void __iomem * addr )
88
99
{
@@ -234,12 +245,151 @@ static struct mhi_driver qaic_timesync_driver = {
234
245
},
235
246
};
236
247
248
+ static void qaic_boot_timesync_worker (struct work_struct * work )
249
+ {
250
+ struct qts_resp * resp = container_of (work , struct qts_resp , work );
251
+ struct qts_host_time_sync_msg_data * req ;
252
+ struct qts_resp_msg data = resp -> data ;
253
+ struct qaic_device * qdev = resp -> qdev ;
254
+ struct mhi_device * mhi_dev ;
255
+ struct timespec64 ts ;
256
+ int ret ;
257
+
258
+ mhi_dev = qdev -> qts_ch ;
259
+ /* Queue the response message beforehand to avoid race conditions */
260
+ ret = mhi_queue_buf (mhi_dev , DMA_FROM_DEVICE , & resp -> data , sizeof (resp -> data ), MHI_EOT );
261
+ if (ret ) {
262
+ kfree (resp );
263
+ dev_warn (& mhi_dev -> dev , "Failed to re-queue response buffer %d\n" , ret );
264
+ return ;
265
+ }
266
+
267
+ switch (data .hdr .msg_type ) {
268
+ case QAIC_TS_CMD_TO_HOST :
269
+ req = kzalloc (sizeof (* req ), GFP_KERNEL );
270
+ if (!req )
271
+ break ;
272
+
273
+ req -> header = data .hdr ;
274
+ req -> header .msg_type = QAIC_TS_SYNC_REQ ;
275
+ ktime_get_real_ts64 (& ts );
276
+ req -> data .tv_sec = cpu_to_le64 (ts .tv_sec );
277
+ req -> data .tv_usec = cpu_to_le64 (div_u64 (ts .tv_nsec , NSEC_PER_USEC ));
278
+
279
+ ret = mhi_queue_buf (mhi_dev , DMA_TO_DEVICE , req , sizeof (* req ), MHI_EOT );
280
+ if (ret ) {
281
+ kfree (req );
282
+ dev_dbg (& mhi_dev -> dev , "Failed to send request message. Error %d\n" , ret );
283
+ }
284
+ break ;
285
+ case QAIC_TS_ACK_TO_HOST :
286
+ dev_dbg (& mhi_dev -> dev , "ACK received from device\n" );
287
+ break ;
288
+ default :
289
+ dev_err (& mhi_dev -> dev , "Invalid message type %u.\n" , data .hdr .msg_type );
290
+ }
291
+ }
292
+
293
+ static int qaic_boot_timesync_queue_resp (struct mhi_device * mhi_dev , struct qaic_device * qdev )
294
+ {
295
+ struct qts_resp * resp ;
296
+ int ret ;
297
+
298
+ resp = kzalloc (sizeof (* resp ), GFP_KERNEL );
299
+ if (!resp )
300
+ return - ENOMEM ;
301
+
302
+ resp -> qdev = qdev ;
303
+ INIT_WORK (& resp -> work , qaic_boot_timesync_worker );
304
+
305
+ ret = mhi_queue_buf (mhi_dev , DMA_FROM_DEVICE , & resp -> data , sizeof (resp -> data ), MHI_EOT );
306
+ if (ret ) {
307
+ kfree (resp );
308
+ dev_warn (& mhi_dev -> dev , "Failed to queue response buffer %d\n" , ret );
309
+ return ret ;
310
+ }
311
+
312
+ return 0 ;
313
+ }
314
+
315
+ static void qaic_boot_timesync_remove (struct mhi_device * mhi_dev )
316
+ {
317
+ struct qaic_device * qdev ;
318
+
319
+ qdev = dev_get_drvdata (& mhi_dev -> dev );
320
+ mhi_unprepare_from_transfer (qdev -> qts_ch );
321
+ qdev -> qts_ch = NULL ;
322
+ }
323
+
324
+ static int qaic_boot_timesync_probe (struct mhi_device * mhi_dev , const struct mhi_device_id * id )
325
+ {
326
+ struct qaic_device * qdev = pci_get_drvdata (to_pci_dev (mhi_dev -> mhi_cntrl -> cntrl_dev ));
327
+ int ret ;
328
+
329
+ ret = mhi_prepare_for_transfer (mhi_dev );
330
+ if (ret )
331
+ return ret ;
332
+
333
+ qdev -> qts_ch = mhi_dev ;
334
+ dev_set_drvdata (& mhi_dev -> dev , qdev );
335
+
336
+ ret = qaic_boot_timesync_queue_resp (mhi_dev , qdev );
337
+ if (ret ) {
338
+ dev_set_drvdata (& mhi_dev -> dev , NULL );
339
+ qdev -> qts_ch = NULL ;
340
+ mhi_unprepare_from_transfer (mhi_dev );
341
+ }
342
+
343
+ return ret ;
344
+ }
345
+
346
+ static void qaic_boot_timesync_ul_xfer_cb (struct mhi_device * mhi_dev , struct mhi_result * mhi_result )
347
+ {
348
+ kfree (mhi_result -> buf_addr );
349
+ }
350
+
351
+ static void qaic_boot_timesync_dl_xfer_cb (struct mhi_device * mhi_dev , struct mhi_result * mhi_result )
352
+ {
353
+ struct qts_resp * resp = container_of (mhi_result -> buf_addr , struct qts_resp , data );
354
+
355
+ if (mhi_result -> transaction_status || mhi_result -> bytes_xferd != sizeof (resp -> data )) {
356
+ kfree (resp );
357
+ return ;
358
+ }
359
+
360
+ queue_work (resp -> qdev -> qts_wq , & resp -> work );
361
+ }
362
+
363
+ static const struct mhi_device_id qaic_boot_timesync_match_table [] = {
364
+ { .chan = "QAIC_TIMESYNC" },
365
+ {},
366
+ };
367
+
368
+ static struct mhi_driver qaic_boot_timesync_driver = {
369
+ .id_table = qaic_boot_timesync_match_table ,
370
+ .remove = qaic_boot_timesync_remove ,
371
+ .probe = qaic_boot_timesync_probe ,
372
+ .ul_xfer_cb = qaic_boot_timesync_ul_xfer_cb ,
373
+ .dl_xfer_cb = qaic_boot_timesync_dl_xfer_cb ,
374
+ .driver = {
375
+ .name = "qaic_timesync" ,
376
+ },
377
+ };
378
+
237
379
int qaic_timesync_init (void )
238
380
{
239
- return mhi_driver_register (& qaic_timesync_driver );
381
+ int ret ;
382
+
383
+ ret = mhi_driver_register (& qaic_timesync_driver );
384
+ if (ret )
385
+ return ret ;
386
+ ret = mhi_driver_register (& qaic_boot_timesync_driver );
387
+
388
+ return ret ;
240
389
}
241
390
242
391
void qaic_timesync_deinit (void )
243
392
{
393
+ mhi_driver_unregister (& qaic_boot_timesync_driver );
244
394
mhi_driver_unregister (& qaic_timesync_driver );
245
395
}
0 commit comments