/* The amount of RX buffer space consumed by standard skb overhead */
#define IPA_RX_BUFFER_OVERHEAD (PAGE_SIZE - SKB_MAX_ORDER(NET_SKB_PAD, 0))

+ /* Where to find the QMAP mux_id for a packet within modem-supplied metadata */
+ #define IPA_ENDPOINT_QMAP_METADATA_MASK 0x000000ff /* host byte order */
+
#define IPA_ENDPOINT_RESET_AGGR_RETRY_MAX 3
#define IPA_AGGR_TIME_LIMIT_DEFAULT 1000 /* microseconds */

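Note (illustration, not part of the patch): the new mask simply selects the low-order byte of the 4-byte, host-byte-order metadata the modem supplies with each packet, which is where the mux_id lives. A minimal sketch, with a hypothetical helper name:

#include <stdint.h>

#define IPA_ENDPOINT_QMAP_METADATA_MASK 0x000000ff /* host byte order */

/* Hypothetical helper, for illustration only: extract the mux_id the
 * modem placed in the low-order byte of its 4-byte metadata value.
 */
static inline uint8_t example_mux_id_from_metadata(uint32_t metadata)
{
        return (uint8_t)(metadata & IPA_ENDPOINT_QMAP_METADATA_MASK);
}
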
@@ -433,6 +436,24 @@ static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint)
        iowrite32(val, endpoint->ipa->reg_virt + offset);
}

+ /**
+  * We program QMAP endpoints so each packet received is preceded by a QMAP
+  * header structure. The QMAP header contains a 1-byte mux_id and 2-byte
+  * packet size field, and we have the IPA hardware populate both for each
+  * received packet. The header is configured (in the HDR_EXT register)
+  * to use big endian format.
+  *
+  * The packet size is written into the QMAP header's pkt_len field. That
+  * location is defined here using the HDR_OFST_PKT_SIZE field.
+  *
+  * The mux_id comes from a 4-byte metadata value supplied with each packet
+  * by the modem. It is *not* a QMAP header, but it does contain the mux_id
+  * value that we want, in its low-order byte. A bitmask defined in the
+  * endpoint's METADATA_MASK register defines which byte within the modem
+  * metadata contains the mux_id. And the OFST_METADATA field programmed
+  * here indicates where the extracted byte should be placed within the QMAP
+  * header.
+  */
static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint)
{
        u32 offset = IPA_REG_ENDP_INIT_HDR_N_OFFSET(endpoint->endpoint_id);
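For reference while reading the next hunk: the QMAP header described in the new comment has roughly the layout sketched below. This is modeled on the rmnet driver's struct rmnet_map_header (bitfield order shown for little-endian hosts); the struct name here is illustrative, but the byte offsets are the ones the offsetof() calls in this patch depend on:

#include <linux/types.h>

/* Illustrative layout only -- the driver uses the rmnet definition itself */
struct example_qmap_header {
        u8 pad_len:6;           /* trailing pad byte count */
        u8 reserved_bit:1;
        u8 cd_bit:1;            /* command/data indicator */
        u8 mux_id;              /* byte offset 1: written from modem metadata */
        __be16 pkt_len;         /* byte offset 2: big-endian packet length */
};

/* offsetof(struct example_qmap_header, mux_id)  == 1 (programmed via HDR_OFST_METADATA)
 * offsetof(struct example_qmap_header, pkt_len) == 2 (programmed via HDR_OFST_PKT_SIZE)
 */
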
@@ -441,25 +462,31 @@ static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint)
        if (endpoint->data->qmap) {
                size_t header_size = sizeof(struct rmnet_map_header);

+               /* We might supply a checksum header after the QMAP header */
                if (endpoint->toward_ipa && endpoint->data->checksum)
                        header_size += sizeof(struct rmnet_map_ul_csum_header);
-
                val |= u32_encode_bits(header_size, HDR_LEN_FMASK);
-               /* metadata is the 4 byte rmnet_map header itself */
-               val |= HDR_OFST_METADATA_VALID_FMASK;
-               val |= u32_encode_bits(0, HDR_OFST_METADATA_FMASK);
-               /* HDR_ADDITIONAL_CONST_LEN is 0; (IPA->AP only) */
+
+               /* Define how to fill fields in a received QMAP header */
                if (!endpoint->toward_ipa) {
-                       u32 size_offset = offsetof(struct rmnet_map_header,
-                                                  pkt_len);
+                       u32 off;        /* Field offset within header */
+
+                       /* Where IPA will write the metadata value */
+                       off = offsetof(struct rmnet_map_header, mux_id);
+                       val |= u32_encode_bits(off, HDR_OFST_METADATA_FMASK);

+                       /* Where IPA will write the length */
+                       off = offsetof(struct rmnet_map_header, pkt_len);
                        val |= HDR_OFST_PKT_SIZE_VALID_FMASK;
-                       val |= u32_encode_bits(size_offset,
-                                              HDR_OFST_PKT_SIZE_FMASK);
+                       val |= u32_encode_bits(off, HDR_OFST_PKT_SIZE_FMASK);
                }
+               /* For QMAP TX, metadata offset is 0 (modem assumes this) */
+               val |= HDR_OFST_METADATA_VALID_FMASK;
+
+               /* HDR_ADDITIONAL_CONST_LEN is 0; (RX only) */
                /* HDR_A5_MUX is 0 */
                /* HDR_LEN_INC_DEAGG_HDR is 0 */
-               /* HDR_METADATA_REG_VALID is 0; (AP->IPA only) */
+               /* HDR_METADATA_REG_VALID is 0 (TX only) */
        }

        iowrite32(val, endpoint->ipa->reg_virt + offset);
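A note on the helpers used above: each *_FMASK is a field mask built with GENMASK(), and u32_encode_bits() from <linux/bitfield.h> shifts a value into the field selected by that mask. A self-contained sketch with made-up field positions (the real masks live in the driver's ipa_reg.h):

#include <linux/types.h>
#include <linux/bits.h>
#include <linux/bitfield.h>

/* Hypothetical field positions, stand-ins for the HDR_* masks in ipa_reg.h */
#define EXAMPLE_HDR_LEN_FMASK           GENMASK(5, 0)
#define EXAMPLE_OFST_METADATA_FMASK     GENMASK(12, 7)

/* Compose a register value in the same style as ipa_endpoint_init_hdr() */
static u32 example_hdr_reg(u32 header_size, u32 metadata_offset)
{
        u32 val = 0;

        val |= u32_encode_bits(header_size, EXAMPLE_HDR_LEN_FMASK);
        val |= u32_encode_bits(metadata_offset, EXAMPLE_OFST_METADATA_FMASK);

        return val;
}
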
@@ -472,38 +499,27 @@ static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
        u32 val = 0;

        val |= HDR_ENDIANNESS_FMASK;    /* big endian */
-       val |= HDR_TOTAL_LEN_OR_PAD_VALID_FMASK;
-       /* HDR_TOTAL_LEN_OR_PAD is 0 (pad, not total_len) */
+
+       /* A QMAP header contains a 6 bit pad field at offset 0. The RMNet
+        * driver assumes this field is meaningful in packets it receives,
+        * and assumes the header's payload length includes that padding.
+        * The RMNet driver does *not* pad packets it sends, however, so
+        * the pad field (although 0) should be ignored.
+        */
+       if (endpoint->data->qmap && !endpoint->toward_ipa) {
+               val |= HDR_TOTAL_LEN_OR_PAD_VALID_FMASK;
+               /* HDR_TOTAL_LEN_OR_PAD is 0 (pad, not total_len) */
+               val |= HDR_PAYLOAD_LEN_INC_PADDING_FMASK;
+               /* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0 */
+       }
+
        /* HDR_PAYLOAD_LEN_INC_PADDING is 0 */
-       /* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0 */
        if (!endpoint->toward_ipa)
                val |= u32_encode_bits(pad_align, HDR_PAD_TO_ALIGNMENT_FMASK);

        iowrite32(val, endpoint->ipa->reg_virt + offset);
}

- /**
-  * Generate a metadata mask value that will select only the mux_id
-  * field in an rmnet_map header structure. The mux_id is at offset
-  * 1 byte from the beginning of the structure, but the metadata
-  * value is treated as a 4-byte unit. So this mask must be computed
-  * with endianness in mind. Note that ipa_endpoint_init_hdr_metadata_mask()
-  * will convert this value to the proper byte order.
-  *
-  * Marked __always_inline because this is really computing a
-  * constant value.
-  */
- static __always_inline __be32 ipa_rmnet_mux_id_metadata_mask(void)
- {
-       size_t mux_id_offset = offsetof(struct rmnet_map_header, mux_id);
-       u32 mux_id_mask = 0;
-       u8 *bytes;
-
-       bytes = (u8 *)&mux_id_mask;
-       bytes[mux_id_offset] = 0xff;    /* mux_id is 1 byte */
-
-       return cpu_to_be32(mux_id_mask);
- }

static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint)
{
@@ -513,8 +529,9 @@ static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint)

        offset = IPA_REG_ENDP_INIT_HDR_METADATA_MASK_N_OFFSET(endpoint_id);

+       /* Note that HDR_ENDIANNESS indicates big endian header fields */
        if (!endpoint->toward_ipa && endpoint->data->qmap)
-               val = ipa_rmnet_mux_id_metadata_mask();
+               val = cpu_to_be32(IPA_ENDPOINT_QMAP_METADATA_MASK);

        iowrite32(val, endpoint->ipa->reg_virt + offset);
}
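One way to see why the cpu_to_be32() conversion matters: the metadata mask is matched against big-endian header fields (per the HDR_ENDIANNESS note above), so the host-order mask must be byte-swapped before it is written. A userspace sketch of just that conversion, with htonl() standing in for cpu_to_be32():

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>          /* htonl() plays the role of cpu_to_be32() here */

int main(void)
{
        uint32_t mask = 0x000000ff;     /* IPA_ENDPOINT_QMAP_METADATA_MASK */
        uint32_t wire = htonl(mask);    /* value the driver would write out */
        const uint8_t *b = (const uint8_t *)&wire;

        /* Regardless of host endianness, the 0xff ends up in the last byte */
        printf("%02x %02x %02x %02x\n", b[0], b[1], b[2], b[3]);  /* 00 00 00 ff */
        return 0;
}
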
@@ -693,10 +710,12 @@ static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint)
        u32 seq_type = endpoint->seq_type;
        u32 val = 0;

+       /* Sequencer type is made up of four nibbles */
        val |= u32_encode_bits(seq_type & 0xf, HPS_SEQ_TYPE_FMASK);
        val |= u32_encode_bits((seq_type >> 4) & 0xf, DPS_SEQ_TYPE_FMASK);
-       /* HPS_REP_SEQ_TYPE is 0 */
-       /* DPS_REP_SEQ_TYPE is 0 */
+       /* The second two apply to replicated packets */
+       val |= u32_encode_bits((seq_type >> 8) & 0xf, HPS_REP_SEQ_TYPE_FMASK);
+       val |= u32_encode_bits((seq_type >> 12) & 0xf, DPS_REP_SEQ_TYPE_FMASK);

        iowrite32(val, endpoint->ipa->reg_virt + offset);
}
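To illustrate the nibble layout the new comment refers to: the low 16 bits of seq_type carry four 4-bit fields, the HPS and DPS sequencer types followed by their replicated-packet counterparts. A small sketch with a made-up value:

#include <stdio.h>

int main(void)
{
        unsigned int seq_type = 0x4321; /* illustrative value only */

        printf("HPS_SEQ_TYPE     = 0x%x\n", seq_type & 0xf);          /* 0x1 */
        printf("DPS_SEQ_TYPE     = 0x%x\n", (seq_type >> 4) & 0xf);   /* 0x2 */
        printf("HPS_REP_SEQ_TYPE = 0x%x\n", (seq_type >> 8) & 0xf);   /* 0x3 */
        printf("DPS_REP_SEQ_TYPE = 0x%x\n", (seq_type >> 12) & 0xf);  /* 0x4 */
        return 0;
}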