Commit 1c60c7f

ffainelli authored and davem330 committed
net: dsa: bcm_sf2: Get rid of unmarshalling functions
Now that we have migrated the CFP rule handling to a list with a software copy, the delete/get operation just returns what is on the list; there is no need to read from the hardware, which is both slow and more error-prone.

Signed-off-by: Florian Fainelli <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
1 parent 1c0130f commit 1c60c7f
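
For reference, the get path this commit relies on becomes a lookup of the software copy stored when the rule was inserted, rather than a read-back of the TCAM. The sketch below only illustrates that idea and is not code from this commit; the struct cfp_rule layout, the rules_list field and the bcm_sf2_cfp_rule_find() helper name/signature are assumptions inferred from the commit message (and it presumes the driver's usual includes such as <linux/list.h> and <linux/ethtool.h>).

/* Illustrative sketch only -- not part of this commit. Field and helper
 * names below are assumptions, not the driver's actual definitions.
 */
struct cfp_rule {
	int port;
	struct ethtool_rx_flow_spec fs;	/* software copy saved at insert time */
	struct list_head next;
};

/* Hypothetical lookup over the per-switch list of software rule copies */
static struct cfp_rule *bcm_sf2_cfp_rule_find(struct bcm_sf2_priv *priv,
					      int port, u32 location)
{
	struct cfp_rule *rule;

	list_for_each_entry(rule, &priv->cfp.rules_list, next)
		if (rule->port == port && rule->fs.location == location)
			return rule;

	return NULL;
}

static int bcm_sf2_cfp_rule_get(struct bcm_sf2_priv *priv, int port,
				struct ethtool_rxnfc *nfc)
{
	struct cfp_rule *rule;

	rule = bcm_sf2_cfp_rule_find(priv, port, nfc->fs.location);
	if (!rule)
		return -EINVAL;

	/* Return the cached flow spec; no slow, error-prone TCAM read-back */
	memcpy(&nfc->fs, &rule->fs, sizeof(rule->fs));

	return 0;
}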

File tree

1 file changed (+0, -310 lines)

drivers/net/dsa/bcm_sf2_cfp.c

Lines changed: 0 additions & 310 deletions
@@ -974,316 +974,6 @@ static void bcm_sf2_invert_masks(struct ethtool_rx_flow_spec *flow)
 	flow->m_ext.data[1] ^= cpu_to_be32(~0);
 }
 
-static int __maybe_unused bcm_sf2_cfp_unslice_ipv4(struct bcm_sf2_priv *priv,
-						    struct ethtool_tcpip4_spec *v4_spec,
-						    bool mask)
-{
-	u32 reg, offset, ipv4;
-	u16 src_dst_port;
-
-	if (mask)
-		offset = CORE_CFP_MASK_PORT(3);
-	else
-		offset = CORE_CFP_DATA_PORT(3);
-
-	reg = core_readl(priv, offset);
-	/* src port [15:8] */
-	src_dst_port = reg << 8;
-
-	if (mask)
-		offset = CORE_CFP_MASK_PORT(2);
-	else
-		offset = CORE_CFP_DATA_PORT(2);
-
-	reg = core_readl(priv, offset);
-	/* src port [7:0] */
-	src_dst_port |= (reg >> 24);
-
-	v4_spec->pdst = cpu_to_be16(src_dst_port);
-	v4_spec->psrc = cpu_to_be16((u16)(reg >> 8));
-
-	/* IPv4 dst [15:8] */
-	ipv4 = (reg & 0xff) << 8;
-
-	if (mask)
-		offset = CORE_CFP_MASK_PORT(1);
-	else
-		offset = CORE_CFP_DATA_PORT(1);
-
-	reg = core_readl(priv, offset);
-	/* IPv4 dst [31:16] */
-	ipv4 |= ((reg >> 8) & 0xffff) << 16;
-	/* IPv4 dst [7:0] */
-	ipv4 |= (reg >> 24) & 0xff;
-	v4_spec->ip4dst = cpu_to_be32(ipv4);
-
-	/* IPv4 src [15:8] */
-	ipv4 = (reg & 0xff) << 8;
-
-	if (mask)
-		offset = CORE_CFP_MASK_PORT(0);
-	else
-		offset = CORE_CFP_DATA_PORT(0);
-	reg = core_readl(priv, offset);
-
-	/* Once the TCAM is programmed, the mask reflects the slice number
-	 * being matched, don't bother checking it when reading back the
-	 * mask spec
-	 */
-	if (!mask && !(reg & SLICE_VALID))
-		return -EINVAL;
-
-	/* IPv4 src [7:0] */
-	ipv4 |= (reg >> 24) & 0xff;
-	/* IPv4 src [31:16] */
-	ipv4 |= ((reg >> 8) & 0xffff) << 16;
-	v4_spec->ip4src = cpu_to_be32(ipv4);
-
-	return 0;
-}
-
-static int bcm_sf2_cfp_ipv4_rule_get(struct bcm_sf2_priv *priv, int port,
-				     struct ethtool_rx_flow_spec *fs)
-{
-	struct ethtool_tcpip4_spec *v4_spec = NULL, *v4_m_spec = NULL;
-	u32 reg;
-	int ret;
-
-	reg = core_readl(priv, CORE_CFP_DATA_PORT(6));
-
-	switch ((reg & IPPROTO_MASK) >> IPPROTO_SHIFT) {
-	case IPPROTO_TCP:
-		fs->flow_type = TCP_V4_FLOW;
-		v4_spec = &fs->h_u.tcp_ip4_spec;
-		v4_m_spec = &fs->m_u.tcp_ip4_spec;
-		break;
-	case IPPROTO_UDP:
-		fs->flow_type = UDP_V4_FLOW;
-		v4_spec = &fs->h_u.udp_ip4_spec;
-		v4_m_spec = &fs->m_u.udp_ip4_spec;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	fs->m_ext.data[0] = cpu_to_be32((reg >> IP_FRAG_SHIFT) & 1);
-	v4_spec->tos = (reg >> IPTOS_SHIFT) & IPTOS_MASK;
-
-	ret = bcm_sf2_cfp_unslice_ipv4(priv, v4_spec, false);
-	if (ret)
-		return ret;
-
-	return bcm_sf2_cfp_unslice_ipv4(priv, v4_m_spec, true);
-}
-
-static int __maybe_unused bcm_sf2_cfp_unslice_ipv6(struct bcm_sf2_priv *priv,
-						    __be32 *ip6_addr,
-						    __be16 *port,
-						    bool mask)
-{
-	u32 reg, tmp, offset;
-
-	/* C-Tag [31:24]
-	 * UDF_n_B8 [23:8] (port)
-	 * UDF_n_B7 (upper) [7:0] (addr[15:8])
-	 */
-	if (mask)
-		offset = CORE_CFP_MASK_PORT(4);
-	else
-		offset = CORE_CFP_DATA_PORT(4);
-	reg = core_readl(priv, offset);
-	*port = cpu_to_be32(reg) >> 8;
-	tmp = (u32)(reg & 0xff) << 8;
-
-	/* UDF_n_B7 (lower) [31:24] (addr[7:0])
-	 * UDF_n_B6 [23:8] (addr[31:16])
-	 * UDF_n_B5 (upper) [7:0] (addr[47:40])
-	 */
-	if (mask)
-		offset = CORE_CFP_MASK_PORT(3);
-	else
-		offset = CORE_CFP_DATA_PORT(3);
-	reg = core_readl(priv, offset);
-	tmp |= (reg >> 24) & 0xff;
-	tmp |= (u32)((reg >> 8) << 16);
-	ip6_addr[3] = cpu_to_be32(tmp);
-	tmp = (u32)(reg & 0xff) << 8;
-
-	/* UDF_n_B5 (lower) [31:24] (addr[39:32])
-	 * UDF_n_B4 [23:8] (addr[63:48])
-	 * UDF_n_B3 (upper) [7:0] (addr[79:72])
-	 */
-	if (mask)
-		offset = CORE_CFP_MASK_PORT(2);
-	else
-		offset = CORE_CFP_DATA_PORT(2);
-	reg = core_readl(priv, offset);
-	tmp |= (reg >> 24) & 0xff;
-	tmp |= (u32)((reg >> 8) << 16);
-	ip6_addr[2] = cpu_to_be32(tmp);
-	tmp = (u32)(reg & 0xff) << 8;
-
-	/* UDF_n_B3 (lower) [31:24] (addr[71:64])
-	 * UDF_n_B2 [23:8] (addr[95:80])
-	 * UDF_n_B1 (upper) [7:0] (addr[111:104])
-	 */
-	if (mask)
-		offset = CORE_CFP_MASK_PORT(1);
-	else
-		offset = CORE_CFP_DATA_PORT(1);
-	reg = core_readl(priv, offset);
-	tmp |= (reg >> 24) & 0xff;
-	tmp |= (u32)((reg >> 8) << 16);
-	ip6_addr[1] = cpu_to_be32(tmp);
-	tmp = (u32)(reg & 0xff) << 8;
-
-	/* UDF_n_B1 (lower) [31:24] (addr[103:96])
-	 * UDF_n_B0 [23:8] (addr[127:112])
-	 * Reserved [7:4]
-	 * Slice ID [3:2]
-	 * Slice valid [1:0]
-	 */
-	if (mask)
-		offset = CORE_CFP_MASK_PORT(0);
-	else
-		offset = CORE_CFP_DATA_PORT(0);
-	reg = core_readl(priv, offset);
-	tmp |= (reg >> 24) & 0xff;
-	tmp |= (u32)((reg >> 8) << 16);
-	ip6_addr[0] = cpu_to_be32(tmp);
-
-	if (!mask && !(reg & SLICE_VALID))
-		return -EINVAL;
-
-	return 0;
-}
-
-static int __maybe_unused bcm_sf2_cfp_ipv6_rule_get(struct bcm_sf2_priv *priv,
-						     int port,
-						     struct ethtool_rx_flow_spec *fs,
-						     u32 next_loc)
-{
-	struct ethtool_tcpip6_spec *v6_spec = NULL, *v6_m_spec = NULL;
-	u32 reg;
-	int ret;
-
-	/* UDPv6 and TCPv6 both use ethtool_tcpip6_spec so we are fine
-	 * assuming tcp_ip6_spec here being an union.
-	 */
-	v6_spec = &fs->h_u.tcp_ip6_spec;
-	v6_m_spec = &fs->m_u.tcp_ip6_spec;
-
-	/* Read the second half first */
-	ret = bcm_sf2_cfp_unslice_ipv6(priv, v6_spec->ip6dst, &v6_spec->pdst,
-				       false);
-	if (ret)
-		return ret;
-
-	ret = bcm_sf2_cfp_unslice_ipv6(priv, v6_m_spec->ip6dst,
-				       &v6_m_spec->pdst, true);
-	if (ret)
-		return ret;
-
-	/* Read last to avoid next entry clobbering the results during search
-	 * operations. We would not have the port enabled for this rule, so
-	 * don't bother checking it.
-	 */
-	(void)core_readl(priv, CORE_CFP_DATA_PORT(7));
-
-	/* The slice number is valid, so read the rule we are chained from now
-	 * which is our first half.
-	 */
-	bcm_sf2_cfp_rule_addr_set(priv, next_loc);
-	ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | TCAM_SEL);
-	if (ret)
-		return ret;
-
-	reg = core_readl(priv, CORE_CFP_DATA_PORT(6));
-
-	switch ((reg & IPPROTO_MASK) >> IPPROTO_SHIFT) {
-	case IPPROTO_TCP:
-		fs->flow_type = TCP_V6_FLOW;
-		break;
-	case IPPROTO_UDP:
-		fs->flow_type = UDP_V6_FLOW;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	ret = bcm_sf2_cfp_unslice_ipv6(priv, v6_spec->ip6src, &v6_spec->psrc,
-				       false);
-	if (ret)
-		return ret;
-
-	return bcm_sf2_cfp_unslice_ipv6(priv, v6_m_spec->ip6src,
-					&v6_m_spec->psrc, true);
-}
-
-static int __maybe_unused bcm_sf2_cfp_rule_get_hw(struct bcm_sf2_priv *priv,
-						  int port,
-						  struct ethtool_rxnfc *nfc)
-{
-	u32 reg, ipv4_or_chain_id;
-	unsigned int queue_num;
-	int ret;
-
-	bcm_sf2_cfp_rule_addr_set(priv, nfc->fs.location);
-
-	ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | ACT_POL_RAM);
-	if (ret)
-		return ret;
-
-	reg = core_readl(priv, CORE_ACT_POL_DATA0);
-
-	ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | TCAM_SEL);
-	if (ret)
-		return ret;
-
-	/* Extract the destination port */
-	nfc->fs.ring_cookie = fls((reg >> DST_MAP_IB_SHIFT) &
-				  DST_MAP_IB_MASK) - 1;
-
-	/* There is no Port 6, so we compensate for that here */
-	if (nfc->fs.ring_cookie >= 6)
-		nfc->fs.ring_cookie++;
-	nfc->fs.ring_cookie *= SF2_NUM_EGRESS_QUEUES;
-
-	/* Extract the destination queue */
-	queue_num = (reg >> NEW_TC_SHIFT) & NEW_TC_MASK;
-	nfc->fs.ring_cookie += queue_num;
-
-	/* Extract the L3_FRAMING or CHAIN_ID */
-	reg = core_readl(priv, CORE_CFP_DATA_PORT(6));
-
-	/* With IPv6 rules this would contain a non-zero chain ID since
-	 * we reserve entry 0 and it cannot be used. So if we read 0 here
-	 * this means an IPv4 rule.
-	 */
-	ipv4_or_chain_id = (reg >> L3_FRAMING_SHIFT) & 0xff;
-	if (ipv4_or_chain_id == 0)
-		ret = bcm_sf2_cfp_ipv4_rule_get(priv, port, &nfc->fs);
-	else
-		ret = bcm_sf2_cfp_ipv6_rule_get(priv, port, &nfc->fs,
-						ipv4_or_chain_id);
-	if (ret)
-		return ret;
-
-	/* Read last to avoid next entry clobbering the results during search
-	 * operations
-	 */
-	reg = core_readl(priv, CORE_CFP_DATA_PORT(7));
-	if (!(reg & 1 << port))
-		return -EINVAL;
-
-	bcm_sf2_invert_masks(&nfc->fs);
-
-	/* Put the TCAM size here */
-	nfc->data = bcm_sf2_cfp_rule_size(priv);
-
-	return 0;
-}
-
 static int bcm_sf2_cfp_rule_get(struct bcm_sf2_priv *priv, int port,
 				struct ethtool_rxnfc *nfc)
 {
