Skip to content

Commit a484fe8

Browse files
committed
Merge branch 'enable-multiple-irq-lines-support-in-airoha_eth-driver'
Lorenzo Bianconi says: ==================== Enable multiple IRQ lines support in airoha_eth driver EN7581 ethernet SoC supports 4 programmable IRQ lines, each one composed of 4 IRQ configuration registers to map Tx/Rx queues. Enable multiple IRQ lines support. ==================== Link: https://patch.msgid.link/[email protected] Signed-off-by: Jakub Kicinski <[email protected]>
2 parents cd7276e + f252493 commit a484fe8

File tree

3 files changed

+283
-94
lines changed

3 files changed

+283
-94
lines changed

drivers/net/ethernet/airoha/airoha_eth.c

Lines changed: 108 additions & 49 deletions
Original file line numberDiff line numberDiff line change
@@ -34,37 +34,40 @@ u32 airoha_rmw(void __iomem *base, u32 offset, u32 mask, u32 val)
3434
return val;
3535
}
3636

37-
static void airoha_qdma_set_irqmask(struct airoha_qdma *qdma, int index,
38-
u32 clear, u32 set)
37+
static void airoha_qdma_set_irqmask(struct airoha_irq_bank *irq_bank,
38+
int index, u32 clear, u32 set)
3939
{
40+
struct airoha_qdma *qdma = irq_bank->qdma;
41+
int bank = irq_bank - &qdma->irq_banks[0];
4042
unsigned long flags;
4143

42-
if (WARN_ON_ONCE(index >= ARRAY_SIZE(qdma->irqmask)))
44+
if (WARN_ON_ONCE(index >= ARRAY_SIZE(irq_bank->irqmask)))
4345
return;
4446

45-
spin_lock_irqsave(&qdma->irq_lock, flags);
47+
spin_lock_irqsave(&irq_bank->irq_lock, flags);
4648

47-
qdma->irqmask[index] &= ~clear;
48-
qdma->irqmask[index] |= set;
49-
airoha_qdma_wr(qdma, REG_INT_ENABLE(index), qdma->irqmask[index]);
49+
irq_bank->irqmask[index] &= ~clear;
50+
irq_bank->irqmask[index] |= set;
51+
airoha_qdma_wr(qdma, REG_INT_ENABLE(bank, index),
52+
irq_bank->irqmask[index]);
5053
/* Read irq_enable register in order to guarantee the update above
5154
* completes in the spinlock critical section.
5255
*/
53-
airoha_qdma_rr(qdma, REG_INT_ENABLE(index));
56+
airoha_qdma_rr(qdma, REG_INT_ENABLE(bank, index));
5457

55-
spin_unlock_irqrestore(&qdma->irq_lock, flags);
58+
spin_unlock_irqrestore(&irq_bank->irq_lock, flags);
5659
}
5760

58-
static void airoha_qdma_irq_enable(struct airoha_qdma *qdma, int index,
59-
u32 mask)
61+
static void airoha_qdma_irq_enable(struct airoha_irq_bank *irq_bank,
62+
int index, u32 mask)
6063
{
61-
airoha_qdma_set_irqmask(qdma, index, 0, mask);
64+
airoha_qdma_set_irqmask(irq_bank, index, 0, mask);
6265
}
6366

64-
static void airoha_qdma_irq_disable(struct airoha_qdma *qdma, int index,
65-
u32 mask)
67+
static void airoha_qdma_irq_disable(struct airoha_irq_bank *irq_bank,
68+
int index, u32 mask)
6669
{
67-
airoha_qdma_set_irqmask(qdma, index, mask, 0);
70+
airoha_qdma_set_irqmask(irq_bank, index, mask, 0);
6871
}
6972

7073
static bool airhoa_is_lan_gdm_port(struct airoha_gdm_port *port)
@@ -739,9 +742,20 @@ static int airoha_qdma_rx_napi_poll(struct napi_struct *napi, int budget)
739742
done += cur;
740743
} while (cur && done < budget);
741744

742-
if (done < budget && napi_complete(napi))
743-
airoha_qdma_irq_enable(q->qdma, QDMA_INT_REG_IDX1,
744-
RX_DONE_INT_MASK);
745+
if (done < budget && napi_complete(napi)) {
746+
struct airoha_qdma *qdma = q->qdma;
747+
int i, qid = q - &qdma->q_rx[0];
748+
int intr_reg = qid < RX_DONE_HIGH_OFFSET ? QDMA_INT_REG_IDX1
749+
: QDMA_INT_REG_IDX2;
750+
751+
for (i = 0; i < ARRAY_SIZE(qdma->irq_banks); i++) {
752+
if (!(BIT(qid) & RX_IRQ_BANK_PIN_MASK(i)))
753+
continue;
754+
755+
airoha_qdma_irq_enable(&qdma->irq_banks[i], intr_reg,
756+
BIT(qid % RX_DONE_HIGH_OFFSET));
757+
}
758+
}
745759

746760
return done;
747761
}
@@ -944,7 +958,7 @@ static int airoha_qdma_tx_napi_poll(struct napi_struct *napi, int budget)
944958
}
945959

946960
if (done < budget && napi_complete(napi))
947-
airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX0,
961+
airoha_qdma_irq_enable(&qdma->irq_banks[0], QDMA_INT_REG_IDX0,
948962
TX_DONE_INT_MASK(id));
949963

950964
return done;
@@ -1174,14 +1188,24 @@ static int airoha_qdma_hw_init(struct airoha_qdma *qdma)
11741188
{
11751189
int i;
11761190

1177-
/* clear pending irqs */
1178-
for (i = 0; i < ARRAY_SIZE(qdma->irqmask); i++)
1191+
for (i = 0; i < ARRAY_SIZE(qdma->irq_banks); i++) {
1192+
/* clear pending irqs */
11791193
airoha_qdma_wr(qdma, REG_INT_STATUS(i), 0xffffffff);
1180-
1181-
/* setup irqs */
1182-
airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX0, INT_IDX0_MASK);
1183-
airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX1, INT_IDX1_MASK);
1184-
airoha_qdma_irq_enable(qdma, QDMA_INT_REG_IDX4, INT_IDX4_MASK);
1194+
/* setup rx irqs */
1195+
airoha_qdma_irq_enable(&qdma->irq_banks[i], QDMA_INT_REG_IDX0,
1196+
INT_RX0_MASK(RX_IRQ_BANK_PIN_MASK(i)));
1197+
airoha_qdma_irq_enable(&qdma->irq_banks[i], QDMA_INT_REG_IDX1,
1198+
INT_RX1_MASK(RX_IRQ_BANK_PIN_MASK(i)));
1199+
airoha_qdma_irq_enable(&qdma->irq_banks[i], QDMA_INT_REG_IDX2,
1200+
INT_RX2_MASK(RX_IRQ_BANK_PIN_MASK(i)));
1201+
airoha_qdma_irq_enable(&qdma->irq_banks[i], QDMA_INT_REG_IDX3,
1202+
INT_RX3_MASK(RX_IRQ_BANK_PIN_MASK(i)));
1203+
}
1204+
/* setup tx irqs */
1205+
airoha_qdma_irq_enable(&qdma->irq_banks[0], QDMA_INT_REG_IDX0,
1206+
TX_COHERENT_LOW_INT_MASK | INT_TX_MASK);
1207+
airoha_qdma_irq_enable(&qdma->irq_banks[0], QDMA_INT_REG_IDX4,
1208+
TX_COHERENT_HIGH_INT_MASK);
11851209

11861210
/* setup irq binding */
11871211
for (i = 0; i < ARRAY_SIZE(qdma->q_tx); i++) {
@@ -1226,38 +1250,47 @@ static int airoha_qdma_hw_init(struct airoha_qdma *qdma)
12261250

12271251
static irqreturn_t airoha_irq_handler(int irq, void *dev_instance)
12281252
{
1229-
struct airoha_qdma *qdma = dev_instance;
1230-
u32 intr[ARRAY_SIZE(qdma->irqmask)];
1253+
struct airoha_irq_bank *irq_bank = dev_instance;
1254+
struct airoha_qdma *qdma = irq_bank->qdma;
1255+
u32 rx_intr_mask = 0, rx_intr1, rx_intr2;
1256+
u32 intr[ARRAY_SIZE(irq_bank->irqmask)];
12311257
int i;
12321258

1233-
for (i = 0; i < ARRAY_SIZE(qdma->irqmask); i++) {
1259+
for (i = 0; i < ARRAY_SIZE(intr); i++) {
12341260
intr[i] = airoha_qdma_rr(qdma, REG_INT_STATUS(i));
1235-
intr[i] &= qdma->irqmask[i];
1261+
intr[i] &= irq_bank->irqmask[i];
12361262
airoha_qdma_wr(qdma, REG_INT_STATUS(i), intr[i]);
12371263
}
12381264

12391265
if (!test_bit(DEV_STATE_INITIALIZED, &qdma->eth->state))
12401266
return IRQ_NONE;
12411267

1242-
if (intr[1] & RX_DONE_INT_MASK) {
1243-
airoha_qdma_irq_disable(qdma, QDMA_INT_REG_IDX1,
1244-
RX_DONE_INT_MASK);
1268+
rx_intr1 = intr[1] & RX_DONE_LOW_INT_MASK;
1269+
if (rx_intr1) {
1270+
airoha_qdma_irq_disable(irq_bank, QDMA_INT_REG_IDX1, rx_intr1);
1271+
rx_intr_mask |= rx_intr1;
1272+
}
1273+
1274+
rx_intr2 = intr[2] & RX_DONE_HIGH_INT_MASK;
1275+
if (rx_intr2) {
1276+
airoha_qdma_irq_disable(irq_bank, QDMA_INT_REG_IDX2, rx_intr2);
1277+
rx_intr_mask |= (rx_intr2 << 16);
1278+
}
12451279

1246-
for (i = 0; i < ARRAY_SIZE(qdma->q_rx); i++) {
1247-
if (!qdma->q_rx[i].ndesc)
1248-
continue;
1280+
for (i = 0; rx_intr_mask && i < ARRAY_SIZE(qdma->q_rx); i++) {
1281+
if (!qdma->q_rx[i].ndesc)
1282+
continue;
12491283

1250-
if (intr[1] & BIT(i))
1251-
napi_schedule(&qdma->q_rx[i].napi);
1252-
}
1284+
if (rx_intr_mask & BIT(i))
1285+
napi_schedule(&qdma->q_rx[i].napi);
12531286
}
12541287

12551288
if (intr[0] & INT_TX_MASK) {
12561289
for (i = 0; i < ARRAY_SIZE(qdma->q_tx_irq); i++) {
12571290
if (!(intr[0] & TX_DONE_INT_MASK(i)))
12581291
continue;
12591292

1260-
airoha_qdma_irq_disable(qdma, QDMA_INT_REG_IDX0,
1293+
airoha_qdma_irq_disable(irq_bank, QDMA_INT_REG_IDX0,
12611294
TX_DONE_INT_MASK(i));
12621295
napi_schedule(&qdma->q_tx_irq[i].napi);
12631296
}
@@ -1266,16 +1299,47 @@ static irqreturn_t airoha_irq_handler(int irq, void *dev_instance)
12661299
return IRQ_HANDLED;
12671300
}
12681301

1302+
static int airoha_qdma_init_irq_banks(struct platform_device *pdev,
1303+
struct airoha_qdma *qdma)
1304+
{
1305+
struct airoha_eth *eth = qdma->eth;
1306+
int i, id = qdma - &eth->qdma[0];
1307+
1308+
for (i = 0; i < ARRAY_SIZE(qdma->irq_banks); i++) {
1309+
struct airoha_irq_bank *irq_bank = &qdma->irq_banks[i];
1310+
int err, irq_index = 4 * id + i;
1311+
const char *name;
1312+
1313+
spin_lock_init(&irq_bank->irq_lock);
1314+
irq_bank->qdma = qdma;
1315+
1316+
irq_bank->irq = platform_get_irq(pdev, irq_index);
1317+
if (irq_bank->irq < 0)
1318+
return irq_bank->irq;
1319+
1320+
name = devm_kasprintf(eth->dev, GFP_KERNEL,
1321+
KBUILD_MODNAME ".%d", irq_index);
1322+
if (!name)
1323+
return -ENOMEM;
1324+
1325+
err = devm_request_irq(eth->dev, irq_bank->irq,
1326+
airoha_irq_handler, IRQF_SHARED, name,
1327+
irq_bank);
1328+
if (err)
1329+
return err;
1330+
}
1331+
1332+
return 0;
1333+
}
1334+
12691335
static int airoha_qdma_init(struct platform_device *pdev,
12701336
struct airoha_eth *eth,
12711337
struct airoha_qdma *qdma)
12721338
{
12731339
int err, id = qdma - &eth->qdma[0];
12741340
const char *res;
12751341

1276-
spin_lock_init(&qdma->irq_lock);
12771342
qdma->eth = eth;
1278-
12791343
res = devm_kasprintf(eth->dev, GFP_KERNEL, "qdma%d", id);
12801344
if (!res)
12811345
return -ENOMEM;
@@ -1285,12 +1349,7 @@ static int airoha_qdma_init(struct platform_device *pdev,
12851349
return dev_err_probe(eth->dev, PTR_ERR(qdma->regs),
12861350
"failed to iomap qdma%d regs\n", id);
12871351

1288-
qdma->irq = platform_get_irq(pdev, 4 * id);
1289-
if (qdma->irq < 0)
1290-
return qdma->irq;
1291-
1292-
err = devm_request_irq(eth->dev, qdma->irq, airoha_irq_handler,
1293-
IRQF_SHARED, KBUILD_MODNAME, qdma);
1352+
err = airoha_qdma_init_irq_banks(pdev, qdma);
12941353
if (err)
12951354
return err;
12961355

@@ -2784,7 +2843,7 @@ static int airoha_alloc_gdm_port(struct airoha_eth *eth,
27842843
dev->features |= dev->hw_features;
27852844
dev->vlan_features = dev->hw_features;
27862845
dev->dev.of_node = np;
2787-
dev->irq = qdma->irq;
2846+
dev->irq = qdma->irq_banks[0].irq;
27882847
SET_NETDEV_DEV(dev, eth->dev);
27892848

27902849
/* reserve hw queues for HTB offloading */

drivers/net/ethernet/airoha/airoha_eth.h

Lines changed: 21 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -17,6 +17,7 @@
1717

1818
#define AIROHA_MAX_NUM_GDM_PORTS 4
1919
#define AIROHA_MAX_NUM_QDMA 2
20+
#define AIROHA_MAX_NUM_IRQ_BANKS 4
2021
#define AIROHA_MAX_DSA_PORTS 7
2122
#define AIROHA_MAX_NUM_RSTS 3
2223
#define AIROHA_MAX_NUM_XSI_RSTS 5
@@ -452,17 +453,34 @@ struct airoha_flow_table_entry {
452453
unsigned long cookie;
453454
};
454455

455-
struct airoha_qdma {
456-
struct airoha_eth *eth;
457-
void __iomem *regs;
456+
/* RX queue to IRQ mapping: BIT(q) in IRQ(n) */
457+
#define RX_IRQ0_BANK_PIN_MASK 0x839f
458+
#define RX_IRQ1_BANK_PIN_MASK 0x7fe00000
459+
#define RX_IRQ2_BANK_PIN_MASK 0x20
460+
#define RX_IRQ3_BANK_PIN_MASK 0x40
461+
#define RX_IRQ_BANK_PIN_MASK(_n) \
462+
(((_n) == 3) ? RX_IRQ3_BANK_PIN_MASK : \
463+
((_n) == 2) ? RX_IRQ2_BANK_PIN_MASK : \
464+
((_n) == 1) ? RX_IRQ1_BANK_PIN_MASK : \
465+
RX_IRQ0_BANK_PIN_MASK)
466+
467+
struct airoha_irq_bank {
468+
struct airoha_qdma *qdma;
458469

459470
/* protect concurrent irqmask accesses */
460471
spinlock_t irq_lock;
461472
u32 irqmask[QDMA_INT_REG_MAX];
462473
int irq;
474+
};
475+
476+
struct airoha_qdma {
477+
struct airoha_eth *eth;
478+
void __iomem *regs;
463479

464480
atomic_t users;
465481

482+
struct airoha_irq_bank irq_banks[AIROHA_MAX_NUM_IRQ_BANKS];
483+
466484
struct airoha_tx_irq_queue q_tx_irq[AIROHA_NUM_TX_IRQ];
467485

468486
struct airoha_queue q_tx[AIROHA_NUM_TX_RING];

0 commit comments

Comments
 (0)