Skip to content

Commit ab677ff

Browse files
Hariprasad Shenai
authored and davem330 committed
cxgb4: Allocate Tx queues dynamically
Allocate resources dynamically for Upper Layer Drivers (ULDs) such as cxgbit, iw_cxgb4, cxgb4i and chcr. The allocated resources include Tx queues, which are allocated when a ULD registers with the cxgb4 driver and freed when it unregisters. Tx queues that are shared by ULDs are allocated by the first driver to register and freed by the last driver to unregister. Signed-off-by: Atul Gupta <[email protected]> Signed-off-by: David S. Miller <[email protected]>
1 parent c816061 commit ab677ff

File tree

11 files changed

+287
-82
lines changed

11 files changed

+287
-82
lines changed

drivers/crypto/chelsio/chcr_algo.c

Lines changed: 9 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -592,16 +592,18 @@ static int chcr_aes_cbc_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
592592

593593
static int cxgb4_is_crypto_q_full(struct net_device *dev, unsigned int idx)
594594
{
595-
int ret = 0;
596-
struct sge_ofld_txq *q;
597595
struct adapter *adap = netdev2adap(dev);
596+
struct sge_uld_txq_info *txq_info =
597+
adap->sge.uld_txq_info[CXGB4_TX_CRYPTO];
598+
struct sge_uld_txq *txq;
599+
int ret = 0;
598600

599601
local_bh_disable();
600-
q = &adap->sge.ofldtxq[idx];
601-
spin_lock(&q->sendq.lock);
602-
if (q->full)
602+
txq = &txq_info->uldtxq[idx];
603+
spin_lock(&txq->sendq.lock);
604+
if (txq->full)
603605
ret = -1;
604-
spin_unlock(&q->sendq.lock);
606+
spin_unlock(&txq->sendq.lock);
605607
local_bh_enable();
606608
return ret;
607609
}
@@ -674,11 +676,11 @@ static int chcr_device_init(struct chcr_context *ctx)
674676
}
675677
u_ctx = ULD_CTX(ctx);
676678
rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
677-
ctx->dev->tx_channel_id = 0;
678679
rxq_idx = ctx->dev->tx_channel_id * rxq_perchan;
679680
rxq_idx += id % rxq_perchan;
680681
spin_lock(&ctx->dev->lock_chcr_dev);
681682
ctx->tx_channel_id = rxq_idx;
683+
ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
682684
spin_unlock(&ctx->dev->lock_chcr_dev);
683685
}
684686
out:

drivers/crypto/chelsio/chcr_core.c

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -42,6 +42,7 @@ static chcr_handler_func work_handlers[NUM_CPL_CMDS] = {
4242
static struct cxgb4_uld_info chcr_uld_info = {
4343
.name = DRV_MODULE_NAME,
4444
.nrxq = MAX_ULD_QSETS,
45+
.ntxq = MAX_ULD_QSETS,
4546
.rxq_size = 1024,
4647
.add = chcr_uld_add,
4748
.state_change = chcr_uld_state_change,
@@ -126,7 +127,7 @@ static int cpl_fw6_pld_handler(struct chcr_dev *dev,
126127

127128
int chcr_send_wr(struct sk_buff *skb)
128129
{
129-
return cxgb4_ofld_send(skb->dev, skb);
130+
return cxgb4_crypto_send(skb->dev, skb);
130131
}
131132

132133
static void *chcr_uld_add(const struct cxgb4_lld_info *lld)

drivers/infiniband/hw/cxgb4/device.c

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1481,6 +1481,7 @@ static int c4iw_uld_control(void *handle, enum cxgb4_control control, ...)
14811481
static struct cxgb4_uld_info c4iw_uld_info = {
14821482
.name = DRV_NAME,
14831483
.nrxq = MAX_ULD_QSETS,
1484+
.ntxq = MAX_ULD_QSETS,
14841485
.rxq_size = 511,
14851486
.ciq = true,
14861487
.lro = false,

drivers/net/ethernet/chelsio/cxgb4/cxgb4.h

Lines changed: 15 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -635,6 +635,7 @@ struct tx_sw_desc;
635635

636636
struct sge_txq {
637637
unsigned int in_use; /* # of in-use Tx descriptors */
638+
unsigned int q_type; /* Q type Eth/Ctrl/Ofld */
638639
unsigned int size; /* # of descriptors */
639640
unsigned int cidx; /* SW consumer index */
640641
unsigned int pidx; /* producer index */
@@ -665,7 +666,7 @@ struct sge_eth_txq { /* state for an SGE Ethernet Tx queue */
665666
unsigned long mapping_err; /* # of I/O MMU packet mapping errors */
666667
} ____cacheline_aligned_in_smp;
667668

668-
struct sge_ofld_txq { /* state for an SGE offload Tx queue */
669+
struct sge_uld_txq { /* state for an SGE offload Tx queue */
669670
struct sge_txq q;
670671
struct adapter *adap;
671672
struct sk_buff_head sendq; /* list of backpressured packets */
@@ -693,14 +694,20 @@ struct sge_uld_rxq_info {
693694
u8 uld; /* uld type */
694695
};
695696

697+
struct sge_uld_txq_info {
698+
struct sge_uld_txq *uldtxq; /* Txq's for ULD */
699+
atomic_t users; /* num users */
700+
u16 ntxq; /* # of egress uld queues */
701+
};
702+
696703
struct sge {
697704
struct sge_eth_txq ethtxq[MAX_ETH_QSETS];
698-
struct sge_ofld_txq ofldtxq[MAX_OFLD_QSETS];
699705
struct sge_ctrl_txq ctrlq[MAX_CTRL_QUEUES];
700706

701707
struct sge_eth_rxq ethrxq[MAX_ETH_QSETS];
702708
struct sge_rspq fw_evtq ____cacheline_aligned_in_smp;
703709
struct sge_uld_rxq_info **uld_rxq_info;
710+
struct sge_uld_txq_info **uld_txq_info;
704711

705712
struct sge_rspq intrq ____cacheline_aligned_in_smp;
706713
spinlock_t intrq_lock;
@@ -1298,8 +1305,9 @@ int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
12981305
unsigned int cmplqid);
12991306
int t4_sge_mod_ctrl_txq(struct adapter *adap, unsigned int eqid,
13001307
unsigned int cmplqid);
1301-
int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
1302-
struct net_device *dev, unsigned int iqid);
1308+
int t4_sge_alloc_uld_txq(struct adapter *adap, struct sge_uld_txq *txq,
1309+
struct net_device *dev, unsigned int iqid,
1310+
unsigned int uld_type);
13031311
irqreturn_t t4_sge_intr_msix(int irq, void *cookie);
13041312
int t4_sge_init(struct adapter *adap);
13051313
void t4_sge_start(struct adapter *adap);
@@ -1661,4 +1669,7 @@ int t4_uld_mem_alloc(struct adapter *adap);
16611669
void t4_uld_clean_up(struct adapter *adap);
16621670
void t4_register_netevent_notifier(void);
16631671
void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq, struct sge_fl *fl);
1672+
void free_tx_desc(struct adapter *adap, struct sge_txq *q,
1673+
unsigned int n, bool unmap);
1674+
void free_txq(struct adapter *adap, struct sge_txq *q);
16641675
#endif /* __CXGB4_H__ */

drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c

Lines changed: 0 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -2512,18 +2512,6 @@ do { \
25122512
RL("FLLow:", fl.low);
25132513
RL("FLStarving:", fl.starving);
25142514

2515-
} else if (ofld_idx < ofld_entries) {
2516-
const struct sge_ofld_txq *tx =
2517-
&adap->sge.ofldtxq[ofld_idx * 4];
2518-
int n = min(4, adap->sge.ofldqsets - 4 * ofld_idx);
2519-
2520-
S("QType:", "OFLD-Txq");
2521-
T("TxQ ID:", q.cntxt_id);
2522-
T("TxQ size:", q.size);
2523-
T("TxQ inuse:", q.in_use);
2524-
T("TxQ CIDX:", q.cidx);
2525-
T("TxQ PIDX:", q.pidx);
2526-
25272515
} else if (ctrl_idx < ctrl_entries) {
25282516
const struct sge_ctrl_txq *tx = &adap->sge.ctrlq[ctrl_idx * 4];
25292517
int n = min(4, adap->params.nports - 4 * ctrl_idx);

drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c

Lines changed: 40 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -530,15 +530,15 @@ static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
530530

531531
txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
532532
txq->restarts++;
533-
if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) {
533+
if (txq->q_type == CXGB4_TXQ_ETH) {
534534
struct sge_eth_txq *eq;
535535

536536
eq = container_of(txq, struct sge_eth_txq, q);
537537
netif_tx_wake_queue(eq->txq);
538538
} else {
539-
struct sge_ofld_txq *oq;
539+
struct sge_uld_txq *oq;
540540

541-
oq = container_of(txq, struct sge_ofld_txq, q);
541+
oq = container_of(txq, struct sge_uld_txq, q);
542542
tasklet_schedule(&oq->qresume_tsk);
543543
}
544544
} else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
@@ -885,15 +885,6 @@ static int setup_sge_queues(struct adapter *adap)
885885
}
886886
}
887887

888-
j = s->ofldqsets / adap->params.nports; /* iscsi queues per channel */
889-
for_each_ofldtxq(s, i) {
890-
err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i],
891-
adap->port[i / j],
892-
s->fw_evtq.cntxt_id);
893-
if (err)
894-
goto freeout;
895-
}
896-
897888
for_each_port(adap, i) {
898889
/* Note that cmplqid below is 0 if we don't
899890
* have RDMA queues, and that's the right value.
@@ -1922,8 +1913,18 @@ static void disable_dbs(struct adapter *adap)
19221913

19231914
for_each_ethrxq(&adap->sge, i)
19241915
disable_txq_db(&adap->sge.ethtxq[i].q);
1925-
for_each_ofldtxq(&adap->sge, i)
1926-
disable_txq_db(&adap->sge.ofldtxq[i].q);
1916+
if (is_offload(adap)) {
1917+
struct sge_uld_txq_info *txq_info =
1918+
adap->sge.uld_txq_info[CXGB4_TX_OFLD];
1919+
1920+
if (txq_info) {
1921+
for_each_ofldtxq(&adap->sge, i) {
1922+
struct sge_uld_txq *txq = &txq_info->uldtxq[i];
1923+
1924+
disable_txq_db(&txq->q);
1925+
}
1926+
}
1927+
}
19271928
for_each_port(adap, i)
19281929
disable_txq_db(&adap->sge.ctrlq[i].q);
19291930
}
@@ -1934,8 +1935,18 @@ static void enable_dbs(struct adapter *adap)
19341935

19351936
for_each_ethrxq(&adap->sge, i)
19361937
enable_txq_db(adap, &adap->sge.ethtxq[i].q);
1937-
for_each_ofldtxq(&adap->sge, i)
1938-
enable_txq_db(adap, &adap->sge.ofldtxq[i].q);
1938+
if (is_offload(adap)) {
1939+
struct sge_uld_txq_info *txq_info =
1940+
adap->sge.uld_txq_info[CXGB4_TX_OFLD];
1941+
1942+
if (txq_info) {
1943+
for_each_ofldtxq(&adap->sge, i) {
1944+
struct sge_uld_txq *txq = &txq_info->uldtxq[i];
1945+
1946+
enable_txq_db(adap, &txq->q);
1947+
}
1948+
}
1949+
}
19391950
for_each_port(adap, i)
19401951
enable_txq_db(adap, &adap->sge.ctrlq[i].q);
19411952
}
@@ -2006,8 +2017,17 @@ static void recover_all_queues(struct adapter *adap)
20062017

20072018
for_each_ethrxq(&adap->sge, i)
20082019
sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
2009-
for_each_ofldtxq(&adap->sge, i)
2010-
sync_txq_pidx(adap, &adap->sge.ofldtxq[i].q);
2020+
if (is_offload(adap)) {
2021+
struct sge_uld_txq_info *txq_info =
2022+
adap->sge.uld_txq_info[CXGB4_TX_OFLD];
2023+
if (txq_info) {
2024+
for_each_ofldtxq(&adap->sge, i) {
2025+
struct sge_uld_txq *txq = &txq_info->uldtxq[i];
2026+
2027+
sync_txq_pidx(adap, &txq->q);
2028+
}
2029+
}
2030+
}
20112031
for_each_port(adap, i)
20122032
sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
20132033
}
@@ -3991,7 +4011,7 @@ static inline bool is_x_10g_port(const struct link_config *lc)
39914011
static void cfg_queues(struct adapter *adap)
39924012
{
39934013
struct sge *s = &adap->sge;
3994-
int i, n10g = 0, qidx = 0;
4014+
int i = 0, n10g = 0, qidx = 0;
39954015
#ifndef CONFIG_CHELSIO_T4_DCB
39964016
int q10g = 0;
39974017
#endif
@@ -4006,8 +4026,7 @@ static void cfg_queues(struct adapter *adap)
40064026
adap->params.crypto = 0;
40074027
}
40084028

4009-
for_each_port(adap, i)
4010-
n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
4029+
n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
40114030
#ifdef CONFIG_CHELSIO_T4_DCB
40124031
/* For Data Center Bridging support we need to be able to support up
40134032
* to 8 Traffic Priorities; each of which will be assigned to its
@@ -4075,9 +4094,6 @@ static void cfg_queues(struct adapter *adap)
40754094
for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
40764095
s->ctrlq[i].q.size = 512;
40774096

4078-
for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
4079-
s->ofldtxq[i].q.size = 1024;
4080-
40814097
init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64);
40824098
init_rspq(adap, &s->intrq, 0, 1, 512, 64);
40834099
}

0 commit comments

Comments (0)