
Commit 869cec9

Merge branch 'liquidio-adding-support-for-ethtool-set-channels-feature'

Intiyaz Basha says:

====================
liquidio: adding support for ethtool --set-channels feature

Code reorganization is required for adding ethtool --set-channels feature.
First three patches are for code reorganization. The last patch is for
adding this feature.
====================

Signed-off-by: David S. Miller <[email protected]>

2 parents 463910e + a82457f commit 869cec9
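
For context, ethtool --set-channels reaches a driver through the .get_channels and .set_channels callbacks of struct ethtool_ops. The sketch below is illustrative only and is not the liquidio implementation (that lives in lio_ethtool.c, one of the seven changed files but not shown on this page); the lio_sketch_* names and the limit of 8 queues are hypothetical.

/* Illustrative sketch only, not the driver's actual code: generic
 * ethtool_ops wiring for --set-channels.  A real driver would point
 * netdev->ethtool_ops at this structure during probe.
 */
#include <linux/ethtool.h>
#include <linux/netdevice.h>

static void lio_sketch_get_channels(struct net_device *netdev,
				    struct ethtool_channels *ch)
{
	ch->max_combined = 8;		/* hypothetical hardware limit */
	ch->combined_count = 4;		/* hypothetical current setting */
}

static int lio_sketch_set_channels(struct net_device *netdev,
				   struct ethtool_channels *ch)
{
	if (!ch->combined_count || ch->combined_count > 8)
		return -EINVAL;

	/* A real driver quiesces traffic, tears down its IO queues and
	 * interrupts, and recreates them with the requested count, e.g.
	 * by calling liquidio_setup_io_queues() with new num_iqs/num_oqs.
	 */
	return 0;
}

static const struct ethtool_ops lio_sketch_ethtool_ops = {
	.get_channels	= lio_sketch_get_channels,
	.set_channels	= lio_sketch_set_channels,
};

From user space the feature would then be exercised with something like "ethtool --set-channels ethX combined 4", where the interface name and count are placeholders.
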

File tree

7 files changed (+529, -451 lines)


drivers/net/ethernet/cavium/liquidio/lio_core.c

Lines changed: 304 additions & 3 deletions
@@ -275,6 +275,11 @@ void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr)
 		netif_info(lio, probe, lio->netdev, "Set RX/TX flow control parameters\n");
 		break;
 
+	case OCTNET_CMD_QUEUE_COUNT_CTL:
+		netif_info(lio, probe, lio->netdev, "Queue count updated to %d\n",
+			   nctrl->ncmd.s.param1);
+		break;
+
 	default:
 		dev_err(&oct->pci_dev->dev, "%s Unknown cmd %d\n", __func__,
 			nctrl->ncmd.s.cmd);
@@ -689,7 +694,8 @@ static int liquidio_napi_poll(struct napi_struct *napi, int budget)
  * an input queue is for egress packets, and output queues
  * are for ingress packets.
  */
-int liquidio_setup_io_queues(struct octeon_device *octeon_dev, int ifidx)
+int liquidio_setup_io_queues(struct octeon_device *octeon_dev, int ifidx,
+			     u32 num_iqs, u32 num_oqs)
 {
 	struct octeon_droq_ops droq_ops;
 	struct net_device *netdev;
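
With the two new parameters, callers now pass the desired queue counts explicitly instead of the function reading them from link info. A hedged sketch of what a call site might look like (the real callers live in lio_main.c and lio_vf_main.c, which are among the seven changed files but are not shown here):

/* Hypothetical call site: pass the current per-interface queue counts. */
if (liquidio_setup_io_queues(octeon_dev, ifidx,
			     lio->linfo.num_txpciq,
			     lio->linfo.num_rxpciq)) {
	dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n");
	return -ENODEV;	/* hypothetical error handling */
}
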
@@ -717,7 +723,7 @@ int liquidio_setup_io_queues(struct octeon_device *octeon_dev, int ifidx)
 	cpu_id_modulus = num_present_cpus();
 
 	/* set up DROQs. */
-	for (q = 0; q < lio->linfo.num_rxpciq; q++) {
+	for (q = 0; q < num_oqs; q++) {
 		q_no = lio->linfo.rxpciq[q].s.q_no;
 		dev_dbg(&octeon_dev->pci_dev->dev,
 			"%s index:%d linfo.rxpciq.s.q_no:%d\n",
@@ -761,7 +767,7 @@ int liquidio_setup_io_queues(struct octeon_device *octeon_dev, int ifidx)
 	}
 
 	/* set up IQs. */
-	for (q = 0; q < lio->linfo.num_txpciq; q++) {
+	for (q = 0; q < num_iqs; q++) {
 		num_tx_descs = CFG_GET_NUM_TX_DESCS_NIC_IF(
 				octeon_get_conf(octeon_dev), lio->ifidx);
 		retval = octeon_setup_iq(octeon_dev, ifidx, q,
@@ -788,3 +794,298 @@ int liquidio_setup_io_queues(struct octeon_device *octeon_dev, int ifidx)
 
 	return 0;
 }
+
+static
+int liquidio_schedule_msix_droq_pkt_handler(struct octeon_droq *droq, u64 ret)
+{
+	struct octeon_device *oct = droq->oct_dev;
+	struct octeon_device_priv *oct_priv =
+		(struct octeon_device_priv *)oct->priv;
+
+	if (droq->ops.poll_mode) {
+		droq->ops.napi_fn(droq);
+	} else {
+		if (ret & MSIX_PO_INT) {
+			if (OCTEON_CN23XX_VF(oct))
+				dev_err(&oct->pci_dev->dev,
+					"should not come here should not get rx when poll mode = 0 for vf\n");
+			tasklet_schedule(&oct_priv->droq_tasklet);
+			return 1;
+		}
+		/* this will be flushed periodically by check iq db */
+		if (ret & MSIX_PI_INT)
+			return 0;
+	}
+
+	return 0;
+}
+
+irqreturn_t
+liquidio_msix_intr_handler(int irq __attribute__((unused)), void *dev)
+{
+	struct octeon_ioq_vector *ioq_vector = (struct octeon_ioq_vector *)dev;
+	struct octeon_device *oct = ioq_vector->oct_dev;
+	struct octeon_droq *droq = oct->droq[ioq_vector->droq_index];
+	u64 ret;
+
+	ret = oct->fn_list.msix_interrupt_handler(ioq_vector);
+
+	if (ret & MSIX_PO_INT || ret & MSIX_PI_INT)
+		liquidio_schedule_msix_droq_pkt_handler(droq, ret);
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * \brief Droq packet processor sceduler
+ * @param oct octeon device
+ */
+static void liquidio_schedule_droq_pkt_handlers(struct octeon_device *oct)
+{
+	struct octeon_device_priv *oct_priv =
+		(struct octeon_device_priv *)oct->priv;
+	struct octeon_droq *droq;
+	u64 oq_no;
+
+	if (oct->int_status & OCT_DEV_INTR_PKT_DATA) {
+		for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES(oct);
+		     oq_no++) {
+			if (!(oct->droq_intr & BIT_ULL(oq_no)))
+				continue;
+
+			droq = oct->droq[oq_no];
+
+			if (droq->ops.poll_mode) {
+				droq->ops.napi_fn(droq);
+				oct_priv->napi_mask |= (1 << oq_no);
+			} else {
+				tasklet_schedule(&oct_priv->droq_tasklet);
+			}
+		}
+	}
+}
+
+/**
+ * \brief Interrupt handler for octeon
+ * @param irq unused
+ * @param dev octeon device
+ */
+static
+irqreturn_t liquidio_legacy_intr_handler(int irq __attribute__((unused)),
+					 void *dev)
+{
+	struct octeon_device *oct = (struct octeon_device *)dev;
+	irqreturn_t ret;
+
+	/* Disable our interrupts for the duration of ISR */
+	oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
+
+	ret = oct->fn_list.process_interrupt_regs(oct);
+
+	if (ret == IRQ_HANDLED)
+		liquidio_schedule_droq_pkt_handlers(oct);
+
+	/* Re-enable our interrupts */
+	if (!(atomic_read(&oct->status) == OCT_DEV_IN_RESET))
+		oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);
+
+	return ret;
+}
+
+/**
+ * \brief Setup interrupt for octeon device
+ * @param oct octeon device
+ *
+ *  Enable interrupt in Octeon device as given in the PCI interrupt mask.
+ */
+int octeon_setup_interrupt(struct octeon_device *oct, u32 num_ioqs)
+{
+	struct msix_entry *msix_entries;
+	char *queue_irq_names = NULL;
+	int i, num_interrupts = 0;
+	int num_alloc_ioq_vectors;
+	char *aux_irq_name = NULL;
+	int num_ioq_vectors;
+	int irqret, err;
+
+	oct->num_msix_irqs = num_ioqs;
+	if (oct->msix_on) {
+		if (OCTEON_CN23XX_PF(oct)) {
+			num_interrupts = MAX_IOQ_INTERRUPTS_PER_PF + 1;
+
+			/* one non ioq interrupt for handling
+			 * sli_mac_pf_int_sum
+			 */
+			oct->num_msix_irqs += 1;
+		} else if (OCTEON_CN23XX_VF(oct)) {
+			num_interrupts = MAX_IOQ_INTERRUPTS_PER_VF;
+		}
+
+		/* allocate storage for the names assigned to each irq */
+		oct->irq_name_storage =
+			kcalloc(num_interrupts, INTRNAMSIZ, GFP_KERNEL);
+		if (!oct->irq_name_storage) {
+			dev_err(&oct->pci_dev->dev, "Irq name storage alloc failed...\n");
+			return -ENOMEM;
+		}
+
+		queue_irq_names = oct->irq_name_storage;
+
+		if (OCTEON_CN23XX_PF(oct))
+			aux_irq_name = &queue_irq_names
+				[IRQ_NAME_OFF(MAX_IOQ_INTERRUPTS_PER_PF)];
+
+		oct->msix_entries = kcalloc(oct->num_msix_irqs,
+					    sizeof(struct msix_entry),
+					    GFP_KERNEL);
+		if (!oct->msix_entries) {
+			dev_err(&oct->pci_dev->dev, "Memory Alloc failed...\n");
+			kfree(oct->irq_name_storage);
+			oct->irq_name_storage = NULL;
+			return -ENOMEM;
+		}
+
+		msix_entries = (struct msix_entry *)oct->msix_entries;
+
+		/*Assumption is that pf msix vectors start from pf srn to pf to
+		 * trs and not from 0. if not change this code
+		 */
+		if (OCTEON_CN23XX_PF(oct)) {
+			for (i = 0; i < oct->num_msix_irqs - 1; i++)
+				msix_entries[i].entry =
+					oct->sriov_info.pf_srn + i;
+
+			msix_entries[oct->num_msix_irqs - 1].entry =
+				oct->sriov_info.trs;
+		} else if (OCTEON_CN23XX_VF(oct)) {
+			for (i = 0; i < oct->num_msix_irqs; i++)
+				msix_entries[i].entry = i;
+		}
+		num_alloc_ioq_vectors = pci_enable_msix_range(
+						oct->pci_dev, msix_entries,
+						oct->num_msix_irqs,
+						oct->num_msix_irqs);
+		if (num_alloc_ioq_vectors < 0) {
+			dev_err(&oct->pci_dev->dev, "unable to Allocate MSI-X interrupts\n");
+			kfree(oct->msix_entries);
+			oct->msix_entries = NULL;
+			kfree(oct->irq_name_storage);
+			oct->irq_name_storage = NULL;
+			return num_alloc_ioq_vectors;
+		}
+
+		dev_dbg(&oct->pci_dev->dev, "OCTEON: Enough MSI-X interrupts are allocated...\n");
+
+		num_ioq_vectors = oct->num_msix_irqs;
+		/** For PF, there is one non-ioq interrupt handler */
+		if (OCTEON_CN23XX_PF(oct)) {
+			num_ioq_vectors -= 1;
+
+			snprintf(aux_irq_name, INTRNAMSIZ,
+				 "LiquidIO%u-pf%u-aux", oct->octeon_id,
+				 oct->pf_num);
+			irqret = request_irq(
+					msix_entries[num_ioq_vectors].vector,
+					liquidio_legacy_intr_handler, 0,
+					aux_irq_name, oct);
+			if (irqret) {
+				dev_err(&oct->pci_dev->dev,
+					"Request_irq failed for MSIX interrupt Error: %d\n",
+					irqret);
+				pci_disable_msix(oct->pci_dev);
+				kfree(oct->msix_entries);
+				kfree(oct->irq_name_storage);
+				oct->irq_name_storage = NULL;
+				oct->msix_entries = NULL;
+				return irqret;
+			}
+		}
+		for (i = 0 ; i < num_ioq_vectors ; i++) {
+			if (OCTEON_CN23XX_PF(oct))
+				snprintf(&queue_irq_names[IRQ_NAME_OFF(i)],
+					 INTRNAMSIZ, "LiquidIO%u-pf%u-rxtx-%u",
+					 oct->octeon_id, oct->pf_num, i);
+
+			if (OCTEON_CN23XX_VF(oct))
+				snprintf(&queue_irq_names[IRQ_NAME_OFF(i)],
+					 INTRNAMSIZ, "LiquidIO%u-vf%u-rxtx-%u",
+					 oct->octeon_id, oct->vf_num, i);
+
+			irqret = request_irq(msix_entries[i].vector,
+					     liquidio_msix_intr_handler, 0,
+					     &queue_irq_names[IRQ_NAME_OFF(i)],
+					     &oct->ioq_vector[i]);
+
+			if (irqret) {
+				dev_err(&oct->pci_dev->dev,
+					"Request_irq failed for MSIX interrupt Error: %d\n",
+					irqret);
+				/** Freeing the non-ioq irq vector here . */
+				free_irq(msix_entries[num_ioq_vectors].vector,
+					 oct);
+
+				while (i) {
+					i--;
+					/** clearing affinity mask. */
+					irq_set_affinity_hint(
+						msix_entries[i].vector,
+						NULL);
+					free_irq(msix_entries[i].vector,
+						 &oct->ioq_vector[i]);
+				}
+				pci_disable_msix(oct->pci_dev);
+				kfree(oct->msix_entries);
+				kfree(oct->irq_name_storage);
+				oct->irq_name_storage = NULL;
+				oct->msix_entries = NULL;
+				return irqret;
+			}
+			oct->ioq_vector[i].vector = msix_entries[i].vector;
+			/* assign the cpu mask for this msix interrupt vector */
+			irq_set_affinity_hint(msix_entries[i].vector,
+					      &oct->ioq_vector[i].affinity_mask
+					      );
+		}
+		dev_dbg(&oct->pci_dev->dev, "OCTEON[%d]: MSI-X enabled\n",
+			oct->octeon_id);
+	} else {
+		err = pci_enable_msi(oct->pci_dev);
+		if (err)
+			dev_warn(&oct->pci_dev->dev, "Reverting to legacy interrupts. Error: %d\n",
+				 err);
+		else
+			oct->flags |= LIO_FLAG_MSI_ENABLED;
+
+		/* allocate storage for the names assigned to the irq */
+		oct->irq_name_storage = kcalloc(1, INTRNAMSIZ, GFP_KERNEL);
+		if (!oct->irq_name_storage)
+			return -ENOMEM;
+
+		queue_irq_names = oct->irq_name_storage;
+
+		if (OCTEON_CN23XX_PF(oct))
+			snprintf(&queue_irq_names[IRQ_NAME_OFF(0)], INTRNAMSIZ,
+				 "LiquidIO%u-pf%u-rxtx-%u",
+				 oct->octeon_id, oct->pf_num, 0);
+
+		if (OCTEON_CN23XX_VF(oct))
+			snprintf(&queue_irq_names[IRQ_NAME_OFF(0)], INTRNAMSIZ,
+				 "LiquidIO%u-vf%u-rxtx-%u",
+				 oct->octeon_id, oct->vf_num, 0);
+
+		irqret = request_irq(oct->pci_dev->irq,
+				     liquidio_legacy_intr_handler,
+				     IRQF_SHARED,
+				     &queue_irq_names[IRQ_NAME_OFF(0)], oct);
+		if (irqret) {
+			if (oct->flags & LIO_FLAG_MSI_ENABLED)
+				pci_disable_msi(oct->pci_dev);
+			dev_err(&oct->pci_dev->dev, "Request IRQ failed with code: %d\n",
+				irqret);
+			kfree(oct->irq_name_storage);
+			oct->irq_name_storage = NULL;
+			return irqret;
+		}
+	}
+	return 0;
+}
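
The consolidated octeon_setup_interrupt() likewise takes the IO-queue count as a parameter, so a later channel-count change can re-register exactly that many MSI-X vectors. A hedged sketch of a caller, with the queue count as a placeholder (the real callers are in lio_main.c and lio_vf_main.c, not shown here):

/* Hypothetical caller sketch: one MSI-X vector is requested per IO queue.
 * num_queues is a placeholder for whatever count the driver has configured
 * (for example, the value later chosen via ethtool --set-channels).
 */
u32 num_queues = 4;	/* placeholder value */

if (octeon_setup_interrupt(oct, num_queues)) {
	dev_err(&oct->pci_dev->dev, "Failed to setup interrupts\n");
	return -ENODEV;	/* hypothetical error handling */
}
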
