|
29 | 29 | /* OOM task polling interval */
|
30 | 30 | #define LIO_OOM_POLL_INTERVAL_MS 250
|
31 | 31 |
|
| 32 | +#define OCTNIC_MAX_SG MAX_SKB_FRAGS |
| 33 | + |
/**
 * \brief Callback for getting interface configuration
 * @param oct octeon device (overwritten below; looked up again from ctx)
 * @param status status of request (unused)
 * @param buf pointer to the soft command carrying the response
 */
void lio_if_cfg_callback(struct octeon_device *oct,
			 u32 status __attribute__((unused)), void *buf)
{
	struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
	struct liquidio_if_cfg_context *ctx;
	struct liquidio_if_cfg_resp *resp;

	/* Response buffer and completion context were stashed in the
	 * soft command when the request was issued.
	 */
	resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
	ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;

	/* Re-derive the device from the context id rather than trusting
	 * the oct argument passed in.
	 */
	oct = lio_get_device(ctx->octeon_id);
	if (resp->status)
		dev_err(&oct->pci_dev->dev, "nic if cfg instruction failed. Status: %llx\n",
			CVM_CAST64(resp->status));
	/* Mark the request complete for the waiter polling ctx->cond */
	WRITE_ONCE(ctx->cond, 1);

	/* Cache the firmware version string reported in the response */
	snprintf(oct->fw_info.liquidio_firmware_version, 32, "%s",
		 resp->cfg_info.liquidio_firmware_version);

	/* This barrier is required to be sure that the response has been
	 * written fully before waking up the handler
	 */
	wmb();

	wake_up_interruptible(&ctx->wc);
}
| 65 | + |
| 66 | +/** |
| 67 | + * \brief Delete gather lists |
| 68 | + * @param lio per-network private data |
| 69 | + */ |
| 70 | +void lio_delete_glists(struct lio *lio) |
| 71 | +{ |
| 72 | + struct octnic_gather *g; |
| 73 | + int i; |
| 74 | + |
| 75 | + kfree(lio->glist_lock); |
| 76 | + lio->glist_lock = NULL; |
| 77 | + |
| 78 | + if (!lio->glist) |
| 79 | + return; |
| 80 | + |
| 81 | + for (i = 0; i < lio->oct_dev->num_iqs; i++) { |
| 82 | + do { |
| 83 | + g = (struct octnic_gather *) |
| 84 | + lio_list_delete_head(&lio->glist[i]); |
| 85 | + kfree(g); |
| 86 | + } while (g); |
| 87 | + |
| 88 | + if (lio->glists_virt_base && lio->glists_virt_base[i] && |
| 89 | + lio->glists_dma_base && lio->glists_dma_base[i]) { |
| 90 | + lio_dma_free(lio->oct_dev, |
| 91 | + lio->glist_entry_size * lio->tx_qsize, |
| 92 | + lio->glists_virt_base[i], |
| 93 | + lio->glists_dma_base[i]); |
| 94 | + } |
| 95 | + } |
| 96 | + |
| 97 | + kfree(lio->glists_virt_base); |
| 98 | + lio->glists_virt_base = NULL; |
| 99 | + |
| 100 | + kfree(lio->glists_dma_base); |
| 101 | + lio->glists_dma_base = NULL; |
| 102 | + |
| 103 | + kfree(lio->glist); |
| 104 | + lio->glist = NULL; |
| 105 | +} |
| 106 | + |
| 107 | +/** |
| 108 | + * \brief Setup gather lists |
| 109 | + * @param lio per-network private data |
| 110 | + */ |
| 111 | +int lio_setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs) |
| 112 | +{ |
| 113 | + struct octnic_gather *g; |
| 114 | + int i, j; |
| 115 | + |
| 116 | + lio->glist_lock = |
| 117 | + kcalloc(num_iqs, sizeof(*lio->glist_lock), GFP_KERNEL); |
| 118 | + if (!lio->glist_lock) |
| 119 | + return -ENOMEM; |
| 120 | + |
| 121 | + lio->glist = |
| 122 | + kcalloc(num_iqs, sizeof(*lio->glist), GFP_KERNEL); |
| 123 | + if (!lio->glist) { |
| 124 | + kfree(lio->glist_lock); |
| 125 | + lio->glist_lock = NULL; |
| 126 | + return -ENOMEM; |
| 127 | + } |
| 128 | + |
| 129 | + lio->glist_entry_size = |
| 130 | + ROUNDUP8((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE); |
| 131 | + |
| 132 | + /* allocate memory to store virtual and dma base address of |
| 133 | + * per glist consistent memory |
| 134 | + */ |
| 135 | + lio->glists_virt_base = kcalloc(num_iqs, sizeof(*lio->glists_virt_base), |
| 136 | + GFP_KERNEL); |
| 137 | + lio->glists_dma_base = kcalloc(num_iqs, sizeof(*lio->glists_dma_base), |
| 138 | + GFP_KERNEL); |
| 139 | + |
| 140 | + if (!lio->glists_virt_base || !lio->glists_dma_base) { |
| 141 | + lio_delete_glists(lio); |
| 142 | + return -ENOMEM; |
| 143 | + } |
| 144 | + |
| 145 | + for (i = 0; i < num_iqs; i++) { |
| 146 | + int numa_node = dev_to_node(&oct->pci_dev->dev); |
| 147 | + |
| 148 | + spin_lock_init(&lio->glist_lock[i]); |
| 149 | + |
| 150 | + INIT_LIST_HEAD(&lio->glist[i]); |
| 151 | + |
| 152 | + lio->glists_virt_base[i] = |
| 153 | + lio_dma_alloc(oct, |
| 154 | + lio->glist_entry_size * lio->tx_qsize, |
| 155 | + &lio->glists_dma_base[i]); |
| 156 | + |
| 157 | + if (!lio->glists_virt_base[i]) { |
| 158 | + lio_delete_glists(lio); |
| 159 | + return -ENOMEM; |
| 160 | + } |
| 161 | + |
| 162 | + for (j = 0; j < lio->tx_qsize; j++) { |
| 163 | + g = kzalloc_node(sizeof(*g), GFP_KERNEL, |
| 164 | + numa_node); |
| 165 | + if (!g) |
| 166 | + g = kzalloc(sizeof(*g), GFP_KERNEL); |
| 167 | + if (!g) |
| 168 | + break; |
| 169 | + |
| 170 | + g->sg = lio->glists_virt_base[i] + |
| 171 | + (j * lio->glist_entry_size); |
| 172 | + |
| 173 | + g->sg_dma_ptr = lio->glists_dma_base[i] + |
| 174 | + (j * lio->glist_entry_size); |
| 175 | + |
| 176 | + list_add_tail(&g->list, &lio->glist[i]); |
| 177 | + } |
| 178 | + |
| 179 | + if (j != lio->tx_qsize) { |
| 180 | + lio_delete_glists(lio); |
| 181 | + return -ENOMEM; |
| 182 | + } |
| 183 | + } |
| 184 | + |
| 185 | + return 0; |
| 186 | +} |
| 187 | + |
32 | 188 | int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1)
|
33 | 189 | {
|
34 | 190 | struct lio *lio = GET_LIO(netdev);
|
@@ -880,8 +1036,8 @@ int octeon_setup_interrupt(struct octeon_device *oct, u32 num_ioqs)
|
880 | 1036 | int num_ioq_vectors;
|
881 | 1037 | int irqret, err;
|
882 | 1038 |
|
883 |
| - oct->num_msix_irqs = num_ioqs; |
884 | 1039 | if (oct->msix_on) {
|
| 1040 | + oct->num_msix_irqs = num_ioqs; |
885 | 1041 | if (OCTEON_CN23XX_PF(oct)) {
|
886 | 1042 | num_interrupts = MAX_IOQ_INTERRUPTS_PER_PF + 1;
|
887 | 1043 |
|
|
0 commit comments