// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
 * Copyright (C) 2022 Ventana Micro Systems Inc.
 */

#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqchip/riscv-aplic.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/printk.h>
#include <linux/smp.h>

#include "irq-riscv-aplic-main.h"

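/*
 * Per the RISC-V AIA specification, writing 1 to an IDC's idelivery
 * register enables delivery of external interrupts from that IDC.  An
 * ithreshold of 0 masks no priority level, whereas an ithreshold of 1
 * masks every priority level (priority numbers start at 1), which
 * effectively disables delivery.
 */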
#define APLIC_DISABLE_IDELIVERY		0
#define APLIC_ENABLE_IDELIVERY		1
#define APLIC_DISABLE_ITHRESHOLD	1
#define APLIC_ENABLE_ITHRESHOLD		0

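/*
 * Per-device state for an APLIC operating in direct delivery mode: the
 * common APLIC state, the IRQ domain for its wired interrupts, and the
 * mask of CPUs whose harts have an IDC on this APLIC.
 */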
struct aplic_direct {
	struct aplic_priv	priv;
	struct irq_domain	*irqdomain;
	struct cpumask		lmask;
};

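/*
 * Per-CPU interrupt delivery control (IDC) state: the hart index within
 * the APLIC, the MMIO base of this hart's IDC registers, and a pointer
 * back to the owning APLIC.
 */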
struct aplic_idc {
	unsigned int		hart_index;
	void __iomem		*regs;
	struct aplic_direct	*direct;
};

static unsigned int aplic_direct_parent_irq;
static DEFINE_PER_CPU(struct aplic_idc, aplic_idcs);

static void aplic_direct_irq_eoi(struct irq_data *d)
{
	/*
	 * The fasteoi_handler requires an irq_eoi() callback, hence
	 * provide a dummy handler.
	 */
}

#ifdef CONFIG_SMP
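/*
 * Route an interrupt to a single CPU chosen from the requested mask by
 * programming the per-interrupt target register with that CPU's APLIC
 * hart index and the default priority.
 */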
static int aplic_direct_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
				     bool force)
{
	struct aplic_priv *priv = irq_data_get_irq_chip_data(d);
	struct aplic_direct *direct = container_of(priv, struct aplic_direct, priv);
	struct aplic_idc *idc;
	unsigned int cpu, val;
	struct cpumask amask;
	void __iomem *target;

	cpumask_and(&amask, &direct->lmask, mask_val);

	if (force)
		cpu = cpumask_first(&amask);
	else
		cpu = cpumask_any_and(&amask, cpu_online_mask);

	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	idc = per_cpu_ptr(&aplic_idcs, cpu);
	target = priv->regs + APLIC_TARGET_BASE + (d->hwirq - 1) * sizeof(u32);
	val = FIELD_PREP(APLIC_TARGET_HART_IDX, idc->hart_index);
	val |= FIELD_PREP(APLIC_TARGET_IPRIO, APLIC_DEFAULT_PRIORITY);
	writel(val, target);

	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	return IRQ_SET_MASK_OK_DONE;
}
#endif

static struct irq_chip aplic_direct_chip = {
	.name		= "APLIC-DIRECT",
	.irq_mask	= aplic_irq_mask,
	.irq_unmask	= aplic_irq_unmask,
	.irq_set_type	= aplic_irq_set_type,
	.irq_eoi	= aplic_direct_irq_eoi,
#ifdef CONFIG_SMP
	.irq_set_affinity = aplic_direct_set_affinity,
#endif
	.flags		= IRQCHIP_SET_TYPE_MASKED |
			  IRQCHIP_SKIP_SET_WAKE |
			  IRQCHIP_MASK_ON_SUSPEND,
};

static int aplic_direct_irqdomain_translate(struct irq_domain *d, struct irq_fwspec *fwspec,
					    unsigned long *hwirq, unsigned int *type)
{
	struct aplic_priv *priv = d->host_data;

	return aplic_irqdomain_translate(fwspec, priv->gsi_base, hwirq, type);
}

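/*
 * Allocate Linux interrupts for the requested hwirqs and restrict their
 * initial affinity to the CPUs that actually have an IDC on this APLIC.
 */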
static int aplic_direct_irqdomain_alloc(struct irq_domain *domain, unsigned int virq,
					unsigned int nr_irqs, void *arg)
{
	struct aplic_priv *priv = domain->host_data;
	struct aplic_direct *direct = container_of(priv, struct aplic_direct, priv);
	struct irq_fwspec *fwspec = arg;
	irq_hw_number_t hwirq;
	unsigned int type;
	int i, ret;

	ret = aplic_irqdomain_translate(fwspec, priv->gsi_base, &hwirq, &type);
	if (ret)
		return ret;

	for (i = 0; i < nr_irqs; i++) {
		irq_domain_set_info(domain, virq + i, hwirq + i, &aplic_direct_chip,
				    priv, handle_fasteoi_irq, NULL, NULL);
		irq_set_affinity(virq + i, &direct->lmask);
	}

	return 0;
}

static const struct irq_domain_ops aplic_direct_irqdomain_ops = {
	.translate	= aplic_direct_irqdomain_translate,
	.alloc		= aplic_direct_irqdomain_alloc,
	.free		= irq_domain_free_irqs_top,
};

/*
 * To handle an APLIC direct interrupt, read the CLAIMI register, which
 * returns the highest-priority pending interrupt and clears its pending
 * bit. Repeat this until the CLAIMI register returns zero.
 */
static void aplic_direct_handle_irq(struct irq_desc *desc)
{
	struct aplic_idc *idc = this_cpu_ptr(&aplic_idcs);
	struct irq_domain *irqdomain = idc->direct->irqdomain;
	struct irq_chip *chip = irq_desc_get_chip(desc);
	irq_hw_number_t hw_irq;
	int irq;

	chained_irq_enter(chip, desc);

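	/*
	 * CLAIMI packs the interrupt identity in its upper bits and the
	 * priority in its lower bits; shift to extract the identity.
	 */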
	while ((hw_irq = readl(idc->regs + APLIC_IDC_CLAIMI))) {
		hw_irq = hw_irq >> APLIC_IDC_TOPI_ID_SHIFT;
		irq = irq_find_mapping(irqdomain, hw_irq);

		if (unlikely(irq <= 0)) {
			dev_warn_ratelimited(idc->direct->priv.dev,
					     "hw_irq %lu mapping not found\n", hw_irq);
		} else {
			generic_handle_irq(irq);
		}
	}

	chained_irq_exit(chip, desc);
}

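/*
 * Enable or disable interrupt delivery for an IDC: when enabling, clear
 * the threshold so that no priority level is masked and turn idelivery
 * on; when disabling, set the threshold to 1 (masking all priorities)
 * and turn idelivery off.
 */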
static void aplic_idc_set_delivery(struct aplic_idc *idc, bool en)
{
	u32 de = (en) ? APLIC_ENABLE_IDELIVERY : APLIC_DISABLE_IDELIVERY;
	u32 th = (en) ? APLIC_ENABLE_ITHRESHOLD : APLIC_DISABLE_ITHRESHOLD;

	/* Priority must be less than threshold for interrupt triggering */
	writel(th, idc->regs + APLIC_IDC_ITHRESHOLD);

	/* Delivery must be set to 1 for interrupt triggering */
	writel(de, idc->regs + APLIC_IDC_IDELIVERY);
}

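/*
 * CPU hotplug callbacks: the chained parent interrupt (the per-CPU
 * external interrupt coming from the hart-level interrupt controller)
 * is enabled when a CPU comes online and disabled when it goes down.
 */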
static int aplic_direct_dying_cpu(unsigned int cpu)
{
	if (aplic_direct_parent_irq)
		disable_percpu_irq(aplic_direct_parent_irq);

	return 0;
}

static int aplic_direct_starting_cpu(unsigned int cpu)
{
	if (aplic_direct_parent_irq) {
		enable_percpu_irq(aplic_direct_parent_irq,
				  irq_get_trigger_type(aplic_direct_parent_irq));
	}

	return 0;
}

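/*
 * Look up the index-th parent interrupt of the APLIC node and return the
 * parent hwirq number together with the hart ID of the hart whose
 * interrupt controller it is wired to.  In the devicetree this typically
 * comes from something like (illustrative only):
 *
 *	interrupts-extended = <&cpu0_intc 9>, <&cpu1_intc 9>;
 */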
static int aplic_direct_parse_parent_hwirq(struct device *dev, u32 index,
					   u32 *parent_hwirq, unsigned long *parent_hartid)
{
	struct of_phandle_args parent;
	int rc;

	/*
	 * Currently, only an OF fwnode is supported, so extend this
	 * function for ACPI support.
	 */
	if (!is_of_node(dev->fwnode))
		return -EINVAL;

	rc = of_irq_parse_one(to_of_node(dev->fwnode), index, &parent);
	if (rc)
		return rc;

	rc = riscv_of_parent_hartid(parent.np, parent_hartid);
	if (rc)
		return rc;

	*parent_hwirq = parent.args[0];
	return 0;
}

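/*
 * Set up an APLIC in direct delivery mode: initialize the per-CPU IDCs,
 * install the chained handler on the per-hart external interrupt, program
 * the global APLIC configuration, and create the IRQ domain.
 */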
int aplic_direct_setup(struct device *dev, void __iomem *regs)
{
	int i, j, rc, cpu, current_cpu, setup_count = 0;
	struct aplic_direct *direct;
	struct irq_domain *domain;
	struct aplic_priv *priv;
	struct aplic_idc *idc;
	unsigned long hartid;
	u32 v, hwirq;

	direct = devm_kzalloc(dev, sizeof(*direct), GFP_KERNEL);
	if (!direct)
		return -ENOMEM;
	priv = &direct->priv;

	rc = aplic_setup_priv(priv, dev, regs);
	if (rc) {
		dev_err(dev, "failed to create APLIC context\n");
		return rc;
	}

	/* Setup per-CPU IDC and target CPU mask */
	current_cpu = get_cpu();
	for (i = 0; i < priv->nr_idcs; i++) {
		rc = aplic_direct_parse_parent_hwirq(dev, i, &hwirq, &hartid);
		if (rc) {
			dev_warn(dev, "parent irq for IDC%d not found\n", i);
			continue;
		}

		/*
		 * Skip parent interrupts other than the external interrupt
		 * for the current privilege level.
		 */
		if (hwirq != RV_IRQ_EXT)
			continue;

		cpu = riscv_hartid_to_cpuid(hartid);
		if (cpu < 0) {
			dev_warn(dev, "invalid cpuid for IDC%d\n", i);
			continue;
		}

		cpumask_set_cpu(cpu, &direct->lmask);

		idc = per_cpu_ptr(&aplic_idcs, cpu);
		idc->hart_index = i;
		idc->regs = priv->regs + APLIC_IDC_BASE + i * APLIC_IDC_SIZE;
		idc->direct = direct;

		aplic_idc_set_delivery(idc, true);

		/*
		 * The boot CPU might not be at APLIC hart_index 0, so check
		 * and update the target registers of all interrupts.
		 */
		if (cpu == current_cpu && idc->hart_index) {
			v = FIELD_PREP(APLIC_TARGET_HART_IDX, idc->hart_index);
			v |= FIELD_PREP(APLIC_TARGET_IPRIO, APLIC_DEFAULT_PRIORITY);
			for (j = 1; j <= priv->nr_irqs; j++)
				writel(v, priv->regs + APLIC_TARGET_BASE + (j - 1) * sizeof(u32));
		}

		setup_count++;
	}
	put_cpu();

	/* Find parent domain and register chained handler */
	domain = irq_find_matching_fwnode(riscv_get_intc_hwnode(),
					  DOMAIN_BUS_ANY);
	if (!aplic_direct_parent_irq && domain) {
		aplic_direct_parent_irq = irq_create_mapping(domain, RV_IRQ_EXT);
		if (aplic_direct_parent_irq) {
			irq_set_chained_handler(aplic_direct_parent_irq,
						aplic_direct_handle_irq);

			/*
			 * Set up a CPUHP notifier to enable the parent
			 * interrupt on all CPUs
			 */
			cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
					  "irqchip/riscv/aplic:starting",
					  aplic_direct_starting_cpu,
					  aplic_direct_dying_cpu);
		}
	}

	/* Fail if we were not able to set up an IDC for any CPU */
	if (!setup_count)
		return -ENODEV;

	/* Set up the global config and interrupt delivery */
	aplic_init_hw_global(priv, false);

	/* Create the irq domain instance for the APLIC */
	direct->irqdomain = irq_domain_create_linear(dev->fwnode, priv->nr_irqs + 1,
						     &aplic_direct_irqdomain_ops, priv);
	if (!direct->irqdomain) {
		dev_err(dev, "failed to create direct irq domain\n");
		return -ENOMEM;
	}

	/* Advertise the interrupt controller */
	dev_info(dev, "%d interrupts directly connected to %d CPUs\n",
		 priv->nr_irqs, priv->nr_idcs);

	return 0;
}