|
#define CR2_RECINVSID			(1 << 1)
#define CR2_E2H				(1 << 0)

/* Global Bypass Attribute register: controls traffic for unmatched streams */
#define ARM_SMMU_GBPA			0x44
#define GBPA_ABORT			(1 << 20)
/*
 * Bit 31 must be shifted on an unsigned operand: 1 << 31 overflows a
 * signed int, which is undefined behaviour in ISO C.  The value is
 * unchanged; GBPA_UPDATE is only ever used in u32 register arithmetic.
 */
#define GBPA_UPDATE			(1U << 31)

#define ARM_SMMU_IRQ_CTRL		0x50
#define IRQ_CTRL_EVTQ_IRQEN		(1 << 2)
#define IRQ_CTRL_PRIQ_IRQEN		(1 << 1)
|
@@ -2124,6 +2128,24 @@ static int arm_smmu_write_reg_sync(struct arm_smmu_device *smmu, u32 val,
|
2124 | 2128 | 1, ARM_SMMU_POLL_TIMEOUT_US);
|
2125 | 2129 | }
|
2126 | 2130 |
|
/*
 * GBPA is "special": unlike the other global registers it is not paired
 * with an ACK register.  Instead it uses an UPDATE-bit handshake: poll
 * until any in-flight update has completed (GBPA_UPDATE clear), apply
 * the caller's set/clr masks to the current value, write it back with
 * GBPA_UPDATE set, then poll again until the hardware clears the bit.
 * Returns 0 on success or the (negative) poll-timeout error.
 *
 * NOTE(review): callers presumably must serialise updates themselves —
 * there is no locking here; confirm against the probe/reset paths.
 */
static int arm_smmu_update_gbpa(struct arm_smmu_device *smmu, u32 set, u32 clr)
{
	int ret;
	u32 reg, __iomem *gbpa = smmu->base + ARM_SMMU_GBPA;

	/* Wait for a previous update (if any) to finish before touching GBPA */
	ret = readl_relaxed_poll_timeout(gbpa, reg, !(reg & GBPA_UPDATE),
					 1, ARM_SMMU_POLL_TIMEOUT_US);
	if (ret)
		return ret;

	/* Read-modify-write: clear first so a bit in both masks ends up set */
	reg &= ~clr;
	reg |= set;
	/* GBPA_UPDATE triggers the hardware to latch the new value */
	writel_relaxed(reg | GBPA_UPDATE, gbpa);
	/* The update has taken effect once the hardware clears GBPA_UPDATE */
	return readl_relaxed_poll_timeout(gbpa, reg, !(reg & GBPA_UPDATE),
					  1, ARM_SMMU_POLL_TIMEOUT_US);
}
2127 | 2149 | static void arm_smmu_free_msis(void *data)
|
2128 | 2150 | {
|
2129 | 2151 | struct device *dev = data;
|
@@ -2269,7 +2291,7 @@ static int arm_smmu_device_disable(struct arm_smmu_device *smmu)
|
2269 | 2291 | return ret;
|
2270 | 2292 | }
|
2271 | 2293 |
|
2272 |
| -static int arm_smmu_device_reset(struct arm_smmu_device *smmu) |
| 2294 | +static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass) |
2273 | 2295 | {
|
2274 | 2296 | int ret;
|
2275 | 2297 | u32 reg, enables;
|
@@ -2370,8 +2392,17 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu)
|
2370 | 2392 | return ret;
|
2371 | 2393 | }
|
2372 | 2394 |
|
2373 |
| - /* Enable the SMMU interface */ |
2374 |
| - enables |= CR0_SMMUEN; |
| 2395 | + |
| 2396 | + /* Enable the SMMU interface, or ensure bypass */ |
| 2397 | + if (!bypass || disable_bypass) { |
| 2398 | + enables |= CR0_SMMUEN; |
| 2399 | + } else { |
| 2400 | + ret = arm_smmu_update_gbpa(smmu, 0, GBPA_ABORT); |
| 2401 | + if (ret) { |
| 2402 | + dev_err(smmu->dev, "GBPA not responding to update\n"); |
| 2403 | + return ret; |
| 2404 | + } |
| 2405 | + } |
2375 | 2406 | ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0,
|
2376 | 2407 | ARM_SMMU_CR0ACK);
|
2377 | 2408 | if (ret) {
|
@@ -2570,6 +2601,15 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
|
2570 | 2601 | struct resource *res;
|
2571 | 2602 | struct arm_smmu_device *smmu;
|
2572 | 2603 | struct device *dev = &pdev->dev;
|
| 2604 | + bool bypass = true; |
| 2605 | + u32 cells; |
| 2606 | + |
| 2607 | + if (of_property_read_u32(dev->of_node, "#iommu-cells", &cells)) |
| 2608 | + dev_err(dev, "missing #iommu-cells property\n"); |
| 2609 | + else if (cells != 1) |
| 2610 | + dev_err(dev, "invalid #iommu-cells value (%d)\n", cells); |
| 2611 | + else |
| 2612 | + bypass = false; |
2573 | 2613 |
|
2574 | 2614 | smmu = devm_kzalloc(dev, sizeof(*smmu), GFP_KERNEL);
|
2575 | 2615 | if (!smmu) {
|
@@ -2622,7 +2662,7 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
|
2622 | 2662 | platform_set_drvdata(pdev, smmu);
|
2623 | 2663 |
|
2624 | 2664 | /* Reset the device */
|
2625 |
| - return arm_smmu_device_reset(smmu); |
| 2665 | + return arm_smmu_device_reset(smmu, bypass); |
2626 | 2666 | }
|
2627 | 2667 |
|
2628 | 2668 | static int arm_smmu_device_remove(struct platform_device *pdev)
|
|
0 commit comments