// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018-22 Raspberry Pi Ltd.
 * All rights reserved.
 */

#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
#include <linux/completion.h>
#include <linux/etherdevice.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/mfd/core.h>
#include <linux/mmc/host.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/rp1_platform.h>
#include <linux/reset.h>
#include <linux/slab.h>

#include <dt-bindings/mfd/rp1.h>

/* TO DO:
 * 1. Occasional shutdown crash - RP1 being closed before its children?
 * 2. DT mode interrupt handling.
 */

#define RP1_DRIVER_NAME "rp1"

#define PCI_VENDOR_ID_RPI 0x1de4
#define PCI_DEVICE_ID_RP1_C0 0x0001
#define PCI_DEVICE_REV_RP1_C0 2

#define RP1_ACTUAL_IRQS RP1_INT_END
#define RP1_IRQS RP1_ACTUAL_IRQS

#define RP1_SYSCLK_RATE 200000000
#define RP1_SYSCLK_FPGA_RATE 60000000

// Don't want to include the whole sysinfo reg header
#define SYSINFO_CHIP_ID_OFFSET 0x00000000
#define SYSINFO_PLATFORM_OFFSET 0x00000004

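/*
 * The RP1 PCIe APB block exposes atomic SET and CLR aliases of its registers
 * at these offsets: writing a mask to the SET or CLR alias sets or clears
 * only those bits, avoiding a read-modify-write. msix_cfg_set() and
 * msix_cfg_clr() below rely on this for the per-vector MSIX_CFG registers.
 */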
#define REG_RW 0x000
#define REG_SET 0x800
#define REG_CLR 0xc00

// MSIX CFG registers start at 0x8
#define MSIX_CFG(x) (0x8 + (4 * (x)))

#define MSIX_CFG_IACK_EN BIT(3)
#define MSIX_CFG_IACK BIT(2)
#define MSIX_CFG_TEST BIT(1)
#define MSIX_CFG_ENABLE BIT(0)
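/*
 * MSIX_CFG_IACK_EN puts a vector into explicit-acknowledge mode:
 * rp1_irq_set_type() sets it for level-triggered sources, and
 * rp1_chained_handle_irq() then writes MSIX_CFG_IACK once the handler has
 * run (presumably so a still-asserted level source is signalled again).
 */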

#define INTSTATL 0x108
#define INTSTATH 0x10c

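/*
 * Per-device state. pcie_irqds caches the irq_data of the parent PCIe MSI-X
 * vectors so the RP1 irq_chip callbacks can forward mask/unmask and affinity
 * changes to them.
 */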
struct rp1_dev {
	struct pci_dev *pdev;
	struct device *dev;
	resource_size_t bar_start;
	resource_size_t bar_end;
	struct clk *sys_clk;
	struct irq_domain *domain;
	struct irq_data *pcie_irqds[64];
	void __iomem *msix_cfg_regs;
};

static bool rp1_level_triggered_irq[RP1_ACTUAL_IRQS] = { 0 };

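/*
 * Module-level state: the chained MSI-X handler and rp1_get_platform() have
 * no driver-private pointer available, so a single RP1 instance is assumed
 * and cached here, along with the chip ID and platform words read at probe.
 */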
static struct rp1_dev *g_rp1;
static u32 g_chip_id, g_platform;

static void dump_bar(struct pci_dev *pdev, unsigned int bar)
{
	dev_info(&pdev->dev,
		 "bar%d len 0x%llx, start 0x%llx, end 0x%llx, flags, 0x%lx\n",
		 bar,
		 pci_resource_len(pdev, bar),
		 pci_resource_start(pdev, bar),
		 pci_resource_end(pdev, bar),
		 pci_resource_flags(pdev, bar));
}

static void msix_cfg_set(struct rp1_dev *rp1, unsigned int hwirq, u32 value)
{
	writel(value, rp1->msix_cfg_regs + REG_SET + MSIX_CFG(hwirq));
}

static void msix_cfg_clr(struct rp1_dev *rp1, unsigned int hwirq, u32 value)
{
	writel(value, rp1->msix_cfg_regs + REG_CLR + MSIX_CFG(hwirq));
}

static void rp1_mask_irq(struct irq_data *irqd)
{
	struct rp1_dev *rp1 = irqd->domain->host_data;
	struct irq_data *pcie_irqd = rp1->pcie_irqds[irqd->hwirq];

	pci_msi_mask_irq(pcie_irqd);
}

static void rp1_unmask_irq(struct irq_data *irqd)
{
	struct rp1_dev *rp1 = irqd->domain->host_data;
	struct irq_data *pcie_irqd = rp1->pcie_irqds[irqd->hwirq];

	pci_msi_unmask_irq(pcie_irqd);
}

static int rp1_irq_set_type(struct irq_data *irqd, unsigned int type)
{
	struct rp1_dev *rp1 = irqd->domain->host_data;
	unsigned int hwirq = (unsigned int)irqd->hwirq;
	int ret = 0;

	switch (type) {
	case IRQ_TYPE_LEVEL_HIGH:
		dev_dbg(rp1->dev, "MSIX IACK EN for irq %d\n", hwirq);
		msix_cfg_set(rp1, hwirq, MSIX_CFG_IACK_EN);
		rp1_level_triggered_irq[hwirq] = true;
		break;
	case IRQ_TYPE_EDGE_RISING:
		msix_cfg_clr(rp1, hwirq, MSIX_CFG_IACK_EN);
		rp1_level_triggered_irq[hwirq] = false;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int rp1_irq_set_affinity(struct irq_data *irqd, const struct cpumask *dest, bool force)
{
	struct rp1_dev *rp1 = irqd->domain->host_data;
	struct irq_data *pcie_irqd = rp1->pcie_irqds[irqd->hwirq];

	return msi_domain_set_affinity(pcie_irqd, dest, force);
}

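/*
 * irq_chip for the RP1 interrupt domain. Each RP1 peripheral interrupt is
 * backed one-to-one by a PCIe MSI-X vector, so mask, unmask and affinity
 * operations are forwarded to the corresponding MSI-X irq_data.
 */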
static struct irq_chip rp1_irq_chip = {
	.name = "rp1_irq_chip",
	.irq_mask = rp1_mask_irq,
	.irq_unmask = rp1_unmask_irq,
	.irq_set_type = rp1_irq_set_type,
	.irq_set_affinity = rp1_irq_set_affinity,
};

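/*
 * Chained handler installed on every MSI-X vector: map the vector's hwirq
 * back into the RP1 domain, run the child handler, then acknowledge
 * level-triggered sources with MSIX_CFG_IACK. The global g_rp1 is used
 * because irq_set_chained_handler() passes no driver data to the handler.
 */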
static void rp1_chained_handle_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned int hwirq = desc->irq_data.hwirq & 0x3f;
	struct rp1_dev *rp1 = g_rp1;
	int new_irq;

	chained_irq_enter(chip, desc);

	new_irq = irq_linear_revmap(rp1->domain, hwirq);
	generic_handle_irq(new_irq);
	if (rp1_level_triggered_irq[hwirq])
		msix_cfg_set(rp1, hwirq, MSIX_CFG_IACK);

	chained_irq_exit(chip, desc);
}

static int rp1_irq_xlate(struct irq_domain *d, struct device_node *node,
			 const u32 *intspec, unsigned int intsize,
			 unsigned long *out_hwirq, unsigned int *out_type)
{
	struct rp1_dev *rp1 = d->host_data;
	struct irq_data *pcie_irqd;
	unsigned long hwirq;
	int pcie_irq;
	int ret;

	ret = irq_domain_xlate_twocell(d, node, intspec, intsize,
				       &hwirq, out_type);
	if (!ret) {
		pcie_irq = pci_irq_vector(rp1->pdev, hwirq);
		pcie_irqd = irq_get_irq_data(pcie_irq);
		rp1->pcie_irqds[hwirq] = pcie_irqd;
		*out_hwirq = hwirq;
	}
	return ret;
}

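/*
 * Domain activate/deactivate: gate the vector at the RP1 end with
 * MSIX_CFG_ENABLE, then activate or deactivate the parent PCIe MSI-X
 * interrupt itself.
 */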
static int rp1_irq_activate(struct irq_domain *d, struct irq_data *irqd,
			    bool reserve)
{
	struct rp1_dev *rp1 = d->host_data;
	struct irq_data *pcie_irqd;

	pcie_irqd = rp1->pcie_irqds[irqd->hwirq];
	msix_cfg_set(rp1, (unsigned int)irqd->hwirq, MSIX_CFG_ENABLE);
	return irq_domain_activate_irq(pcie_irqd, reserve);
}

static void rp1_irq_deactivate(struct irq_domain *d, struct irq_data *irqd)
{
	struct rp1_dev *rp1 = d->host_data;
	struct irq_data *pcie_irqd;

	pcie_irqd = rp1->pcie_irqds[irqd->hwirq];
	msix_cfg_clr(rp1, (unsigned int)irqd->hwirq, MSIX_CFG_ENABLE);
	irq_domain_deactivate_irq(pcie_irqd);
}

static const struct irq_domain_ops rp1_domain_ops = {
	.xlate = rp1_irq_xlate,
	.activate = rp1_irq_activate,
	.deactivate = rp1_irq_deactivate,
};

static inline dma_addr_t rp1_io_to_phys(struct rp1_dev *rp1, unsigned int offset)
{
	return rp1->bar_start + offset;
}

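/*
 * One-off register read from an RP1 block via a temporary mapping; only used
 * at probe time to fetch the chip ID and platform words from SYSINFO.
 */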
static u32 rp1_reg_read(struct rp1_dev *rp1, unsigned int base_addr, u32 offset)
{
	dma_addr_t phys = rp1_io_to_phys(rp1, base_addr);
	void __iomem *regblock = ioremap(phys, 0x1000);
	u32 value = readl(regblock + offset);

	iounmap(regblock);
	return value;
}

void rp1_get_platform(u32 *chip_id, u32 *platform)
{
	if (chip_id)
		*chip_id = g_chip_id;
	if (platform)
		*platform = g_platform;
}
EXPORT_SYMBOL_GPL(rp1_get_platform);
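/*
 * Hypothetical example of a caller (illustrative only - apart from
 * rp1_get_platform() and RP1_PLATFORM_FPGA, the names are made up): an RP1
 * sub-device driver can adapt itself to FPGA builds of the chip:
 *
 *	u32 chip_id, platform;
 *
 *	rp1_get_platform(&chip_id, &platform);
 *	if (platform & RP1_PLATFORM_FPGA)
 *		use_fpga_clock_rates();
 */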

static int rp1_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct reset_control *reset;
	struct platform_device *pcie_pdev;
	struct device_node *rp1_node;
	struct rp1_dev *rp1;
	int err = 0;
	int i;

	reset = devm_reset_control_get_optional_exclusive(&pdev->dev, NULL);
	if (IS_ERR(reset))
		return PTR_ERR(reset);
	reset_control_reset(reset);

	dump_bar(pdev, 0);
	dump_bar(pdev, 1);

	if (pci_resource_len(pdev, 1) <= 0x10000) {
		dev_err(&pdev->dev,
			"Not initialised - is the firmware running?\n");
		return -EINVAL;
	}

	/* enable pci device */
	err = pcim_enable_device(pdev);
	if (err < 0) {
		dev_err(&pdev->dev, "Enabling PCI device has failed: %d\n",
			err);
		return err;
	}

	pci_set_master(pdev);

	err = pci_alloc_irq_vectors(pdev, RP1_IRQS, RP1_IRQS,
				    PCI_IRQ_MSIX);
	if (err != RP1_IRQS) {
		dev_err(&pdev->dev, "pci_alloc_irq_vectors failed - %d\n", err);
		return err;
	}

	rp1 = devm_kzalloc(&pdev->dev, sizeof(*rp1), GFP_KERNEL);
	if (!rp1)
		return -ENOMEM;

	rp1->pdev = pdev;
	rp1->dev = &pdev->dev;

	pci_set_drvdata(pdev, rp1);

	rp1->bar_start = pci_resource_start(pdev, 1);
	rp1->bar_end = pci_resource_end(pdev, 1);

	// Get chip id
	g_chip_id = rp1_reg_read(rp1, RP1_SYSINFO_BASE, SYSINFO_CHIP_ID_OFFSET);
	g_platform = rp1_reg_read(rp1, RP1_SYSINFO_BASE, SYSINFO_PLATFORM_OFFSET);
	dev_info(&pdev->dev, "chip_id 0x%x%s\n", g_chip_id,
		 (g_platform & RP1_PLATFORM_FPGA) ? " FPGA" : "");
	if (g_chip_id != RP1_C0_CHIP_ID) {
		dev_err(&pdev->dev, "wrong chip id (%x)\n", g_chip_id);
		return -EINVAL;
	}

	rp1_node = of_find_node_by_name(NULL, "rp1");
	if (!rp1_node) {
		dev_err(&pdev->dev, "failed to find RP1 DT node\n");
		return -EINVAL;
	}

	pcie_pdev = of_find_device_by_node(rp1_node->parent);
	rp1->domain = irq_domain_add_linear(rp1_node, RP1_IRQS,
					    &rp1_domain_ops, rp1);

	g_rp1 = rp1;

	/* TODO can this go in the rp1 device tree entry? */
	rp1->msix_cfg_regs = ioremap(rp1_io_to_phys(rp1, RP1_PCIE_APBS_BASE), 0x1000);

	/* Map every RP1 interrupt and chain it to its MSI-X vector */
	for (i = 0; i < RP1_IRQS; i++) {
		unsigned int irq = irq_create_mapping(rp1->domain, i);

		if (!irq) {
			dev_err(&pdev->dev, "failed to create irq mapping\n");
			return -EINVAL;
		}

		irq_set_chip_data(irq, rp1);
		irq_set_chip_and_handler(irq, &rp1_irq_chip, handle_level_irq);
		irq_set_probe(irq);
		irq_set_chained_handler(pci_irq_vector(pdev, i),
					rp1_chained_handle_irq);
	}

	if (rp1_node)
		of_platform_populate(rp1_node, NULL, NULL, &pcie_pdev->dev);

	of_node_put(rp1_node);

	return 0;
}

static void rp1_remove(struct pci_dev *pdev)
{
	struct rp1_dev *rp1 = pci_get_drvdata(pdev);

	mfd_remove_devices(&pdev->dev);

	clk_unregister(rp1->sys_clk);
}

static const struct pci_device_id dev_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_RPI, PCI_DEVICE_ID_RP1_C0), },
	{ 0, }
};

static struct pci_driver rp1_driver = {
	.name = RP1_DRIVER_NAME,
	.id_table = dev_id_table,
	.probe = rp1_probe,
	.remove = rp1_remove,
};

module_pci_driver(rp1_driver);

MODULE_AUTHOR("Phil Elwell <[email protected]>");
MODULE_DESCRIPTION("RP1 wrapper");
MODULE_LICENSE("GPL");