2 * Copyright 2007, Michael Ellerman, IBM Corporation.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
11 #include <linux/interrupt.h>
12 #include <linux/irq.h>
13 #include <linux/kernel.h>
14 #include <linux/pci.h>
15 #include <linux/msi.h>
16 #include <linux/of_platform.h>
17 #include <linux/debugfs.h>
18 #include <linux/slab.h>
21 #include <asm/machdep.h>
/*
 * MSIC registers, specified as offsets from dcr_base
 */
#define MSIC_CTRL_REG	0x0

/* Base Address registers specify FIFO location in BE memory */
#define MSIC_BASE_ADDR_HI_REG	0x3
#define MSIC_BASE_ADDR_LO_REG	0x4

/* Hold the read/write offsets into the FIFO */
#define MSIC_READ_OFFSET_REG	0x5
#define MSIC_WRITE_OFFSET_REG	0x6

/* MSIC control register flags */
#define MSIC_CTRL_ENABLE		0x0001
#define MSIC_CTRL_FIFO_FULL_ENABLE	0x0002
#define MSIC_CTRL_IRQ_ENABLE		0x0008
#define MSIC_CTRL_FULL_STOP_ENABLE	0x0010

/*
 * The MSIC can be configured to use a FIFO of 32KB, 64KB, 128KB or 256KB.
 * Currently we're using a 64KB FIFO size.
 */
#define MSIC_FIFO_SIZE_SHIFT	16
#define MSIC_FIFO_SIZE_BYTES	(1 << MSIC_FIFO_SIZE_SHIFT)

/*
 * To configure the FIFO size as (1 << n) bytes, we write (n - 15) into bits
 * 8-9 of the MSIC control reg.
 */
#define MSIC_CTRL_FIFO_SIZE	(((MSIC_FIFO_SIZE_SHIFT - 15) << 8) & 0x300)

/*
 * We need to mask the read/write offsets to make sure they stay within
 * the bounds of the FIFO. Also they should always be 16-byte aligned.
 */
#define MSIC_FIFO_SIZE_MASK	((MSIC_FIFO_SIZE_BYTES - 1) & ~0xFu)

/* Each entry in the FIFO is 16 bytes, the first 4 bytes hold the irq # */
#define MSIC_FIFO_ENTRY_SIZE	0x10
	/*
	 * Reverse-mapping irq host this MSIC registered for its device node;
	 * used by the cascade handler to validate decoded irq numbers.
	 * NOTE(review): this is the only member of struct axon_msic visible
	 * in this extract — dcr_host, fifo_virt/fifo_phys, read_offset and
	 * trigger are implied by uses elsewhere; confirm against the full file.
	 */
	struct irq_host *irq_host;
/*
 * Debugfs hook; the real implementation is at the bottom of this file.
 * NOTE(review): the #ifdef DEBUG_FS / #else / #endif lines surrounding
 * these two declarations are not visible in this extract — the prototype
 * and the empty inline stub are mutually exclusive alternatives selected
 * by that preprocessor conditional.
 */
void axon_msi_debug_setup(struct device_node *dn, struct axon_msic *msic);

static inline void axon_msi_debug_setup(struct device_node *dn,
					struct axon_msic *msic) { }
87 static void msic_dcr_write(struct axon_msic *msic, unsigned int dcr_n, u32 val)
89 pr_devel("axon_msi: dcr_write(0x%x, 0x%x)\n", val, dcr_n);
91 dcr_write(msic->dcr_host, dcr_n, val);
/*
 * Chained handler for the single cascade interrupt raised by the MSIC.
 *
 * Drains MSI entries from the DMA FIFO: reads the hardware write offset,
 * then walks the software read offset towards it, decoding the irq number
 * in each 16-byte entry and dispatching it via generic_handle_irq().
 *
 * NOTE(review): several control-flow lines (braces, the retry counter
 * declaration/bookkeeping, else branches) are elided in this extract;
 * the comments below describe only the statements that are visible.
 */
static void axon_msi_cascade(unsigned int irq, struct irq_desc *desc)
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct axon_msic *msic = irq_get_handler_data(irq);
	u32 write_offset, msi;

	/* Hardware reports how far it has DMA'd into the FIFO */
	write_offset = dcr_read(msic->dcr_host, MSIC_WRITE_OFFSET_REG);
	pr_devel("axon_msi: original write_offset 0x%x\n", write_offset);

	/* write_offset doesn't wrap properly, so we have to mask it */
	write_offset &= MSIC_FIFO_SIZE_MASK;

	/* Consume entries until we catch up with the hardware, bounded by retries */
	while (msic->read_offset != write_offset && retry < 100) {
		/* First 4 bytes of each 16-byte FIFO entry hold the irq # */
		idx  = msic->read_offset / sizeof(__le32);
		msi  = le32_to_cpu(msic->fifo_virt[idx]);

		pr_devel("axon_msi: woff %x roff %x msi %x\n",
			 write_offset, msic->read_offset, msi);

		/* Only dispatch numbers that map back to our own irq host */
		if (msi < NR_IRQS && irq_map[msi].host == msic->irq_host) {
			generic_handle_irq(msi);
			/* Poison the consumed entry so stale reads are detectable */
			msic->fifo_virt[idx] = cpu_to_le32(0xffffffff);

			/*
			 * Reading the MSIC_WRITE_OFFSET_REG does not
			 * reliably flush the outstanding DMA to the
			 * FIFO buffer. Here we were reading stale
			 * data, so we need to retry.
			 */
			pr_devel("axon_msi: invalid irq 0x%x!\n", msi);

		pr_devel("axon_msi: late irq 0x%x, retry %d\n",

		/* Advance and wrap the software read pointer (16-byte entries) */
		msic->read_offset += MSIC_FIFO_ENTRY_SIZE;
		msic->read_offset &= MSIC_FIFO_SIZE_MASK;

	/* Retried too long without the DMA landing — skip this entry */
	printk(KERN_WARNING "axon_msi: irq timed out\n");

	msic->read_offset += MSIC_FIFO_ENTRY_SIZE;
	msic->read_offset &= MSIC_FIFO_SIZE_MASK;

	/* Acknowledge end-of-interrupt on the cascade irq itself */
	chip->irq_eoi(&desc->irq_data);
/*
 * Walk up the device tree from @dev's OF node looking for a
 * "msi-translator" phandle, and resolve it to the axon_msic that owns
 * the node it points to.
 *
 * Returns the msic on success, NULL on failure (failures logged with
 * dev_dbg).  NOTE(review): error-branch braces/returns and the
 * of_node_put() reference cleanup lines are elided in this extract.
 */
static struct axon_msic *find_msi_translator(struct pci_dev *dev)
	struct irq_host *irq_host;
	struct device_node *dn, *tmp;
	struct axon_msic *msic = NULL;

	/* Start at the PCI device's own OF node (takes a reference) */
	dn = of_node_get(pci_device_to_OF_node(dev));
		dev_dbg(&dev->dev, "axon_msi: no pci_dn found\n");

	/* Search this node and each of its parents for the property */
	for (; dn; dn = of_get_next_parent(dn)) {
		ph = of_get_property(dn, "msi-translator", NULL);

			"axon_msi: no msi-translator property found\n");

	/* Resolve the phandle to the MSIC's own device node */
	dn = of_find_node_by_phandle(*ph);
			"axon_msi: msi-translator doesn't point to a node\n");

	/* The MSIC registered an irq_host for that node at probe time */
	irq_host = irq_find_host(dn);
		dev_dbg(&dev->dev, "axon_msi: no irq_host found for node %s\n",

	msic = irq_host->host_data;
/*
 * ppc_md.msi_check_device hook: MSIs are usable for @dev only if an MSIC
 * (msi-translator) is reachable from its device-tree node.
 * NOTE(review): the return statements are elided in this extract —
 * presumably a negative errno when no translator is found, 0 otherwise;
 * confirm against the full file.
 */
static int axon_msi_check_device(struct pci_dev *dev, int nvec, int type)
	if (!find_msi_translator(dev))
/*
 * Fill in @msg's address_hi/address_lo from the "msi-address-64" or
 * "msi-address-32" device-tree property found on @dev's node or one of
 * its parents.  Which property is searched depends on whether the first
 * MSI descriptor on the device supports 64-bit addresses.
 *
 * NOTE(review): error returns, the property-length validation and loop
 * braces are elided in this extract.
 */
static int setup_msi_msg_address(struct pci_dev *dev, struct msi_msg *msg)
	struct device_node *dn;
	struct msi_desc *entry;

	dn = of_node_get(pci_device_to_OF_node(dev));
		dev_dbg(&dev->dev, "axon_msi: no pci_dn found\n");

	/* All MSIs on one device share capabilities; inspect the first */
	entry = list_first_entry(&dev->msi_list, struct msi_desc, list);

	/* Walk towards the root looking for an address property */
	for (; dn; dn = of_get_next_parent(dn)) {
		if (entry->msi_attrib.is_64) {
			prop = of_get_property(dn, "msi-address-64", &len);

		prop = of_get_property(dn, "msi-address-32", &len);

			"axon_msi: no msi-address-(32|64) properties found\n");

	/* 64-bit property: two cells, high word then low word */
	msg->address_hi = prop[0];
	msg->address_lo = prop[1];

		/* 32-bit property: a single cell for the low word */
		msg->address_lo = prop[0];

		"axon_msi: malformed msi-address-(32|64) property\n");
/*
 * ppc_md.setup_msi_irqs hook: allocate a directly-mapped virq on the
 * MSIC's irq host for every MSI descriptor on @dev, attach the
 * descriptor, and program the (shared) MSI message into the device.
 *
 * NOTE(review): the error returns, msg declaration and closing braces
 * are elided in this extract.
 */
static int axon_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
	unsigned int virq, rc;
	struct msi_desc *entry;
	struct axon_msic *msic;

	msic = find_msi_translator(dev);

	/* Same address/data template is used for every vector on the device */
	rc = setup_msi_msg_address(dev, &msg);

	/* We rely on being able to stash a virq in a u16 */
	BUILD_BUG_ON(NR_IRQS > 65536);

	list_for_each_entry(entry, &dev->msi_list, list) {
		/* NOMAP host: virq number itself is what arrives in the FIFO */
		virq = irq_create_direct_mapping(msic->irq_host);
		if (virq == NO_IRQ) {
			"axon_msi: virq allocation failed!\n");

		dev_dbg(&dev->dev, "axon_msi: allocated virq 0x%x\n", virq);

		irq_set_msi_desc(virq, entry);
		write_msi_msg(virq, &msg);
/*
 * ppc_md.teardown_msi_irqs hook: detach and dispose every virq that was
 * created for @dev's MSI descriptors, skipping unallocated entries.
 * NOTE(review): the loop's `continue` and closing braces are elided in
 * this extract.
 */
static void axon_msi_teardown_msi_irqs(struct pci_dev *dev)
	struct msi_desc *entry;

	dev_dbg(&dev->dev, "axon_msi: tearing down msi irqs\n");

	list_for_each_entry(entry, &dev->msi_list, list) {
		/* Descriptor never got a virq — nothing to undo */
		if (entry->irq == NO_IRQ)

		irq_set_msi_desc(entry->irq, NULL);
		irq_dispose_mapping(entry->irq);
/*
 * irq_chip for individual MSI virqs: masking/unmasking is done through
 * the generic PCI MSI helpers; shutdown just masks the vector.
 * NOTE(review): the .name member and closing brace are elided in this
 * extract.
 */
static struct irq_chip msic_irq_chip = {
	.irq_mask	= mask_msi_irq,
	.irq_unmask	= unmask_msi_irq,
	.irq_shutdown	= mask_msi_irq,
/*
 * irq_host .map callback: give every MSI virq the MSI chip and the
 * simple-irq flow handler.  NOTE(review): the signature's continuation
 * line (hw number parameter), the `return 0;` and closing brace are
 * elided in this extract.
 */
static int msic_host_map(struct irq_host *h, unsigned int virq,
	irq_set_chip_and_handler(virq, &msic_irq_chip, handle_simple_irq);
/* Ops for the MSIC's NOMAP irq host — only .map is needed */
static struct irq_host_ops msic_host_ops = {
	.map	= msic_host_map,
/*
 * Platform shutdown hook: quiesce the MSIC by clearing its enable and
 * irq-enable bits in the control register (read-modify-write so the
 * FIFO-size configuration bits are preserved).
 * NOTE(review): the declaration of `tmp` and braces are elided in this
 * extract.
 */
static void axon_msi_shutdown(struct platform_device *device)
	struct axon_msic *msic = dev_get_drvdata(&device->dev);

	pr_devel("axon_msi: disabling %s\n",
		 msic->irq_host->of_node->full_name);
	tmp  = dcr_read(msic->dcr_host, MSIC_CTRL_REG);
	tmp &= ~MSIC_CTRL_ENABLE & ~MSIC_CTRL_IRQ_ENABLE;
	msic_dcr_write(msic, MSIC_CTRL_REG, tmp);
/*
 * Probe one MSIC instance:
 *   - map its DCR register window,
 *   - allocate a coherent DMA FIFO the hardware writes MSI entries into,
 *   - map the cascade interrupt and register a NOMAP irq host,
 *   - program the FIFO address into the hardware and enable it,
 *   - install the global ppc_md MSI hooks.
 *
 * NOTE(review): error-path braces/returns/gotos, the `virq` declaration
 * and the out-label cleanup section are elided in this extract; the
 * comments describe the visible statements only.
 */
static int axon_msi_probe(struct platform_device *device)
	struct device_node *dn = device->dev.of_node;
	struct axon_msic *msic;
	int dcr_base, dcr_len;

	pr_devel("axon_msi: setting up dn %s\n", dn->full_name);

	msic = kzalloc(sizeof(struct axon_msic), GFP_KERNEL);
		printk(KERN_ERR "axon_msi: couldn't allocate msic for %s\n",

	/* DCR window location/size come from the device tree */
	dcr_base = dcr_resource_start(dn, 0);
	dcr_len = dcr_resource_len(dn, 0);

	if (dcr_base == 0 || dcr_len == 0) {
		"axon_msi: couldn't parse dcr properties on %s\n",

	msic->dcr_host = dcr_map(dn, dcr_base, dcr_len);
	if (!DCR_MAP_OK(msic->dcr_host)) {
		printk(KERN_ERR "axon_msi: dcr_map failed for %s\n",

	/* FIFO the MSIC DMAs incoming MSI writes into */
	msic->fifo_virt = dma_alloc_coherent(&device->dev, MSIC_FIFO_SIZE_BYTES,
					     &msic->fifo_phys, GFP_KERNEL);
	if (!msic->fifo_virt) {
		printk(KERN_ERR "axon_msi: couldn't allocate fifo for %s\n",

	/* The MSIC's own (cascade) interrupt line */
	virq = irq_of_parse_and_map(dn, 0);
	if (virq == NO_IRQ) {
		printk(KERN_ERR "axon_msi: irq parse and map failed for %s\n",

	/* Pre-poison the FIFO so stale entries are detectable (see cascade) */
	memset(msic->fifo_virt, 0xff, MSIC_FIFO_SIZE_BYTES);

	/* NOMAP host: MSI vectors are direct-mapped virq numbers */
	msic->irq_host = irq_alloc_host(dn, IRQ_HOST_MAP_NOMAP,
					NR_IRQS, &msic_host_ops, 0);
	if (!msic->irq_host) {
		printk(KERN_ERR "axon_msi: couldn't allocate irq_host for %s\n",

	msic->irq_host->host_data = msic;

	irq_set_handler_data(virq, msic);
	irq_set_chained_handler(virq, axon_msi_cascade);
	pr_devel("axon_msi: irq 0x%x setup for axon_msi\n", virq);

	/* Enable the MSIC hardware */
	msic_dcr_write(msic, MSIC_BASE_ADDR_HI_REG, msic->fifo_phys >> 32);
	msic_dcr_write(msic, MSIC_BASE_ADDR_LO_REG,
		       msic->fifo_phys & 0xFFFFFFFF);
	msic_dcr_write(msic, MSIC_CTRL_REG,
		       MSIC_CTRL_IRQ_ENABLE | MSIC_CTRL_ENABLE |
		       MSIC_CTRL_FIFO_SIZE);

	/* Start the software read pointer where the hardware currently is */
	msic->read_offset = dcr_read(msic->dcr_host, MSIC_WRITE_OFFSET_REG)
				& MSIC_FIFO_SIZE_MASK;

	dev_set_drvdata(&device->dev, msic);

	/* Route all PCI MSI operations on this platform through the MSIC */
	ppc_md.setup_msi_irqs = axon_msi_setup_msi_irqs;
	ppc_md.teardown_msi_irqs = axon_msi_teardown_msi_irqs;
	ppc_md.msi_check_device = axon_msi_check_device;

	axon_msi_debug_setup(dn, msic);

	printk(KERN_DEBUG "axon_msi: setup MSIC on %s\n", dn->full_name);

	/* Error-path cleanup: release the DMA FIFO */
	dma_free_coherent(&device->dev, MSIC_FIFO_SIZE_BYTES, msic->fifo_virt,
/* Device-tree match table — bind to "ibm,axon-msic" nodes */
static const struct of_device_id axon_msi_device_id[] = {
		.compatible	= "ibm,axon-msic"
/*
 * Platform driver: no .remove — once probed, the MSIC stays up for the
 * life of the system; .shutdown quiesces the hardware at power-off.
 * NOTE(review): the inner `.driver = {` / `.name` lines are elided in
 * this extract.
 */
static struct platform_driver axon_msi_driver = {
	.probe		= axon_msi_probe,
	.shutdown	= axon_msi_shutdown,
		.owner = THIS_MODULE,
		.of_match_table = axon_msi_device_id,
/*
 * Register the driver early (subsys_initcall) so MSIs are available
 * before regular device drivers probe.
 */
static int __init axon_msi_init(void)
	return platform_driver_register(&axon_msi_driver);
subsys_initcall(axon_msi_init);
/*
 * Debugfs attribute backing: writing the file fires a test MSI by
 * storing the value into the MSIC's memory-mapped trigger register.
 * NOTE(review): braces/returns and msic_get's body are elided in this
 * extract; reads presumably return a fixed value — confirm against the
 * full file.
 */
static int msic_set(void *data, u64 val)
	struct axon_msic *msic = data;
	out_le32(msic->trigger, val);

static int msic_get(void *data, u64 *val)

DEFINE_SIMPLE_ATTRIBUTE(fops_msic, msic_get, msic_set, "%llu\n");
/*
 * Create a debugfs file ("msic_<nid>") that lets a developer trigger
 * MSIs by hand: translate the node's "reg" property to a CPU physical
 * address, ioremap 4 bytes of it as the trigger register, and expose it
 * through fops_msic.  All failures are logged and non-fatal.
 *
 * NOTE(review): local declarations (addr, name buffer), braces and
 * early returns are elided in this extract.
 */
void axon_msi_debug_setup(struct device_node *dn, struct axon_msic *msic)
	addr = of_translate_address(dn, of_get_property(dn, "reg", NULL));
	if (addr == OF_BAD_ADDR) {
		pr_devel("axon_msi: couldn't translate reg property\n");

	/* Map just the 4-byte trigger register */
	msic->trigger = ioremap(addr, 0x4);
	if (!msic->trigger) {
		pr_devel("axon_msi: ioremap failed\n");

	/* One file per NUMA node the MSIC belongs to */
	snprintf(name, sizeof(name), "msic_%d", of_node_to_nid(dn));

	if (!debugfs_create_file(name, 0600, powerpc_debugfs_root,
		pr_devel("axon_msi: debugfs_create_file failed!\n");