1/* 2 * Broadcom specific AMBA 3 * PCI Host 4 * 5 * Licensed under the GNU/GPL. See COPYING for details. 6 */ 7 8#include "bcma_private.h" 9#include <linux/slab.h> 10#include <linux/bcma/bcma.h> 11#include <linux/pci.h> 12#include <linux/module.h> 13 14static void bcma_host_pci_switch_core(struct bcma_device *core) 15{ 16 pci_write_config_dword(core->bus->host_pci, BCMA_PCI_BAR0_WIN, 17 core->addr); 18 pci_write_config_dword(core->bus->host_pci, BCMA_PCI_BAR0_WIN2, 19 core->wrap); 20 core->bus->mapped_core = core; 21 pr_debug("Switched to core: 0x%X\n", core->id.id); 22} 23 24/* Provides access to the requested core. Returns base offset that has to be 25 * used. It makes use of fixed windows when possible. */ 26static u16 bcma_host_pci_provide_access_to_core(struct bcma_device *core) 27{ 28 switch (core->id.id) { 29 case BCMA_CORE_CHIPCOMMON: 30 return 3 * BCMA_CORE_SIZE; 31 case BCMA_CORE_PCIE: 32 return 2 * BCMA_CORE_SIZE; 33 } 34 35 if (core->bus->mapped_core != core) 36 bcma_host_pci_switch_core(core); 37 return 0; 38} 39 40static u8 bcma_host_pci_read8(struct bcma_device *core, u16 offset) 41{ 42 offset += bcma_host_pci_provide_access_to_core(core); 43 return ioread8(core->bus->mmio + offset); 44} 45 46static u16 bcma_host_pci_read16(struct bcma_device *core, u16 offset) 47{ 48 offset += bcma_host_pci_provide_access_to_core(core); 49 return ioread16(core->bus->mmio + offset); 50} 51 52static u32 bcma_host_pci_read32(struct bcma_device *core, u16 offset) 53{ 54 offset += bcma_host_pci_provide_access_to_core(core); 55 return ioread32(core->bus->mmio + offset); 56} 57 58static void bcma_host_pci_write8(struct bcma_device *core, u16 offset, 59 u8 value) 60{ 61 offset += bcma_host_pci_provide_access_to_core(core); 62 iowrite8(value, core->bus->mmio + offset); 63} 64 65static void bcma_host_pci_write16(struct bcma_device *core, u16 offset, 66 u16 value) 67{ 68 offset += bcma_host_pci_provide_access_to_core(core); 69 iowrite16(value, core->bus->mmio + offset); 70} 71 
/* 32-bit MMIO write of @value to @core at @offset (window-adjusted). */
static void bcma_host_pci_write32(struct bcma_device *core, u16 offset,
				  u32 value)
{
	offset += bcma_host_pci_provide_access_to_core(core);
	iowrite32(value, core->bus->mmio + offset);
}

#ifdef CONFIG_BCMA_BLOCKIO
/* Bulk MMIO read of @count bytes from @core into @buffer, transferred in
 * units of @reg_width (1, 2 or 4 bytes).  Only the sliding window is
 * switched here; no fixed-window offset is added, so @offset is used as-is
 * relative to the mapped BAR — NOTE(review): presumably callers pass
 * window-relative offsets; confirm against the bcma core.  A @count not
 * aligned to @reg_width triggers a WARN but the (truncated) transfer is
 * still performed. */
void bcma_host_pci_block_read(struct bcma_device *core, void *buffer,
			      size_t count, u16 offset, u8 reg_width)
{
	void __iomem *addr = core->bus->mmio + offset;
	if (core->bus->mapped_core != core)
		bcma_host_pci_switch_core(core);
	switch (reg_width) {
	case sizeof(u8):
		ioread8_rep(addr, buffer, count);
		break;
	case sizeof(u16):
		WARN_ON(count & 1);
		ioread16_rep(addr, buffer, count >> 1);
		break;
	case sizeof(u32):
		WARN_ON(count & 3);
		ioread32_rep(addr, buffer, count >> 2);
		break;
	default:
		WARN_ON(1);
	}
}

/* Bulk MMIO write of @count bytes from @buffer to @core, in units of
 * @reg_width.  Same window/offset semantics and alignment WARNs as
 * bcma_host_pci_block_read() above. */
void bcma_host_pci_block_write(struct bcma_device *core, const void *buffer,
			       size_t count, u16 offset, u8 reg_width)
{
	void __iomem *addr = core->bus->mmio + offset;
	if (core->bus->mapped_core != core)
		bcma_host_pci_switch_core(core);
	switch (reg_width) {
	case sizeof(u8):
		iowrite8_rep(addr, buffer, count);
		break;
	case sizeof(u16):
		WARN_ON(count & 1);
		iowrite16_rep(addr, buffer, count >> 1);
		break;
	case sizeof(u32):
		WARN_ON(count & 3);
		iowrite32_rep(addr, buffer, count >> 2);
		break;
	default:
		WARN_ON(1);
	}
}
#endif

/* 32-bit read through the second fixed window (one BCMA_CORE_SIZE above
 * the core window) — presumably the wrapper space programmed into
 * BCMA_PCI_BAR0_WIN2 by bcma_host_pci_switch_core(); confirm. */
static u32 bcma_host_pci_aread32(struct bcma_device *core, u16 offset)
{
	if (core->bus->mapped_core != core)
		bcma_host_pci_switch_core(core);
	return ioread32(core->bus->mmio + (1 * BCMA_CORE_SIZE) + offset);
}

/* 32-bit write counterpart of bcma_host_pci_aread32(). */
static void bcma_host_pci_awrite32(struct bcma_device *core, u16 offset,
				   u32 value)
{
	if (core->bus->mapped_core != core)
		bcma_host_pci_switch_core(core);
	iowrite32(value, core->bus->mmio + (1 * BCMA_CORE_SIZE) + offset);
}

/* MMIO accessor vtable handed to the bcma core via bus->ops. */
const struct bcma_host_ops bcma_host_pci_ops = {
	.read8		= bcma_host_pci_read8,
	.read16		= bcma_host_pci_read16,
	.read32		= bcma_host_pci_read32,
	.write8		= bcma_host_pci_write8,
	.write16	= bcma_host_pci_write16,
	.write32	= bcma_host_pci_write32,
#ifdef CONFIG_BCMA_BLOCKIO
	.block_read	= bcma_host_pci_block_read,
	.block_write	= bcma_host_pci_block_write,
#endif
	.aread32	= bcma_host_pci_aread32,
	.awrite32	= bcma_host_pci_awrite32,
};

/* Probe: enable the PCI device, map BAR 0 and register the bcma bus
 * behind it.  Error paths unwind via gotos in reverse acquisition order. */
static int __devinit bcma_host_pci_probe(struct pci_dev *dev,
					 const struct pci_device_id *id)
{
	struct bcma_bus *bus;
	int err = -ENOMEM;
	const char *name;
	u32 val;

	/* Alloc */
	bus = kzalloc(sizeof(*bus), GFP_KERNEL);
	if (!bus)
		goto out;

	/* Basic PCI configuration */
	err = pci_enable_device(dev);
	if (err)
		goto err_kfree_bus;

	/* Label the resources with the driver name when available,
	 * otherwise fall back to the device name. */
	name = dev_name(&dev->dev);
	if (dev->driver && dev->driver->name)
		name = dev->driver->name;
	err = pci_request_regions(dev, name);
	if (err)
		goto err_pci_disable;
	pci_set_master(dev);

	/* Disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state */
	pci_read_config_dword(dev, 0x40, &val);
	if ((val & 0x0000ff00) != 0)
		pci_write_config_dword(dev, 0x40, val & 0xffff00ff);

	/* SSB needed additional powering up, do we have any AMBA PCI cards? */
	if (!pci_is_pcie(dev))
		pr_err("PCI card detected, report problems.\n");

	/* Map MMIO: ~0UL asks pci_iomap() for the full length of BAR 0. */
	err = -ENOMEM;
	bus->mmio = pci_iomap(dev, 0, ~0UL);
	if (!bus->mmio)
		goto err_pci_release_regions;

	/* Host specific */
	bus->host_pci = dev;
	bus->hosttype = BCMA_HOSTTYPE_PCI;
	bus->ops = &bcma_host_pci_ops;

	/* Register */
	err = bcma_bus_register(bus);
	if (err)
		goto err_pci_unmap_mmio;

	pci_set_drvdata(dev, bus);

out:
	return err;

err_pci_unmap_mmio:
	pci_iounmap(dev, bus->mmio);
err_pci_release_regions:
	pci_release_regions(dev);
err_pci_disable:
	pci_disable_device(dev);
err_kfree_bus:
	kfree(bus);
	return err;
}

/* Tear down in reverse order of bcma_host_pci_probe(). */
static void bcma_host_pci_remove(struct pci_dev *dev)
{
	struct bcma_bus *bus = pci_get_drvdata(dev);

	bcma_bus_unregister(bus);
	pci_iounmap(dev, bus->mmio);
	pci_release_regions(dev);
	pci_disable_device(dev);
	kfree(bus);
	pci_set_drvdata(dev, NULL);
}

#ifdef CONFIG_PM
static int bcma_host_pci_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct bcma_bus *bus = pci_get_drvdata(pdev);

	/* Forget the cached window target so the first access after resume
	 * reprograms the BAR0 windows (their state is stale after suspend). */
	bus->mapped_core = NULL;

	return bcma_bus_suspend(bus);
}

static int bcma_host_pci_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct bcma_bus *bus = pci_get_drvdata(pdev);

	return bcma_bus_resume(bus);
}

static SIMPLE_DEV_PM_OPS(bcma_pm_ops, bcma_host_pci_suspend,
			 bcma_host_pci_resume);
#define BCMA_PM_OPS (&bcma_pm_ops)

#else /* CONFIG_PM */

#define BCMA_PM_OPS NULL

#endif /* CONFIG_PM */

/* PCI IDs of Broadcom bridges that expose an AMBA/bcma bus. */
static DEFINE_PCI_DEVICE_TABLE(bcma_pci_bridge_tbl) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x0576) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4331) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4353) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4357) },
	{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4727) },
	{ 0, },
};
MODULE_DEVICE_TABLE(pci, bcma_pci_bridge_tbl);

static struct pci_driver bcma_pci_bridge_driver = {
	.name = "bcma-pci-bridge",
	.id_table = bcma_pci_bridge_tbl,
	.probe = bcma_host_pci_probe,
	.remove = bcma_host_pci_remove,
	.driver.pm = BCMA_PM_OPS,
};

/* Called from the bcma core's module init/exit, not module_pci_driver(). */
int __init bcma_host_pci_init(void)
{
	return pci_register_driver(&bcma_pci_bridge_driver);
}

void __exit bcma_host_pci_exit(void)
{
	pci_unregister_driver(&bcma_pci_bridge_driver);
}