pci-common.c revision 38973ba7903fa0660a31b2bdc50ff711ec8d08c9
1/*
2 * Contains common pci routines for ALL ppc platform
3 * (based on pci_32.c and pci_64.c)
4 *
5 * Port for PPC64 David Engebretsen, IBM Corp.
6 * Contains common pci routines for ppc64 platform, pSeries and iSeries brands.
7 *
8 * Copyright (C) 2003 Anton Blanchard <anton@au.ibm.com>, IBM
9 *   Rework, based on alpha PCI code.
10 *
11 * Common pmac/prep/chrp pci routines. -- Cort
12 *
13 * This program is free software; you can redistribute it and/or
14 * modify it under the terms of the GNU General Public License
15 * as published by the Free Software Foundation; either version
16 * 2 of the License, or (at your option) any later version.
17 */
18
19#include <linux/kernel.h>
20#include <linux/pci.h>
21#include <linux/string.h>
22#include <linux/init.h>
23#include <linux/bootmem.h>
24#include <linux/export.h>
25#include <linux/of_address.h>
26#include <linux/of_pci.h>
27#include <linux/mm.h>
28#include <linux/list.h>
29#include <linux/syscalls.h>
30#include <linux/irq.h>
31#include <linux/vmalloc.h>
32#include <linux/slab.h>
33
34#include <asm/processor.h>
35#include <asm/io.h>
36#include <asm/prom.h>
37#include <asm/pci-bridge.h>
38#include <asm/byteorder.h>
39#include <asm/machdep.h>
40#include <asm/ppc-pci.h>
41#include <asm/firmware.h>
42#include <asm/eeh.h>
43
/* Protects hose_list and global_phb_number below */
static DEFINE_SPINLOCK(hose_spinlock);
/* List of all PCI controllers (PHBs) registered in the system */
LIST_HEAD(hose_list);

/* XXX kill that some day ... */
static int global_phb_number;		/* Global phb counter */

/* ISA Memory physical address */
resource_size_t isa_mem_base;

/* DMA ops hooked up to PCI devices in pcibios_setup_bus_devices();
 * platforms may replace the default via set_pci_dma_ops().
 */
static struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
55
/* Replace the DMA ops that will be installed on PCI devices at setup time */
void set_pci_dma_ops(struct dma_map_ops *dma_ops)
{
	pci_dma_ops = dma_ops;
}
60
/* Return the DMA ops currently used for PCI devices */
struct dma_map_ops *get_pci_dma_ops(void)
{
	return pci_dma_ops;
}
EXPORT_SYMBOL(get_pci_dma_ops);
66
/*
 * Allocate a new PCI controller (PHB), give it a unique global number
 * and link it onto hose_list.  May be called before the slab allocator
 * is up, in which case the structure comes from bootmem and is marked
 * non-dynamic (it can then never be freed).
 */
struct pci_controller *pcibios_alloc_controller(struct device_node *dev)
{
	struct pci_controller *phb;

	/* Falls back to bootmem when mem_init has not run yet */
	phb = zalloc_maybe_bootmem(sizeof(struct pci_controller), GFP_KERNEL);
	if (phb == NULL)
		return NULL;
	spin_lock(&hose_spinlock);
	phb->global_number = global_phb_number++;
	list_add_tail(&phb->list_node, &hose_list);
	spin_unlock(&hose_spinlock);
	phb->dn = dev;
	/* Only slab-allocated controllers may be kfree()d later */
	phb->is_dynamic = mem_init_done;
#ifdef CONFIG_PPC64
	if (dev) {
		int nid = of_node_to_nid(dev);

		/* Treat invalid or offline nodes as "no node" */
		if (nid < 0 || !node_online(nid))
			nid = -1;

		PHB_SET_NODE(phb, nid);
	}
#endif
	return phb;
}
92
/*
 * Unlink a PHB from hose_list and release its memory if it was
 * dynamically allocated.  Bootmem-allocated controllers are only
 * unlinked; their memory cannot be returned.
 */
void pcibios_free_controller(struct pci_controller *phb)
{
	/* Same lock as used for insertion in pcibios_alloc_controller() */
	spin_lock(&hose_spinlock);
	list_del(&phb->list_node);
	spin_unlock(&hose_spinlock);

	if (phb->is_dynamic)
		kfree(phb);
}
102
/* Size of a PHB's IO space: ppc64 tracks it explicitly in pci_io_size,
 * ppc32 derives it from the IO resource.
 */
static resource_size_t pcibios_io_size(const struct pci_controller *hose)
{
#ifdef CONFIG_PPC64
	return hose->pci_io_size;
#else
	return resource_size(&hose->io_resource);
#endif
}
111
112int pcibios_vaddr_is_ioport(void __iomem *address)
113{
114	int ret = 0;
115	struct pci_controller *hose;
116	resource_size_t size;
117
118	spin_lock(&hose_spinlock);
119	list_for_each_entry(hose, &hose_list, list_node) {
120		size = pcibios_io_size(hose);
121		if (address >= hose->io_base_virt &&
122		    address < (hose->io_base_virt + size)) {
123			ret = 1;
124			break;
125		}
126	}
127	spin_unlock(&hose_spinlock);
128	return ret;
129}
130
131unsigned long pci_address_to_pio(phys_addr_t address)
132{
133	struct pci_controller *hose;
134	resource_size_t size;
135	unsigned long ret = ~0;
136
137	spin_lock(&hose_spinlock);
138	list_for_each_entry(hose, &hose_list, list_node) {
139		size = pcibios_io_size(hose);
140		if (address >= hose->io_base_phys &&
141		    address < (hose->io_base_phys + size)) {
142			unsigned long base =
143				(unsigned long)hose->io_base_virt - _IO_BASE;
144			ret = base + (address - hose->io_base_phys);
145			break;
146		}
147	}
148	spin_unlock(&hose_spinlock);
149
150	return ret;
151}
152EXPORT_SYMBOL_GPL(pci_address_to_pio);
153
154/*
155 * Return the domain number for this bus.
156 */
157int pci_domain_nr(struct pci_bus *bus)
158{
159	struct pci_controller *hose = pci_bus_to_host(bus);
160
161	return hose->global_number;
162}
163EXPORT_SYMBOL(pci_domain_nr);
164
165/* This routine is meant to be used early during boot, when the
166 * PCI bus numbers have not yet been assigned, and you need to
167 * issue PCI config cycles to an OF device.
168 * It could also be used to "fix" RTAS config cycles if you want
169 * to set pci_assign_all_buses to 1 and still use RTAS for PCI
170 * config cycles.
171 */
172struct pci_controller* pci_find_hose_for_OF_device(struct device_node* node)
173{
174	while(node) {
175		struct pci_controller *hose, *tmp;
176		list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
177			if (hose->dn == node)
178				return hose;
179		node = node->parent;
180	}
181	return NULL;
182}
183
/* sysfs "devspec" attribute: the full device-tree path of the PCI
 * device, or the empty string if it has no device-tree node.
 */
static ssize_t pci_show_devspec(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev;
	struct device_node *np;

	pdev = to_pci_dev (dev);
	np = pci_device_to_OF_node(pdev);
	if (np == NULL || np->full_name == NULL)
		return 0;
	return sprintf(buf, "%s", np->full_name);
}
static DEVICE_ATTR(devspec, S_IRUGO, pci_show_devspec, NULL);
197
/* Add sysfs properties */
int pcibios_add_platform_entries(struct pci_dev *pdev)
{
	/* Currently only the "devspec" attribute defined above */
	return device_create_file(&pdev->dev, &dev_attr_devspec);
}
203
/* Hook for parsing arch-specific PCI command-line options; this
 * implementation recognizes none and returns the string unconsumed.
 */
char __devinit *pcibios_setup(char *str)
{
	return str;
}
208
209/*
210 * Reads the interrupt pin to determine if interrupt is use by card.
211 * If the interrupt is used, then gets the interrupt line from the
212 * openfirmware and sets it in the pci_dev and pci_config line.
213 */
static int pci_read_irq_line(struct pci_dev *pci_dev)
{
	struct of_irq oirq;
	unsigned int virq;

	/* The current device-tree that iSeries generates from the HV
	 * PCI informations doesn't contain proper interrupt routing,
	 * and all the fallback would do is print out crap, so we
	 * don't attempt to resolve the interrupts here at all, some
	 * iSeries specific fixup does it.
	 *
	 * In the long run, we will hopefully fix the generated device-tree
	 * instead.
	 */
#ifdef CONFIG_PPC_ISERIES
	if (firmware_has_feature(FW_FEATURE_ISERIES))
		return -1;
#endif

	pr_debug("PCI: Try to map irq for %s...\n", pci_name(pci_dev));

#ifdef DEBUG
	/* Poison the structure so unfilled fields stand out in the logs */
	memset(&oirq, 0xff, sizeof(oirq));
#endif
	/* Try to get a mapping from the device-tree */
	if (of_irq_map_pci(pci_dev, &oirq)) {
		u8 line, pin;

		/* If that fails, lets fallback to what is in the config
		 * space and map that through the default controller. We
		 * also set the type to level low since that's what PCI
		 * interrupts are. If your platform does differently, then
		 * either provide a proper interrupt tree or don't use this
		 * function.
		 */
		if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_PIN, &pin))
			return -1;
		/* Pin 0 means the function uses no interrupt pin at all */
		if (pin == 0)
			return -1;
		if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_LINE, &line) ||
		    line == 0xff || line == 0) {
			return -1;
		}
		pr_debug(" No map ! Using line %d (pin %d) from PCI config\n",
			 line, pin);

		/* Map through the default (NULL) interrupt host */
		virq = irq_create_mapping(NULL, line);
		if (virq != NO_IRQ)
			irq_set_irq_type(virq, IRQ_TYPE_LEVEL_LOW);
	} else {
		pr_debug(" Got one, spec %d cells (0x%08x 0x%08x...) on %s\n",
			 oirq.size, oirq.specifier[0], oirq.specifier[1],
			 oirq.controller ? oirq.controller->full_name :
			 "<default>");

		virq = irq_create_of_mapping(oirq.controller, oirq.specifier,
					     oirq.size);
	}
	if(virq == NO_IRQ) {
		pr_debug(" Failed to map !\n");
		return -1;
	}

	pr_debug(" Mapped to linux irq %d\n", virq);

	pci_dev->irq = virq;

	return 0;
}
283
284/*
285 * Platform support for /proc/bus/pci/X/Y mmap()s,
286 * modelled on the sparc64 implementation by Dave Miller.
287 *  -- paulus.
288 */
289
290/*
291 * Adjust vm_pgoff of VMA such that it is the physical page offset
292 * corresponding to the 32-bit pci bus offset for DEV requested by the user.
293 *
294 * Basically, the user finds the base address for his device which he wishes
295 * to mmap.  They read the 32-bit value from the config space base register,
296 * add whatever PAGE_SIZE multiple offset they wish, and feed this into the
297 * offset parameter of mmap on /proc/bus/pci/XXX for that device.
298 *
299 * Returns negative error code on failure, zero on success.
300 */
301static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
302					       resource_size_t *offset,
303					       enum pci_mmap_state mmap_state)
304{
305	struct pci_controller *hose = pci_bus_to_host(dev->bus);
306	unsigned long io_offset = 0;
307	int i, res_bit;
308
309	if (hose == 0)
310		return NULL;		/* should never happen */
311
312	/* If memory, add on the PCI bridge address offset */
313	if (mmap_state == pci_mmap_mem) {
314#if 0 /* See comment in pci_resource_to_user() for why this is disabled */
315		*offset += hose->pci_mem_offset;
316#endif
317		res_bit = IORESOURCE_MEM;
318	} else {
319		io_offset = (unsigned long)hose->io_base_virt - _IO_BASE;
320		*offset += io_offset;
321		res_bit = IORESOURCE_IO;
322	}
323
324	/*
325	 * Check that the offset requested corresponds to one of the
326	 * resources of the device.
327	 */
328	for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
329		struct resource *rp = &dev->resource[i];
330		int flags = rp->flags;
331
332		/* treat ROM as memory (should be already) */
333		if (i == PCI_ROM_RESOURCE)
334			flags |= IORESOURCE_MEM;
335
336		/* Active and same type? */
337		if ((flags & res_bit) == 0)
338			continue;
339
340		/* In the range of this resource? */
341		if (*offset < (rp->start & PAGE_MASK) || *offset > rp->end)
342			continue;
343
344		/* found it! construct the final physical address */
345		if (mmap_state == pci_mmap_io)
346			*offset += hose->io_base_phys - io_offset;
347		return rp;
348	}
349
350	return NULL;
351}
352
353/*
354 * Set vm_page_prot of VMA, as appropriate for this architecture, for a pci
355 * device mapping.
356 */
357static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
358				      pgprot_t protection,
359				      enum pci_mmap_state mmap_state,
360				      int write_combine)
361{
362	unsigned long prot = pgprot_val(protection);
363
364	/* Write combine is always 0 on non-memory space mappings. On
365	 * memory space, if the user didn't pass 1, we check for a
366	 * "prefetchable" resource. This is a bit hackish, but we use
367	 * this to workaround the inability of /sysfs to provide a write
368	 * combine bit
369	 */
370	if (mmap_state != pci_mmap_mem)
371		write_combine = 0;
372	else if (write_combine == 0) {
373		if (rp->flags & IORESOURCE_PREFETCH)
374			write_combine = 1;
375	}
376
377	/* XXX would be nice to have a way to ask for write-through */
378	if (write_combine)
379		return pgprot_noncached_wc(prot);
380	else
381		return pgprot_noncached(prot);
382}
383
384/*
385 * This one is used by /dev/mem and fbdev who have no clue about the
386 * PCI device, it tries to find the PCI device first and calls the
387 * above routine
388 */
/* Returns a page protection for mapping the physical range at PFN:
 * cacheable for RAM, write-combine for prefetchable PCI memory,
 * plain uncached otherwise.
 */
pgprot_t pci_phys_mem_access_prot(struct file *file,
				  unsigned long pfn,
				  unsigned long size,
				  pgprot_t prot)
{
	struct pci_dev *pdev = NULL;
	struct resource *found = NULL;
	resource_size_t offset = ((resource_size_t)pfn) << PAGE_SHIFT;
	int i;

	/* System RAM keeps its normal cacheable protection */
	if (page_is_ram(pfn))
		return prot;

	prot = pgprot_noncached(prot);
	/* Scan every PCI device for a MEM resource covering the address */
	for_each_pci_dev(pdev) {
		for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
			struct resource *rp = &pdev->resource[i];
			int flags = rp->flags;

			/* Active and same type? */
			if ((flags & IORESOURCE_MEM) == 0)
				continue;
			/* In the range of this resource? */
			if (offset < (rp->start & PAGE_MASK) ||
			    offset > rp->end)
				continue;
			found = rp;
			break;
		}
		if (found)
			break;
	}
	if (found) {
		if (found->flags & IORESOURCE_PREFETCH)
			prot = pgprot_noncached_wc(prot);
		/* Drop the reference for_each_pci_dev() holds on the match */
		pci_dev_put(pdev);
	}

	pr_debug("PCI: Non-PCI map for %llx, prot: %lx\n",
		 (unsigned long long)offset, pgprot_val(prot));

	return prot;
}
432
433
434/*
435 * Perform the actual remap of the pages for a PCI device mapping, as
436 * appropriate for this architecture.  The region in the process to map
437 * is described by vm_start and vm_end members of VMA, the base physical
438 * address is found in vm_pgoff.
439 * The pci device structure is provided so that architectures may make mapping
440 * decisions on a per-device or per-bus basis.
441 *
442 * Returns a negative error code on failure, zero on success.
443 */
444int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
445			enum pci_mmap_state mmap_state, int write_combine)
446{
447	resource_size_t offset =
448		((resource_size_t)vma->vm_pgoff) << PAGE_SHIFT;
449	struct resource *rp;
450	int ret;
451
452	rp = __pci_mmap_make_offset(dev, &offset, mmap_state);
453	if (rp == NULL)
454		return -EINVAL;
455
456	vma->vm_pgoff = offset >> PAGE_SHIFT;
457	vma->vm_page_prot = __pci_mmap_set_pgprot(dev, rp,
458						  vma->vm_page_prot,
459						  mmap_state, write_combine);
460
461	ret = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
462			       vma->vm_end - vma->vm_start, vma->vm_page_prot);
463
464	return ret;
465}
466
/* This provides legacy IO read access on a bus.  Returns the number of
 * bytes read on success, negative errno otherwise.
 */
int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val, size_t size)
{
	unsigned long offset;
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct resource *rp = &hose->io_resource;
	void __iomem *addr;

	/* Check if port can be supported by that bus. We only check
	 * the ranges of the PHB though, not the bus itself as the rules
	 * for forwarding legacy cycles down bridges are not our problem
	 * here. So if the host bridge supports it, we do it.
	 */
	offset = (unsigned long)hose->io_base_virt - _IO_BASE;
	offset += port;

	if (!(rp->flags & IORESOURCE_IO))
		return -ENXIO;
	/* NOTE(review): rp->end is inclusive, so "(offset + size) > rp->end"
	 * rejects an access ending exactly on the window's last byte --
	 * looks like an off-by-one; confirm against callers before changing.
	 */
	if (offset < rp->start || (offset + size) > rp->end)
		return -ENXIO;
	addr = hose->io_base_virt + port;

	switch(size) {
	case 1:
		*((u8 *)val) = in_8(addr);
		return 1;
	case 2:
		if (port & 1)
			return -EINVAL;
		*((u16 *)val) = in_le16(addr);
		return 2;
	case 4:
		if (port & 3)
			return -EINVAL;
		*((u32 *)val) = in_le32(addr);
		return 4;
	}
	return -EINVAL;
}
506
/* This provides legacy IO write access on a bus.  Returns the number of
 * bytes written on success, negative errno otherwise.
 */
int pci_legacy_write(struct pci_bus *bus, loff_t port, u32 val, size_t size)
{
	unsigned long offset;
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct resource *rp = &hose->io_resource;
	void __iomem *addr;

	/* Check if port can be supported by that bus. We only check
	 * the ranges of the PHB though, not the bus itself as the rules
	 * for forwarding legacy cycles down bridges are not our problem
	 * here. So if the host bridge supports it, we do it.
	 */
	offset = (unsigned long)hose->io_base_virt - _IO_BASE;
	offset += port;

	if (!(rp->flags & IORESOURCE_IO))
		return -ENXIO;
	/* NOTE(review): same inclusive-end range check as pci_legacy_read();
	 * see the comment there about a possible off-by-one.
	 */
	if (offset < rp->start || (offset + size) > rp->end)
		return -ENXIO;
	addr = hose->io_base_virt + port;

	/* WARNING: The generic code is idiotic. It gets passed a pointer
	 * to what can be a 1, 2 or 4 byte quantity and always reads that
	 * as a u32, which means that we have to correct the location of
	 * the data read within those 32 bits for size 1 and 2
	 */
	switch(size) {
	case 1:
		out_8(addr, val >> 24);
		return 1;
	case 2:
		if (port & 1)
			return -EINVAL;
		out_le16(addr, val >> 16);
		return 2;
	case 4:
		if (port & 3)
			return -EINVAL;
		out_le32(addr, val);
		return 4;
	}
	return -EINVAL;
}
551
/* This provides legacy IO or memory mmap access on a bus */
int pci_mmap_legacy_page_range(struct pci_bus *bus,
			       struct vm_area_struct *vma,
			       enum pci_mmap_state mmap_state)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	resource_size_t offset =
		((resource_size_t)vma->vm_pgoff) << PAGE_SHIFT;
	resource_size_t size = vma->vm_end - vma->vm_start;
	struct resource *rp;

	pr_debug("pci_mmap_legacy_page_range(%04x:%02x, %s @%llx..%llx)\n",
		 pci_domain_nr(bus), bus->number,
		 mmap_state == pci_mmap_mem ? "MEM" : "IO",
		 (unsigned long long)offset,
		 (unsigned long long)(offset + size - 1));

	if (mmap_state == pci_mmap_mem) {
		/* Hack alert !
		 *
		 * Because X is lame and can fail starting if it gets an error trying
		 * to mmap legacy_mem (instead of just moving on without legacy memory
		 * access) we fake it here by giving it anonymous memory, effectively
		 * behaving just like /dev/zero
		 */
		if ((offset + size) > hose->isa_mem_size) {
			printk(KERN_DEBUG
			       "Process %s (pid:%d) mapped non-existing PCI legacy memory for 0%04x:%02x\n",
			       current->comm, current->pid, pci_domain_nr(bus), bus->number);
			if (vma->vm_flags & VM_SHARED)
				return shmem_zero_setup(vma);
			return 0;
		}
		/* Translate the bus-relative offset into the ISA memory window */
		offset += hose->isa_mem_phys;
	} else {
		/* IO space: validate against the PHB's IO resource first */
		unsigned long io_offset = (unsigned long)hose->io_base_virt - _IO_BASE;
		unsigned long roffset = offset + io_offset;
		rp = &hose->io_resource;
		if (!(rp->flags & IORESOURCE_IO))
			return -ENXIO;
		if (roffset < rp->start || (roffset + size) > rp->end)
			return -ENXIO;
		offset += hose->io_base_phys;
	}
	pr_debug(" -> mapping phys %llx\n", (unsigned long long)offset);

	/* Legacy mappings are always uncached */
	vma->vm_pgoff = offset >> PAGE_SHIFT;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}
604
/* Translate a device resource into the (start, end) pair shown to
 * userland; IO resources are rebased to the _IO_BASE-relative token.
 */
void pci_resource_to_user(const struct pci_dev *dev, int bar,
			  const struct resource *rsrc,
			  resource_size_t *start, resource_size_t *end)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	resource_size_t offset = 0;

	if (hose == NULL)
		return;

	if (rsrc->flags & IORESOURCE_IO)
		offset = (unsigned long)hose->io_base_virt - _IO_BASE;

	/* We pass a fully fixed up address to userland for MMIO instead of
	 * a BAR value because X is lame and expects to be able to use that
	 * to pass to /dev/mem !
	 *
	 * That means that we'll have potentially 64 bits values where some
	 * userland apps only expect 32 (like X itself since it thinks only
	 * Sparc has 64 bits MMIO) but if we don't do that, we break it on
	 * 32 bits CHRPs :-(
	 *
	 * Hopefully, the sysfs interface is immune to that gunk. Once X
	 * has been fixed (and the fix spread enough), we can re-enable the
	 * 2 lines below and pass down a BAR value to userland. In that case
	 * we'll also have to re-enable the matching code in
	 * __pci_mmap_make_offset().
	 *
	 * BenH.
	 */
#if 0
	else if (rsrc->flags & IORESOURCE_MEM)
		offset = hose->pci_mem_offset;
#endif

	*start = rsrc->start - offset;
	*end = rsrc->end - offset;
}
643
644/**
645 * pci_process_bridge_OF_ranges - Parse PCI bridge resources from device tree
646 * @hose: newly allocated pci_controller to be setup
647 * @dev: device node of the host bridge
648 * @primary: set if primary bus (32 bits only, soon to be deprecated)
649 *
650 * This function will parse the "ranges" property of a PCI host bridge device
651 * node and setup the resource mapping of a pci controller based on its
652 * content.
653 *
654 * Life would be boring if it wasn't for a few issues that we have to deal
655 * with here:
656 *
657 *   - We can only cope with one IO space range and up to 3 Memory space
658 *     ranges. However, some machines (thanks Apple !) tend to split their
659 *     space into lots of small contiguous ranges. So we have to coalesce.
660 *
661 *   - We can only cope with all memory ranges having the same offset
662 *     between CPU addresses and PCI addresses. Unfortunately, some bridges
663 *     are setup for a large 1:1 mapping along with a small "window" which
664 *     maps PCI address 0 to some arbitrary high address of the CPU space in
665 *     order to give access to the ISA memory hole.
666 *     The way out of here that I've chosen for now is to always set the
667 *     offset based on the first resource found, then override it if we
668 *     have a different offset and the previous was set by an ISA hole.
669 *
670 *   - Some busses have IO space not starting at 0, which causes trouble with
671 *     the way we do our IO resource renumbering. The code somewhat deals with
672 *     it for 64 bits but I would expect problems on 32 bits.
673 *
674 *   - Some 32 bits platforms such as 4xx can have physical space larger than
675 *     32 bits so we need to use 64 bits values for the parsing
676 */
void __devinit pci_process_bridge_OF_ranges(struct pci_controller *hose,
					    struct device_node *dev,
					    int primary)
{
	const u32 *ranges;
	int rlen;
	int pna = of_n_addr_cells(dev);
	/* Cells per "ranges" entry: 3 (PCI address incl. space code) +
	 * pna (parent address) + 2 (size)
	 */
	int np = pna + 5;
	int memno = 0, isa_hole = -1;
	u32 pci_space;
	unsigned long long pci_addr, cpu_addr, pci_next, cpu_next, size;
	unsigned long long isa_mb = 0;	/* CPU address of the ISA memory hole */
	struct resource *res;

	printk(KERN_INFO "PCI host bridge %s %s ranges:\n",
	       dev->full_name, primary ? "(primary)" : "");

	/* Get ranges property */
	ranges = of_get_property(dev, "ranges", &rlen);
	if (ranges == NULL)
		return;

	/* Parse it */
	while ((rlen -= np * 4) >= 0) {
		/* Read next ranges element */
		pci_space = ranges[0];
		pci_addr = of_read_number(ranges + 1, 2);
		cpu_addr = of_translate_address(dev, ranges + 3);
		size = of_read_number(ranges + pna + 3, 2);
		ranges += np;

		/* If we failed translation or got a zero-sized region
		 * (some FW try to feed us with non sensical zero sized regions
		 * such as power3 which look like some kind of attempt at exposing
		 * the VGA memory hole)
		 */
		if (cpu_addr == OF_BAD_ADDR || size == 0)
			continue;

		/* Now consume following elements while they are contiguous */
		for (; rlen >= np * sizeof(u32);
		     ranges += np, rlen -= np * 4) {
			if (ranges[0] != pci_space)
				break;
			pci_next = of_read_number(ranges + 1, 2);
			cpu_next = of_translate_address(dev, ranges + 3);
			if (pci_next != pci_addr + size ||
			    cpu_next != cpu_addr + size)
				break;
			size += of_read_number(ranges + pna + 3, 2);
		}

		/* Act based on address space type */
		res = NULL;
		switch ((pci_space >> 24) & 0x3) {
		case 1:		/* PCI IO space */
			printk(KERN_INFO
			       "  IO 0x%016llx..0x%016llx -> 0x%016llx\n",
			       cpu_addr, cpu_addr + size - 1, pci_addr);

			/* We support only one IO range */
			if (hose->pci_io_size) {
				printk(KERN_INFO
				       " \\--> Skipped (too many) !\n");
				continue;
			}
#ifdef CONFIG_PPC32
			/* On 32 bits, limit I/O space to 16MB */
			if (size > 0x01000000)
				size = 0x01000000;

			/* 32 bits needs to map IOs here */
			hose->io_base_virt = ioremap(cpu_addr, size);

			/* Expect trouble if pci_addr is not 0 */
			if (primary)
				isa_io_base =
					(unsigned long)hose->io_base_virt;
#endif /* CONFIG_PPC32 */
			/* pci_io_size and io_base_phys always represent IO
			 * space starting at 0 so we factor in pci_addr
			 */
			hose->pci_io_size = pci_addr + size;
			hose->io_base_phys = cpu_addr - pci_addr;

			/* Build resource */
			res = &hose->io_resource;
			res->flags = IORESOURCE_IO;
			res->start = pci_addr;
			break;
		case 2:		/* PCI Memory space */
		case 3:		/* PCI 64 bits Memory space */
			printk(KERN_INFO
			       " MEM 0x%016llx..0x%016llx -> 0x%016llx %s\n",
			       cpu_addr, cpu_addr + size - 1, pci_addr,
			       (pci_space & 0x40000000) ? "Prefetch" : "");

			/* We support only 3 memory ranges */
			if (memno >= 3) {
				printk(KERN_INFO
				       " \\--> Skipped (too many) !\n");
				continue;
			}
			/* Handles ISA memory hole space here */
			if (pci_addr == 0) {
				isa_mb = cpu_addr;
				isa_hole = memno;
				if (primary || isa_mem_base == 0)
					isa_mem_base = cpu_addr;
				hose->isa_mem_phys = cpu_addr;
				hose->isa_mem_size = size;
			}

			/* We get the PCI/Mem offset from the first range or
			 * the current one if the offset came from an ISA
			 * hole. If they don't match, bugger.
			 */
			if (memno == 0 ||
			    (isa_hole >= 0 && pci_addr != 0 &&
			     hose->pci_mem_offset == isa_mb))
				hose->pci_mem_offset = cpu_addr - pci_addr;
			else if (pci_addr != 0 &&
				 hose->pci_mem_offset != cpu_addr - pci_addr) {
				printk(KERN_INFO
				       " \\--> Skipped (offset mismatch) !\n");
				continue;
			}

			/* Build resource */
			res = &hose->mem_resources[memno++];
			res->flags = IORESOURCE_MEM;
			if (pci_space & 0x40000000)
				res->flags |= IORESOURCE_PREFETCH;
			res->start = cpu_addr;
			break;
		}
		if (res != NULL) {
			res->name = dev->full_name;
			res->end = res->start + size - 1;
			res->parent = NULL;
			res->sibling = NULL;
			res->child = NULL;
		}
	}

	/* If there's an ISA hole and the pci_mem_offset is -not- matching
	 * the ISA hole offset, then we need to remove the ISA hole from
	 * the resource list for that bridge
	 */
	if (isa_hole >= 0 && hose->pci_mem_offset != isa_mb) {
		unsigned int next = isa_hole + 1;
		printk(KERN_INFO " Removing ISA hole at 0x%016llx\n", isa_mb);
		if (next < memno)
			memmove(&hose->mem_resources[isa_hole],
				&hose->mem_resources[next],
				sizeof(struct resource) * (memno - next));
		hose->mem_resources[--memno].flags = 0;
	}
}
836
837/* Decide whether to display the domain number in /proc */
838int pci_proc_domain(struct pci_bus *bus)
839{
840	struct pci_controller *hose = pci_bus_to_host(bus);
841
842	if (!pci_has_flag(PCI_ENABLE_PROC_DOMAINS))
843		return 0;
844	if (pci_has_flag(PCI_COMPAT_DOMAIN_0))
845		return hose->global_number != 0;
846	return 1;
847}
848
849/* This header fixup will do the resource fixup for all devices as they are
850 * probed, but not for bridge ranges
851 */
852static void __devinit pcibios_fixup_resources(struct pci_dev *dev)
853{
854	struct pci_controller *hose = pci_bus_to_host(dev->bus);
855	int i;
856
857	if (!hose) {
858		printk(KERN_ERR "No host bridge for PCI dev %s !\n",
859		       pci_name(dev));
860		return;
861	}
862	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
863		struct resource *res = dev->resource + i;
864		if (!res->flags)
865			continue;
866
867		/* If we're going to re-assign everything, we mark all resources
868		 * as unset (and 0-base them). In addition, we mark BARs starting
869		 * at 0 as unset as well, except if PCI_PROBE_ONLY is also set
870		 * since in that case, we don't want to re-assign anything
871		 */
872		if (pci_has_flag(PCI_REASSIGN_ALL_RSRC) ||
873		    (res->start == 0 && !pci_has_flag(PCI_PROBE_ONLY))) {
874			/* Only print message if not re-assigning */
875			if (!pci_has_flag(PCI_REASSIGN_ALL_RSRC))
876				pr_debug("PCI:%s Resource %d %016llx-%016llx [%x] "
877					 "is unassigned\n",
878					 pci_name(dev), i,
879					 (unsigned long long)res->start,
880					 (unsigned long long)res->end,
881					 (unsigned int)res->flags);
882			res->end -= res->start;
883			res->start = 0;
884			res->flags |= IORESOURCE_UNSET;
885			continue;
886		}
887
888		pr_debug("PCI:%s Resource %d %016llx-%016llx [%x]\n",
889			 pci_name(dev), i,
890			 (unsigned long long)res->start,\
891			 (unsigned long long)res->end,
892			 (unsigned int)res->flags);
893	}
894
895	/* Call machine specific resource fixup */
896	if (ppc_md.pcibios_fixup_resources)
897		ppc_md.pcibios_fixup_resources(dev);
898}
899DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_resources);
900
901/* This function tries to figure out if a bridge resource has been initialized
902 * by the firmware or not. It doesn't have to be absolutely bullet proof, but
903 * things go more smoothly when it gets it right. It should covers cases such
904 * as Apple "closed" bridge resources and bare-metal pSeries unassigned bridges
905 */
/* Returns 1 when the given bridge window looks uninitialized (and
 * should therefore be cleared for re-assignment), 0 when it appears
 * to have been set up by firmware.
 */
static int __devinit pcibios_uninitialized_bridge_resource(struct pci_bus *bus,
							   struct resource *res)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct pci_dev *dev = bus->self;
	resource_size_t offset;
	u16 command;
	int i;

	/* We don't do anything if PCI_PROBE_ONLY is set */
	if (pci_has_flag(PCI_PROBE_ONLY))
		return 0;

	/* Job is a bit different between memory and IO */
	if (res->flags & IORESOURCE_MEM) {
		/* If the BAR is non-0 (res != pci_mem_offset) then it's probably been
		 * initialized by somebody
		 */
		if (res->start != hose->pci_mem_offset)
			return 0;

		/* The BAR is 0, let's check if memory decoding is enabled on
		 * the bridge. If not, we consider it unassigned
		 */
		pci_read_config_word(dev, PCI_COMMAND, &command);
		if ((command & PCI_COMMAND_MEMORY) == 0)
			return 1;

		/* Memory decoding is enabled and the BAR is 0. If any of the bridge
		 * resources covers that starting address (0) then it's good enough for
		 * us for memory
		 */
		for (i = 0; i < 3; i++) {
			if ((hose->mem_resources[i].flags & IORESOURCE_MEM) &&
			    hose->mem_resources[i].start == hose->pci_mem_offset)
				return 0;
		}

		/* Well, it starts at 0 and we know it will collide so we may as
		 * well consider it as unassigned. That covers the Apple case.
		 */
		return 1;
	} else {
		/* If the BAR is non-0, then we consider it assigned */
		offset = (unsigned long)hose->io_base_virt - _IO_BASE;
		if (((res->start - offset) & 0xfffffffful) != 0)
			return 0;

		/* Here, we are a bit different than memory as typically IO space
		 * starting at low addresses -is- valid. What we do instead if that
		 * we consider as unassigned anything that doesn't have IO enabled
		 * in the PCI command register, and that's it.
		 */
		pci_read_config_word(dev, PCI_COMMAND, &command);
		if (command & PCI_COMMAND_IO)
			return 0;

		/* It's starting at 0 and IO is disabled in the bridge, consider
		 * it unassigned
		 */
		return 1;
	}
}
969
970/* Fixup resources of a PCI<->PCI bridge */
971static void __devinit pcibios_fixup_bridge(struct pci_bus *bus)
972{
973	struct resource *res;
974	int i;
975
976	struct pci_dev *dev = bus->self;
977
978	pci_bus_for_each_resource(bus, res, i) {
979		if (!res || !res->flags)
980			continue;
981		if (i >= 3 && bus->self->transparent)
982			continue;
983
984		/* If we are going to re-assign everything, mark the resource
985		 * as unset and move it down to 0
986		 */
987		if (pci_has_flag(PCI_REASSIGN_ALL_RSRC)) {
988			res->flags |= IORESOURCE_UNSET;
989			res->end -= res->start;
990			res->start = 0;
991			continue;
992		}
993
994		pr_debug("PCI:%s Bus rsrc %d %016llx-%016llx [%x]\n",
995			 pci_name(dev), i,
996			 (unsigned long long)res->start,\
997			 (unsigned long long)res->end,
998			 (unsigned int)res->flags);
999
1000		/* Try to detect uninitialized P2P bridge resources,
1001		 * and clear them out so they get re-assigned later
1002		 */
1003		if (pcibios_uninitialized_bridge_resource(bus, res)) {
1004			res->flags = 0;
1005			pr_debug("PCI:%s            (unassigned)\n", pci_name(dev));
1006		}
1007	}
1008}
1009
1010void __devinit pcibios_setup_bus_self(struct pci_bus *bus)
1011{
1012	/* Fix up the bus resources for P2P bridges */
1013	if (bus->self != NULL)
1014		pcibios_fixup_bridge(bus);
1015
1016	/* Platform specific bus fixups. This is currently only used
1017	 * by fsl_pci and I'm hoping to get rid of it at some point
1018	 */
1019	if (ppc_md.pcibios_fixup_bus)
1020		ppc_md.pcibios_fixup_bus(bus);
1021
1022	/* Setup bus DMA mappings */
1023	if (ppc_md.pci_dma_bus_setup)
1024		ppc_md.pci_dma_bus_setup(bus);
1025}
1026
1027void __devinit pcibios_setup_bus_devices(struct pci_bus *bus)
1028{
1029	struct pci_dev *dev;
1030
1031	pr_debug("PCI: Fixup bus devices %d (%s)\n",
1032		 bus->number, bus->self ? pci_name(bus->self) : "PHB");
1033
1034	list_for_each_entry(dev, &bus->devices, bus_list) {
1035		/* Cardbus can call us to add new devices to a bus, so ignore
1036		 * those who are already fully discovered
1037		 */
1038		if (dev->is_added)
1039			continue;
1040
1041		/* Fixup NUMA node as it may not be setup yet by the generic
1042		 * code and is needed by the DMA init
1043		 */
1044		set_dev_node(&dev->dev, pcibus_to_node(dev->bus));
1045
1046		/* Hook up default DMA ops */
1047		set_dma_ops(&dev->dev, pci_dma_ops);
1048		set_dma_offset(&dev->dev, PCI_DRAM_OFFSET);
1049
1050		/* Additional platform DMA/iommu setup */
1051		if (ppc_md.pci_dma_dev_setup)
1052			ppc_md.pci_dma_dev_setup(dev);
1053
1054		/* Read default IRQs and fixup if necessary */
1055		pci_read_irq_line(dev);
1056		if (ppc_md.pci_irq_fixup)
1057			ppc_md.pci_irq_fixup(dev);
1058	}
1059}
1060
void pcibios_set_master(struct pci_dev *dev)
{
	/* Nothing to do: enabling bus mastering needs no extra setup here */
}
1065
1066void __devinit pcibios_fixup_bus(struct pci_bus *bus)
1067{
1068	/* When called from the generic PCI probe, read PCI<->PCI bridge
1069	 * bases. This is -not- called when generating the PCI tree from
1070	 * the OF device-tree.
1071	 */
1072	if (bus->self != NULL)
1073		pci_read_bridge_bases(bus);
1074
1075	/* Now fixup the bus bus */
1076	pcibios_setup_bus_self(bus);
1077
1078	/* Now fixup devices on that bus */
1079	pcibios_setup_bus_devices(bus);
1080}
1081EXPORT_SYMBOL(pcibios_fixup_bus);
1082
1083void __devinit pci_fixup_cardbus(struct pci_bus *bus)
1084{
1085	/* Now fixup devices on that bus */
1086	pcibios_setup_bus_devices(bus);
1087}
1088
1089
1090static int skip_isa_ioresource_align(struct pci_dev *dev)
1091{
1092	if (pci_has_flag(PCI_CAN_SKIP_ISA_ALIGN) &&
1093	    !(dev->bus->bridge_ctl & PCI_BRIDGE_CTL_ISA))
1094		return 1;
1095	return 0;
1096}
1097
1098/*
1099 * We need to avoid collisions with `mirrored' VGA ports
1100 * and other strange ISA hardware, so we always want the
1101 * addresses to be allocated in the 0x000-0x0ff region
1102 * modulo 0x400.
1103 *
1104 * Why? Because some silly external IO cards only decode
1105 * the low 10 bits of the IO address. The 0x00-0xff region
1106 * is reserved for motherboard devices that decode all 16
1107 * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
1108 * but we want to try to avoid allocating at 0x2900-0x2bff
 * which might have been mirrored at 0x0100-0x03ff..
1110 */
1111resource_size_t pcibios_align_resource(void *data, const struct resource *res,
1112				resource_size_t size, resource_size_t align)
1113{
1114	struct pci_dev *dev = data;
1115	resource_size_t start = res->start;
1116
1117	if (res->flags & IORESOURCE_IO) {
1118		if (skip_isa_ioresource_align(dev))
1119			return start;
1120		if (start & 0x300)
1121			start = (start + 0x3ff) & ~0x3ff;
1122	}
1123
1124	return start;
1125}
1126EXPORT_SYMBOL(pcibios_align_resource);
1127
1128/*
1129 * Reparent resource children of pr that conflict with res
1130 * under res, and make res replace those children.
1131 */
1132static int reparent_resources(struct resource *parent,
1133				     struct resource *res)
1134{
1135	struct resource *p, **pp;
1136	struct resource **firstpp = NULL;
1137
1138	for (pp = &parent->child; (p = *pp) != NULL; pp = &p->sibling) {
1139		if (p->end < res->start)
1140			continue;
1141		if (res->end < p->start)
1142			break;
1143		if (p->start < res->start || p->end > res->end)
1144			return -1;	/* not completely contained */
1145		if (firstpp == NULL)
1146			firstpp = pp;
1147	}
1148	if (firstpp == NULL)
1149		return -1;	/* didn't find any conflicting entries? */
1150	res->parent = parent;
1151	res->child = *firstpp;
1152	res->sibling = *pp;
1153	*firstpp = res;
1154	*pp = NULL;
1155	for (p = res->child; p != NULL; p = p->sibling) {
1156		p->parent = res;
1157		pr_debug("PCI: Reparented %s [%llx..%llx] under %s\n",
1158			 p->name,
1159			 (unsigned long long)p->start,
1160			 (unsigned long long)p->end, res->name);
1161	}
1162	return 0;
1163}
1164
1165/*
1166 *  Handle resources of PCI devices.  If the world were perfect, we could
1167 *  just allocate all the resource regions and do nothing more.  It isn't.
1168 *  On the other hand, we cannot just re-allocate all devices, as it would
1169 *  require us to know lots of host bridge internals.  So we attempt to
1170 *  keep as much of the original configuration as possible, but tweak it
1171 *  when it's found to be wrong.
1172 *
1173 *  Known BIOS problems we have to work around:
1174 *	- I/O or memory regions not configured
1175 *	- regions configured, but not enabled in the command register
1176 *	- bogus I/O addresses above 64K used
1177 *	- expansion ROMs left enabled (this may sound harmless, but given
1178 *	  the fact the PCI specs explicitly allow address decoders to be
1179 *	  shared between expansion ROMs and other resource regions, it's
1180 *	  at least dangerous)
1181 *
1182 *  Our solution:
1183 *	(1) Allocate resources for all buses behind PCI-to-PCI bridges.
1184 *	    This gives us fixed barriers on where we can allocate.
1185 *	(2) Allocate resources for all enabled devices.  If there is
1186 *	    a collision, just mark the resource as unallocated. Also
1187 *	    disable expansion ROMs during this step.
1188 *	(3) Try to allocate resources for disabled devices.  If the
1189 *	    resources were assigned correctly, everything goes well,
1190 *	    if they weren't, they won't disturb allocation of other
1191 *	    resources.
1192 *	(4) Assign new addresses to resources which were either
1193 *	    not configured at all or misconfigured.  If explicitly
1194 *	    requested by the user, configure expansion ROM address
1195 *	    as well.
1196 */
1197
/* Recursively claim the bridge windows of @bus (and all child buses)
 * from their parent resources, clearing out anything unset or
 * conflicting so it gets re-assigned later.
 */
void pcibios_allocate_bus_resources(struct pci_bus *bus)
{
	struct pci_bus *b;
	int i;
	struct resource *res, *pr;

	pr_debug("PCI: Allocating bus resources for %04x:%02x...\n",
		 pci_domain_nr(bus), bus->number);

	pci_bus_for_each_resource(bus, res, i) {
		/* Skip missing, empty, invalid or already-claimed windows */
		if (!res || !res->flags || res->start > res->end || res->parent)
			continue;

		/* If the resource was left unset at this point, we clear it */
		if (res->flags & IORESOURCE_UNSET)
			goto clear_resource;

		/* Root buses claim from the global IO/MEM pools; child
		 * buses claim from their parent bridge's matching window
		 */
		if (bus->parent == NULL)
			pr = (res->flags & IORESOURCE_IO) ?
				&ioport_resource : &iomem_resource;
		else {
			pr = pci_find_parent_resource(bus->self, res);
			if (pr == res) {
				/* this happens when the generic PCI
				 * code (wrongly) decides that this
				 * bridge is transparent  -- paulus
				 */
				continue;
			}
		}

		pr_debug("PCI: %s (bus %d) bridge rsrc %d: %016llx-%016llx "
			 "[0x%x], parent %p (%s)\n",
			 bus->self ? pci_name(bus->self) : "PHB",
			 bus->number, i,
			 (unsigned long long)res->start,
			 (unsigned long long)res->end,
			 (unsigned int)res->flags,
			 pr, (pr && pr->name) ? pr->name : "nil");

		if (pr && !(pr->flags & IORESOURCE_UNSET)) {
			if (request_resource(pr, res) == 0)
				continue;
			/*
			 * Must be a conflict with an existing entry.
			 * Move that entry (or entries) under the
			 * bridge resource and try again.
			 */
			if (reparent_resources(pr, res) == 0)
				continue;
		}
		pr_warning("PCI: Cannot allocate resource region "
			   "%d of PCI bridge %d, will remap\n", i, bus->number);
	clear_resource:
		res->start = res->end = 0;
		res->flags = 0;
	}

	/* Recurse into every subordinate bus */
	list_for_each_entry(b, &bus->children, node)
		pcibios_allocate_bus_resources(b);
}
1259
1260static inline void __devinit alloc_resource(struct pci_dev *dev, int idx)
1261{
1262	struct resource *pr, *r = &dev->resource[idx];
1263
1264	pr_debug("PCI: Allocating %s: Resource %d: %016llx..%016llx [%x]\n",
1265		 pci_name(dev), idx,
1266		 (unsigned long long)r->start,
1267		 (unsigned long long)r->end,
1268		 (unsigned int)r->flags);
1269
1270	pr = pci_find_parent_resource(dev, r);
1271	if (!pr || (pr->flags & IORESOURCE_UNSET) ||
1272	    request_resource(pr, r) < 0) {
1273		printk(KERN_WARNING "PCI: Cannot allocate resource region %d"
1274		       " of device %s, will remap\n", idx, pci_name(dev));
1275		if (pr)
1276			pr_debug("PCI:  parent is %p: %016llx-%016llx [%x]\n",
1277				 pr,
1278				 (unsigned long long)pr->start,
1279				 (unsigned long long)pr->end,
1280				 (unsigned int)pr->flags);
1281		/* We'll assign a new address later */
1282		r->flags |= IORESOURCE_UNSET;
1283		r->end -= r->start;
1284		r->start = 0;
1285	}
1286}
1287
1288static void __init pcibios_allocate_resources(int pass)
1289{
1290	struct pci_dev *dev = NULL;
1291	int idx, disabled;
1292	u16 command;
1293	struct resource *r;
1294
1295	for_each_pci_dev(dev) {
1296		pci_read_config_word(dev, PCI_COMMAND, &command);
1297		for (idx = 0; idx <= PCI_ROM_RESOURCE; idx++) {
1298			r = &dev->resource[idx];
1299			if (r->parent)		/* Already allocated */
1300				continue;
1301			if (!r->flags || (r->flags & IORESOURCE_UNSET))
1302				continue;	/* Not assigned at all */
1303			/* We only allocate ROMs on pass 1 just in case they
1304			 * have been screwed up by firmware
1305			 */
1306			if (idx == PCI_ROM_RESOURCE )
1307				disabled = 1;
1308			if (r->flags & IORESOURCE_IO)
1309				disabled = !(command & PCI_COMMAND_IO);
1310			else
1311				disabled = !(command & PCI_COMMAND_MEMORY);
1312			if (pass == disabled)
1313				alloc_resource(dev, idx);
1314		}
1315		if (pass)
1316			continue;
1317		r = &dev->resource[PCI_ROM_RESOURCE];
1318		if (r->flags) {
1319			/* Turn the ROM off, leave the resource region,
1320			 * but keep it unregistered.
1321			 */
1322			u32 reg;
1323			pci_read_config_dword(dev, dev->rom_base_reg, &reg);
1324			if (reg & PCI_ROM_ADDRESS_ENABLE) {
1325				pr_debug("PCI: Switching off ROM of %s\n",
1326					 pci_name(dev));
1327				r->flags &= ~IORESOURCE_ROM_ENABLE;
1328				pci_write_config_dword(dev, dev->rom_base_reg,
1329						       reg & ~PCI_ROM_ADDRESS_ENABLE);
1330			}
1331		}
1332	}
1333}
1334
/* Reserve the legacy ISA IO range (first 4K of IO space) and the legacy
 * VGA memory range (0xa0000-0xbffff) on @bus's host bridge, where those
 * ranges intersect the bridge windows, so that nothing gets allocated
 * on top of them.
 */
static void __init pcibios_reserve_legacy_regions(struct pci_bus *bus)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	resource_size_t	offset;
	struct resource *res, *pres;
	int i;

	pr_debug("Reserving legacy ranges for domain %04x\n", pci_domain_nr(bus));

	/* Check for IO */
	if (!(hose->io_resource.flags & IORESOURCE_IO))
		goto no_io;
	offset = (unsigned long)hose->io_base_virt - _IO_BASE;
	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
	BUG_ON(res == NULL);
	res->name = "Legacy IO";
	res->flags = IORESOURCE_IO;
	res->start = offset;
	/* First 4K of bus IO space, clamped to 32 bits */
	res->end = (offset + 0xfff) & 0xfffffffful;
	pr_debug("Candidate legacy IO: %pR\n", res);
	if (request_resource(&hose->io_resource, res)) {
		/* Conflict is non-fatal: just drop the reservation */
		printk(KERN_DEBUG
		       "PCI %04x:%02x Cannot reserve Legacy IO %pR\n",
		       pci_domain_nr(bus), bus->number, res);
		kfree(res);
	}

 no_io:
	/* Check for memory */
	offset = hose->pci_mem_offset;
	pr_debug("hose mem offset: %016llx\n", (unsigned long long)offset);
	/* Find a PHB memory window that fully covers the VGA range */
	for (i = 0; i < 3; i++) {
		pres = &hose->mem_resources[i];
		if (!(pres->flags & IORESOURCE_MEM))
			continue;
		pr_debug("hose mem res: %pR\n", pres);
		if ((pres->start - offset) <= 0xa0000 &&
		    (pres->end - offset) >= 0xbffff)
			break;
	}
	if (i >= 3)
		return;
	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
	BUG_ON(res == NULL);
	res->name = "Legacy VGA memory";
	res->flags = IORESOURCE_MEM;
	res->start = 0xa0000 + offset;
	res->end = 0xbffff + offset;
	pr_debug("Candidate VGA memory: %pR\n", res);
	if (request_resource(pres, res)) {
		printk(KERN_DEBUG
		       "PCI %04x:%02x Cannot reserve VGA memory %pR\n",
		       pci_domain_nr(bus), bus->number, res);
		kfree(res);
	}
}
1391
1392void __init pcibios_resource_survey(void)
1393{
1394	struct pci_bus *b;
1395
1396	/* Allocate and assign resources */
1397	list_for_each_entry(b, &pci_root_buses, node)
1398		pcibios_allocate_bus_resources(b);
1399	pcibios_allocate_resources(0);
1400	pcibios_allocate_resources(1);
1401
1402	/* Before we start assigning unassigned resource, we try to reserve
1403	 * the low IO area and the VGA memory area if they intersect the
1404	 * bus available resources to avoid allocating things on top of them
1405	 */
1406	if (!pci_has_flag(PCI_PROBE_ONLY)) {
1407		list_for_each_entry(b, &pci_root_buses, node)
1408			pcibios_reserve_legacy_regions(b);
1409	}
1410
1411	/* Now, if the platform didn't decide to blindly trust the firmware,
1412	 * we proceed to assigning things that were left unassigned
1413	 */
1414	if (!pci_has_flag(PCI_PROBE_ONLY)) {
1415		pr_debug("PCI: Assigning unassigned resources...\n");
1416		pci_assign_unassigned_resources();
1417	}
1418
1419	/* Call machine dependent fixup */
1420	if (ppc_md.pcibios_fixup)
1421		ppc_md.pcibios_fixup();
1422}
1423
1424#ifdef CONFIG_HOTPLUG
1425
1426/* This is used by the PCI hotplug driver to allocate resource
1427 * of newly plugged busses. We can try to consolidate with the
1428 * rest of the code later, for now, keep it as-is as our main
1429 * resource allocation function doesn't deal with sub-trees yet.
1430 */
1431void pcibios_claim_one_bus(struct pci_bus *bus)
1432{
1433	struct pci_dev *dev;
1434	struct pci_bus *child_bus;
1435
1436	list_for_each_entry(dev, &bus->devices, bus_list) {
1437		int i;
1438
1439		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1440			struct resource *r = &dev->resource[i];
1441
1442			if (r->parent || !r->start || !r->flags)
1443				continue;
1444
1445			pr_debug("PCI: Claiming %s: "
1446				 "Resource %d: %016llx..%016llx [%x]\n",
1447				 pci_name(dev), i,
1448				 (unsigned long long)r->start,
1449				 (unsigned long long)r->end,
1450				 (unsigned int)r->flags);
1451
1452			pci_claim_resource(dev, i);
1453		}
1454	}
1455
1456	list_for_each_entry(child_bus, &bus->children, node)
1457		pcibios_claim_one_bus(child_bus);
1458}
1459
1460
/* pcibios_finish_adding_to_bus
 *
 * This is to be called by the hotplug code after devices have been
 * added to a bus; this includes calling it for a PHB that is just
 * being added. The ordering matters: resources must be allocated and
 * claimed before the devices are registered, and EEH is set up last.
 */
void pcibios_finish_adding_to_bus(struct pci_bus *bus)
{
	pr_debug("PCI: Finishing adding to hotplug bus %04x:%02x\n",
		 pci_domain_nr(bus), bus->number);

	/* Allocate bus and devices resources */
	pcibios_allocate_bus_resources(bus);
	pcibios_claim_one_bus(bus);

	/* Add new devices to global lists.  Register in proc, sysfs. */
	pci_bus_add_devices(bus);

	/* Fixup EEH */
	eeh_add_device_tree_late(bus);
}
EXPORT_SYMBOL_GPL(pcibios_finish_adding_to_bus);
1483
1484#endif /* CONFIG_HOTPLUG */
1485
1486int pcibios_enable_device(struct pci_dev *dev, int mask)
1487{
1488	if (ppc_md.pcibios_enable_device_hook)
1489		if (ppc_md.pcibios_enable_device_hook(dev))
1490			return -EINVAL;
1491
1492	return pci_enable_resources(dev, mask);
1493}
1494
1495resource_size_t pcibios_io_space_offset(struct pci_controller *hose)
1496{
1497	return (unsigned long) hose->io_base_virt - _IO_BASE;
1498}
1499
/* Populate @resources with the PHB's IO window and up to 3 memory
 * windows, applying the hose's IO/memory offsets. On 32-bit, missing
 * windows are synthesized as a workaround; on 64-bit they are only
 * reported.
 */
static void __devinit pcibios_setup_phb_resources(struct pci_controller *hose, struct list_head *resources)
{
	struct resource *res;
	int i;

	/* Hookup PHB IO resource */
	res = &hose->io_resource;

	if (!res->flags) {
		printk(KERN_WARNING "PCI: I/O resource not set for host"
		       " bridge %s (domain %d)\n",
		       hose->dn->full_name, hose->global_number);
#ifdef CONFIG_PPC32
		/* Workaround for lack of IO resource only on 32-bit */
		res->start = (unsigned long)hose->io_base_virt - isa_io_base;
		res->end = res->start + IO_SPACE_LIMIT;
		res->flags = IORESOURCE_IO;
#endif /* CONFIG_PPC32 */
	}

	pr_debug("PCI: PHB IO resource    = %016llx-%016llx [%lx]\n",
		 (unsigned long long)res->start,
		 (unsigned long long)res->end,
		 (unsigned long)res->flags);
	pci_add_resource_offset(resources, res, pcibios_io_space_offset(hose));

	/* Hookup PHB Memory resources */
	for (i = 0; i < 3; ++i) {
		res = &hose->mem_resources[i];
		if (!res->flags) {
			/* Only the first memory window is mandatory */
			if (i > 0)
				continue;
			printk(KERN_ERR "PCI: Memory resource 0 not set for "
			       "host bridge %s (domain %d)\n",
			       hose->dn->full_name, hose->global_number);
#ifdef CONFIG_PPC32
			/* Workaround for lack of MEM resource only on 32-bit */
			res->start = hose->pci_mem_offset;
			res->end = (resource_size_t)-1LL;
			res->flags = IORESOURCE_MEM;
#endif /* CONFIG_PPC32 */
		}

		pr_debug("PCI: PHB MEM resource %d = %016llx-%016llx [%lx]\n", i,
			 (unsigned long long)res->start,
			 (unsigned long long)res->end,
			 (unsigned long)res->flags);
		pci_add_resource_offset(resources, res, hose->pci_mem_offset);
	}

	pr_debug("PCI: PHB MEM offset     = %016llx\n",
		 (unsigned long long)hose->pci_mem_offset);
	pr_debug("PCI: PHB IO  offset     = %08lx\n",
		 (unsigned long)hose->io_base_virt - _IO_BASE);

}
1556
1557/*
1558 * Null PCI config access functions, for the case when we can't
1559 * find a hose.
1560 */
1561#define NULL_PCI_OP(rw, size, type)					\
1562static int								\
1563null_##rw##_config_##size(struct pci_dev *dev, int offset, type val)	\
1564{									\
1565	return PCIBIOS_DEVICE_NOT_FOUND;    				\
1566}
1567
1568static int
1569null_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
1570		 int len, u32 *val)
1571{
1572	return PCIBIOS_DEVICE_NOT_FOUND;
1573}
1574
1575static int
1576null_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
1577		  int len, u32 val)
1578{
1579	return PCIBIOS_DEVICE_NOT_FOUND;
1580}
1581
1582static struct pci_ops null_pci_ops =
1583{
1584	.read = null_read_config,
1585	.write = null_write_config,
1586};
1587
1588/*
1589 * These functions are used early on before PCI scanning is done
1590 * and all of the pci_dev and pci_bus structures have been created.
1591 */
1592static struct pci_bus *
1593fake_pci_bus(struct pci_controller *hose, int busnr)
1594{
1595	static struct pci_bus bus;
1596
1597	if (hose == 0) {
1598		printk(KERN_ERR "Can't find hose for PCI bus %d!\n", busnr);
1599	}
1600	bus.number = busnr;
1601	bus.sysdata = hose;
1602	bus.ops = hose? hose->ops: &null_pci_ops;
1603	return &bus;
1604}
1605
/* Generate early config accessors (early_read_config_byte() etc.) that
 * route through fake_pci_bus() so they work before the real pci_bus
 * structures exist.
 */
#define EARLY_PCI_OP(rw, size, type)					\
int early_##rw##_config_##size(struct pci_controller *hose, int bus,	\
			       int devfn, int offset, type value)	\
{									\
	return pci_bus_##rw##_config_##size(fake_pci_bus(hose, bus),	\
					    devfn, offset, value);	\
}

EARLY_PCI_OP(read, byte, u8 *)
EARLY_PCI_OP(read, word, u16 *)
EARLY_PCI_OP(read, dword, u32 *)
EARLY_PCI_OP(write, byte, u8)
EARLY_PCI_OP(write, word, u16)
EARLY_PCI_OP(write, dword, u32)
1620
/* NOTE(review): this extern belongs in a shared header, not a .c file */
extern int pci_bus_find_capability (struct pci_bus *bus, unsigned int devfn, int cap);
/* Early-boot capability lookup via the fake bus, before scanning */
int early_find_capability(struct pci_controller *hose, int bus, int devfn,
			  int cap)
{
	return pci_bus_find_capability(fake_pci_bus(hose, bus), devfn, cap);
}
1627
1628struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus)
1629{
1630	struct pci_controller *hose = bus->sysdata;
1631
1632	return of_node_get(hose->dn);
1633}
1634
1635/**
1636 * pci_scan_phb - Given a pci_controller, setup and scan the PCI bus
1637 * @hose: Pointer to the PCI host controller instance structure
1638 */
1639void __devinit pcibios_scan_phb(struct pci_controller *hose)
1640{
1641	LIST_HEAD(resources);
1642	struct pci_bus *bus;
1643	struct device_node *node = hose->dn;
1644	int mode;
1645
1646	pr_debug("PCI: Scanning PHB %s\n",
1647		 node ? node->full_name : "<NO NAME>");
1648
1649	/* Get some IO space for the new PHB */
1650	pcibios_setup_phb_io_space(hose);
1651
1652	/* Wire up PHB bus resources */
1653	pcibios_setup_phb_resources(hose, &resources);
1654
1655	/* Create an empty bus for the toplevel */
1656	bus = pci_create_root_bus(hose->parent, hose->first_busno,
1657				  hose->ops, hose, &resources);
1658	if (bus == NULL) {
1659		pr_err("Failed to create bus for PCI domain %04x\n",
1660			hose->global_number);
1661		pci_free_resource_list(&resources);
1662		return;
1663	}
1664	bus->secondary = hose->first_busno;
1665	hose->bus = bus;
1666
1667	/* Get probe mode and perform scan */
1668	mode = PCI_PROBE_NORMAL;
1669	if (node && ppc_md.pci_probe_mode)
1670		mode = ppc_md.pci_probe_mode(bus);
1671	pr_debug("    probe mode: %d\n", mode);
1672	if (mode == PCI_PROBE_DEVTREE) {
1673		bus->subordinate = hose->last_busno;
1674		of_scan_bus(node, bus);
1675	}
1676
1677	if (mode == PCI_PROBE_NORMAL)
1678		hose->last_busno = bus->subordinate = pci_scan_child_bus(bus);
1679
1680	/* Platform gets a chance to do some global fixups before
1681	 * we proceed to resource allocation
1682	 */
1683	if (ppc_md.pcibios_fixup_phb)
1684		ppc_md.pcibios_fixup_phb(hose);
1685
1686	/* Configure PCI Express settings */
1687	if (bus && !pci_has_flag(PCI_PROBE_ONLY)) {
1688		struct pci_bus *child;
1689		list_for_each_entry(child, &bus->children, node) {
1690			struct pci_dev *self = child->self;
1691			if (!self)
1692				continue;
1693			pcie_bus_configure_settings(child, self->pcie_mpss);
1694		}
1695	}
1696}
1697
/* Clear the resources of Motorola/Freescale host-bridge-class devices
 * sitting on a root bus so the kernel does not try to use them.
 */
static void fixup_hide_host_resource_fsl(struct pci_dev *dev)
{
	int i, class = dev->class >> 8;
	/* When configured as agent, programming interface = 1 */
	int prog_if = dev->class & 0xf;

	if ((class == PCI_CLASS_PROCESSOR_POWERPC ||
	     class == PCI_CLASS_BRIDGE_OTHER) &&
		(dev->hdr_type == PCI_HEADER_TYPE_NORMAL) &&
		(prog_if == 0) &&
		(dev->bus->parent == NULL)) {
		for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
			dev->resource[i].start = 0;
			dev->resource[i].end = 0;
			dev->resource[i].flags = 0;
		}
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MOTOROLA, PCI_ANY_ID, fixup_hide_host_resource_fsl);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID, fixup_hide_host_resource_fsl);
1718