linux-2.6/include/asm-powerpc/pci.h
Benjamin Herrenschmidt 3d5134ee83 [POWERPC] Rewrite IO allocation & mapping on powerpc64
This rewrites pretty much from scratch the handling of MMIO and PIO
space allocations on powerpc64.  The main goals are:

 - Get rid of imalloc and use more common code where possible
 - Simplify the current mess so that PIO space is allocated and
   mapped in a single place for PCI bridges
 - Handle allocation constraints of PIO for all bridges, including
   hot-plugged ones, within the 2GB space reserved for IO ports,
   so that devices on hot-plugged buses will now work with drivers
   that assume IO ports fit in an int.
 - Clean up and separate the tracking of the ISA space in the reserved
   low 64K of IO space. No ISA -> nothing mapped there.

So far I have booted a cell blade with IDE on PIO and MMIO, and a
dual G5; that's it :-)

With this patch, all allocations are done using the code in
mm/vmalloc.c, though we use the low-level __get_vm_area with
explicit start/stop constraints in order to manage separate
areas for vmalloc/vmap, ioremap, and PCI IOs.
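
For illustration only (a rough sketch, not code taken verbatim from
the patch: the helper name, the PHB_IO_BASE/PHB_IO_END bounds and the
error handling are placeholders), carving a virtual window for one
bridge out of such a constrained region looks roughly like this:

	#include <linux/vmalloc.h>

	/* Illustrative only: reserve a virtual window for one bridge's
	 * PIO space inside the region set aside for PCI IO.
	 */
	static void __iomem *alloc_pio_window(unsigned long size)
	{
		struct vm_struct *area;

		area = __get_vm_area(size, VM_IOREMAP,
				     PHB_IO_BASE, PHB_IO_END);
		if (area == NULL)
			return NULL;
		/* area->addr is where the bridge's IO space gets mapped */
		return (void __iomem *)area->addr;
	}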

This greatly simplifies things, as you can see in the diffstat
of this patch :-)

A new pair of functions, pcibios_map_io_space() and
pcibios_unmap_io_space(), now replaces all of the previous code that
manipulated PCI IO space. The allocation is done at mapping time,
which now happens from the scan_phb's, just before the devices are
probed (instead of after, which is in itself a bug fix). The only
other caller is the PCI hotplug code, for hot-adding PCI-PCI bridges
(slots).
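
The call sites would look roughly like the sketch below (the
struct pci_bus * signatures are assumed here and the printk is only
illustrative, not code from the patch):

	/* From the PHB scan path, before probing devices on the bus */
	if (pcibios_map_io_space(phb->bus))
		printk(KERN_WARNING "PHB: failed to map IO space\n");

	/* From the hotplug path, when tearing a child bus back down */
	pcibios_unmap_io_space(bus);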

imalloc is gone, as is the "sub-allocation" thing, but I do believe
that hotplug should still work, in the sense that the space allocation
is always done by the PHB; if you unmap a child bus of this PHB
(which seems to be possible), the code should properly tear
down all the HPTE mappings for that area of the PHB's allocated IO space.

I now always reserve the first 64K of IO space for the bridge with
the ISA bus on it. I have moved the code for tracking ISA into a separate
file, which should also make it smarter if we ever become capable of
hot unplugging or re-plugging an ISA bridge.

This should have a side effect on platforms like powermac: VGA IOs
will no longer work there. This is done on purpose, though, as they would
have worked semi-randomly before. The idea at this point is to isolate
drivers that might need to access those and fix them by providing a proper
function to obtain an offset to the legacy IOs of a given bus, as
sketched below.
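
Something along these lines, purely hypothetical at this point (the
helper below does not exist yet; its name and shape are only meant to
show the idea):

	/* Hypothetical helper: would return the PIO offset at which
	 * the legacy IO ports of this bus live.
	 */
	extern unsigned long pci_legacy_io_offset(struct pci_bus *bus);

	/* A fixed-up VGA driver would then do something like: */
	outb(0x67, pci_legacy_io_offset(bus) + 0x3c2);	/* misc output reg */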

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
2007-06-14 22:29:56 +10:00

#ifndef __ASM_POWERPC_PCI_H
#define __ASM_POWERPC_PCI_H
#ifdef __KERNEL__
/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <asm/machdep.h>
#include <asm/scatterlist.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm-generic/pci-dma-compat.h>
#define PCIBIOS_MIN_IO 0x1000
#define PCIBIOS_MIN_MEM 0x10000000
struct pci_dev;
/* Values for the `which' argument to sys_pciconfig_iobase syscall. */
#define IOBASE_BRIDGE_NUMBER 0
#define IOBASE_MEMORY 1
#define IOBASE_IO 2
#define IOBASE_ISA_IO 3
#define IOBASE_ISA_MEM 4
/*
 * Set this to 1 if you want the kernel to re-assign all PCI
 * bus numbers
 */
extern int pci_assign_all_buses;
#define pcibios_assign_all_busses() (pci_assign_all_buses)
#define pcibios_scan_all_fns(a, b) 0
static inline void pcibios_set_master(struct pci_dev *dev)
{
	/* No special bus mastering setup handling */
}

static inline void pcibios_penalize_isa_irq(int irq, int active)
{
	/* We don't do dynamic PCI IRQ allocation */
}

#define HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ
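/* Ask the platform for a legacy IDE channel's IRQ; fall back to the
 * traditional ISA values (14 for the primary, 15 for the secondary).
 */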
static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
{
	if (ppc_md.pci_get_legacy_ide_irq)
		return ppc_md.pci_get_legacy_ide_irq(dev, channel);
	return channel ? 15 : 14;
}
#ifdef CONFIG_PPC64
/*
 * We want to avoid touching the cacheline size or MWI bit.
 * pSeries firmware sets the cacheline size (which is not the cpu cacheline
 * size in all cases) and hardware treats MWI the same as memory write.
 */
#define PCI_DISABLE_MWI
#ifdef CONFIG_PCI
extern void set_pci_dma_ops(struct dma_mapping_ops *dma_ops);
extern struct dma_mapping_ops *get_pci_dma_ops(void);
/* For DAC DMA, we currently don't support it by default, but
 * we let 64-bit platforms override this.
 */
static inline int pci_dac_dma_supported(struct pci_dev *hwdev, u64 mask)
{
	struct dma_mapping_ops *d = get_pci_dma_ops();

	if (d && d->dac_dma_supported)
		return d->dac_dma_supported(&hwdev->dev, mask);
	return 0;
}
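
/* Advise drivers on DMA burst usage: bursts in multiples of the PCI
 * cache line size programmed into config space (treated as 1024 bytes
 * when the register reads back as zero).
 */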
static inline void pci_dma_burst_advice(struct pci_dev *pdev,
					enum pci_dma_burst_strategy *strat,
					unsigned long *strategy_parameter)
{
	unsigned long cacheline_size;
	u8 byte;

	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;

	*strat = PCI_DMA_BURST_MULTIPLE;
	*strategy_parameter = cacheline_size;
}
#else /* CONFIG_PCI */
#define set_pci_dma_ops(d)
#define get_pci_dma_ops() NULL
#endif
extern int pci_domain_nr(struct pci_bus *bus);
/* Decide whether to display the domain number in /proc */
extern int pci_proc_domain(struct pci_bus *bus);
#else /* 32-bit */
#ifdef CONFIG_PCI
static inline void pci_dma_burst_advice(struct pci_dev *pdev,
					enum pci_dma_burst_strategy *strat,
					unsigned long *strategy_parameter)
{
	*strat = PCI_DMA_BURST_INFINITY;
	*strategy_parameter = ~0UL;
}
#endif
/*
 * At present there are very few 32-bit PPC machines that can have
 * memory above the 4GB point, and we don't support that.
 */
#define pci_dac_dma_supported(pci_dev, mask) (0)
/* Return the index of the PCI controller for device PDEV. */
#define pci_domain_nr(bus) ((struct pci_controller *)(bus)->sysdata)->index
/* Set the name of the bus as it appears in /proc/bus/pci */
static inline int pci_proc_domain(struct pci_bus *bus)
{
	return 0;
}
#endif /* CONFIG_PPC64 */
struct vm_area_struct;
/* Map a range of PCI memory or I/O space for a device into user space */
int pci_mmap_page_range(struct pci_dev *pdev, struct vm_area_struct *vma,
			enum pci_mmap_state mmap_state, int write_combine);
/* Tell drivers/pci/proc.c that we have pci_mmap_page_range() */
#define HAVE_PCI_MMAP 1
#if defined(CONFIG_PPC64) || defined(CONFIG_NOT_COHERENT_CACHE)
/*
 * For 64-bit kernels, pci_unmap_{single,page} is not a nop.
 * For 32-bit non-coherent kernels, pci_dma_sync_single_for_cpu() and
 * so on are not nops.
 * and thus...
 */
#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)	\
	dma_addr_t ADDR_NAME;
#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)	\
	__u32 LEN_NAME;
#define pci_unmap_addr(PTR, ADDR_NAME)	\
	((PTR)->ADDR_NAME)
#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)	\
	(((PTR)->ADDR_NAME) = (VAL))
#define pci_unmap_len(PTR, LEN_NAME)	\
	((PTR)->LEN_NAME)
#define pci_unmap_len_set(PTR, LEN_NAME, VAL)	\
	(((PTR)->LEN_NAME) = (VAL))
#else /* 32-bit && coherent */
/* pci_unmap_{page,single} is a nop so... */
#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
#define pci_unmap_addr(PTR, ADDR_NAME) (0)
#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
#define pci_unmap_len(PTR, LEN_NAME) (0)
#define pci_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0)
#endif /* CONFIG_PPC64 || CONFIG_NOT_COHERENT_CACHE */
#ifdef CONFIG_PPC64
/* The PCI address space does not equal the physical memory address
 * space (we have an IOMMU). The IDE and SCSI device layers use
 * this boolean for bounce buffer decisions.
 */
#define PCI_DMA_BUS_IS_PHYS (0)
#else /* 32-bit */
/* The PCI address space does equal the physical memory
 * address space (no IOMMU). The IDE and SCSI device layers use
 * this boolean for bounce buffer decisions.
 */
#define PCI_DMA_BUS_IS_PHYS (1)
#endif /* CONFIG_PPC64 */
extern void pcibios_resource_to_bus(struct pci_dev *dev,
				    struct pci_bus_region *region,
				    struct resource *res);
extern void pcibios_bus_to_resource(struct pci_dev *dev,
				    struct resource *res,
				    struct pci_bus_region *region);
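
/* Pick the root resource (ioport_resource or iomem_resource) against
 * which a device resource should be claimed, based on its flags.
 */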
static inline struct resource *pcibios_select_root(struct pci_dev *pdev,
						   struct resource *res)
{
	struct resource *root = NULL;

	if (res->flags & IORESOURCE_IO)
		root = &ioport_resource;
	if (res->flags & IORESOURCE_MEM)
		root = &iomem_resource;

	return root;
}
extern void pcibios_fixup_device_resources(struct pci_dev *dev,
					   struct pci_bus *bus);
extern void pcibios_setup_new_device(struct pci_dev *dev);
extern void pcibios_claim_one_bus(struct pci_bus *b);
extern struct pci_controller *init_phb_dynamic(struct device_node *dn);
extern struct pci_dev *of_create_pci_dev(struct device_node *node,
					 struct pci_bus *bus, int devfn);
extern void of_scan_pci_bridge(struct device_node *node,
			       struct pci_dev *dev);
extern void of_scan_bus(struct device_node *node, struct pci_bus *bus);
extern int pci_read_irq_line(struct pci_dev *dev);
extern void pcibios_add_platform_entries(struct pci_dev *dev);

struct file;
extern pgprot_t pci_phys_mem_access_prot(struct file *file,
					 unsigned long pfn,
					 unsigned long size,
					 pgprot_t prot);

#define HAVE_ARCH_PCI_RESOURCE_TO_USER
extern void pci_resource_to_user(const struct pci_dev *dev, int bar,
				 const struct resource *rsrc,
				 resource_size_t *start, resource_size_t *end);
#endif /* __KERNEL__ */
#endif /* __ASM_POWERPC_PCI_H */