29 files changed, 239 insertions, 174 deletions
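One recurring cleanup in the diff below is the replacement of open-coded dev->bus == &pci_bus_type checks with the dev_is_pci() helper from <linux/pci.h>. As a rough, illustrative sketch (not the exact in-kernel definition), the helper amounts to the same bus comparison, so the converted callers in the Alpha, ARM, ia64, parisc and sparc DMA paths stay behaviorally identical:

/*
 * Illustrative kernel-style sketch only: dev_is_pci() hides the
 * pci_bus_type comparison that the hunks below remove from each caller.
 */
#include <linux/device.h>
#include <linux/pci.h>

static int example_dma_platform_notify(struct device *dev)
{
	if (!dev_is_pci(dev))		/* was: dev->bus != &pci_bus_type */
		return 0;

	/* From here on it is safe to use to_pci_dev(dev). */
	return 0;
}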
| diff --git a/MAINTAINERS b/MAINTAINERS index 8285ed4676b6..624e6516fdd3 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -6449,19 +6449,52 @@ F:	drivers/pci/  F:	include/linux/pci*  F:	arch/x86/pci/ +PCI DRIVER FOR IMX6 +M:	Richard Zhu <r65037@freescale.com> +M:	Shawn Guo <shawn.guo@linaro.org> +L:	linux-pci@vger.kernel.org +L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) +S:	Maintained +F:	drivers/pci/host/*imx6* + +PCI DRIVER FOR MVEBU (Marvell Armada 370 and Armada XP SOC support) +M:	Thomas Petazzoni <thomas.petazzoni@free-electrons.com> +M:	Jason Cooper <jason@lakedaemon.net> +L:	linux-pci@vger.kernel.org +L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) +S:	Maintained +F:	drivers/pci/host/*mvebu* +  PCI DRIVER FOR NVIDIA TEGRA  M:	Thierry Reding <thierry.reding@gmail.com>  L:	linux-tegra@vger.kernel.org +L:	linux-pci@vger.kernel.org  S:	Supported  F:	Documentation/devicetree/bindings/pci/nvidia,tegra20-pcie.txt  F:	drivers/pci/host/pci-tegra.c +PCI DRIVER FOR RENESAS R-CAR +M:	Simon Horman <horms@verge.net.au> +L:	linux-pci@vger.kernel.org +L:	linux-sh@vger.kernel.org +S:	Maintained +F:	drivers/pci/host/*rcar* +  PCI DRIVER FOR SAMSUNG EXYNOS  M:	Jingoo Han <jg1.han@samsung.com>  L:	linux-pci@vger.kernel.org +L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) +L:	linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)  S:	Maintained  F:	drivers/pci/host/pci-exynos.c +PCI DRIVER FOR SYNOPSIS DESIGNWARE +M:	Mohit Kumar <mohit.kumar@st.com> +M:	Jingoo Han <jg1.han@samsung.com> +L:	linux-pci@vger.kernel.org +S:	Maintained +F:	drivers/pci/host/*designware* +  PCMCIA SUBSYSTEM  P:	Linux PCMCIA Team  L:	linux-pcmcia@lists.infradead.org diff --git a/arch/alpha/kernel/pci_iommu.c b/arch/alpha/kernel/pci_iommu.c index a21d0ab3b19e..eddee7720343 100644 --- a/arch/alpha/kernel/pci_iommu.c +++ b/arch/alpha/kernel/pci_iommu.c @@ -325,7 +325,7 @@ pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,  /* Helper for generic DMA-mapping functions. 
*/  static struct pci_dev *alpha_gendev_to_pci(struct device *dev)  { -	if (dev && dev->bus == &pci_bus_type) +	if (dev && dev_is_pci(dev))  		return to_pci_dev(dev);  	/* Assume that non-PCI devices asking for DMA are either ISA or EISA, diff --git a/arch/arm/common/it8152.c b/arch/arm/common/it8152.c index 001f4913799c..5114b68e99d5 100644 --- a/arch/arm/common/it8152.c +++ b/arch/arm/common/it8152.c @@ -257,7 +257,7 @@ static int it8152_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t s   */  static int it8152_pci_platform_notify(struct device *dev)  { -	if (dev->bus == &pci_bus_type) { +	if (dev_is_pci(dev)) {  		if (dev->dma_mask)  			*dev->dma_mask = (SZ_64M - 1) | PHYS_OFFSET;  		dev->coherent_dma_mask = (SZ_64M - 1) | PHYS_OFFSET; @@ -268,7 +268,7 @@ static int it8152_pci_platform_notify(struct device *dev)  static int it8152_pci_platform_notify_remove(struct device *dev)  { -	if (dev->bus == &pci_bus_type) +	if (dev_is_pci(dev))  		dmabounce_unregister_dev(dev);  	return 0; diff --git a/arch/arm/mach-ixp4xx/common-pci.c b/arch/arm/mach-ixp4xx/common-pci.c index 6d6bde3e15fa..200970d56f6d 100644 --- a/arch/arm/mach-ixp4xx/common-pci.c +++ b/arch/arm/mach-ixp4xx/common-pci.c @@ -326,7 +326,7 @@ static int ixp4xx_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t s   */  static int ixp4xx_pci_platform_notify(struct device *dev)  { -	if(dev->bus == &pci_bus_type) { +	if (dev_is_pci(dev)) {  		*dev->dma_mask =  SZ_64M - 1;  		dev->coherent_dma_mask = SZ_64M - 1;  		dmabounce_register_dev(dev, 2048, 4096, ixp4xx_needs_bounce); @@ -336,9 +336,9 @@ static int ixp4xx_pci_platform_notify(struct device *dev)  static int ixp4xx_pci_platform_notify_remove(struct device *dev)  { -	if(dev->bus == &pci_bus_type) { +	if (dev_is_pci(dev))  		dmabounce_unregister_dev(dev); -	} +  	return 0;  } diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c index 4c530a82fc46..8e858b593e4f 100644 --- a/arch/ia64/hp/common/sba_iommu.c +++ b/arch/ia64/hp/common/sba_iommu.c @@ -255,7 +255,7 @@ static u64 prefetch_spill_page;  #endif  #ifdef CONFIG_PCI -# define GET_IOC(dev)	(((dev)->bus == &pci_bus_type)						\ +# define GET_IOC(dev)	((dev_is_pci(dev))						\  			 ? ((struct ioc *) PCI_CONTROLLER(to_pci_dev(dev))->iommu) : NULL)  #else  # define GET_IOC(dev)	NULL diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c index 3290d6e00c31..d0853e8e8623 100644 --- a/arch/ia64/sn/pci/pci_dma.c +++ b/arch/ia64/sn/pci/pci_dma.c @@ -34,7 +34,7 @@   */  static int sn_dma_supported(struct device *dev, u64 mask)  { -	BUG_ON(dev->bus != &pci_bus_type); +	BUG_ON(!dev_is_pci(dev));  	if (mask < 0x7fffffff)  		return 0; @@ -50,7 +50,7 @@ static int sn_dma_supported(struct device *dev, u64 mask)   */  int sn_dma_set_mask(struct device *dev, u64 dma_mask)  { -	BUG_ON(dev->bus != &pci_bus_type); +	BUG_ON(!dev_is_pci(dev));  	if (!sn_dma_supported(dev, dma_mask))  		return 0; @@ -85,7 +85,7 @@ static void *sn_dma_alloc_coherent(struct device *dev, size_t size,  	struct pci_dev *pdev = to_pci_dev(dev);  	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); -	BUG_ON(dev->bus != &pci_bus_type); +	BUG_ON(!dev_is_pci(dev));  	/*  	 * Allocate the memory. 
@@ -143,7 +143,7 @@ static void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr  	struct pci_dev *pdev = to_pci_dev(dev);  	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); -	BUG_ON(dev->bus != &pci_bus_type); +	BUG_ON(!dev_is_pci(dev));  	provider->dma_unmap(pdev, dma_handle, 0);  	free_pages((unsigned long)cpu_addr, get_order(size)); @@ -187,7 +187,7 @@ static dma_addr_t sn_dma_map_page(struct device *dev, struct page *page,  	dmabarr = dma_get_attr(DMA_ATTR_WRITE_BARRIER, attrs); -	BUG_ON(dev->bus != &pci_bus_type); +	BUG_ON(!dev_is_pci(dev));  	phys_addr = __pa(cpu_addr);  	if (dmabarr) @@ -223,7 +223,7 @@ static void sn_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,  	struct pci_dev *pdev = to_pci_dev(dev);  	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); -	BUG_ON(dev->bus != &pci_bus_type); +	BUG_ON(!dev_is_pci(dev));  	provider->dma_unmap(pdev, dma_addr, dir);  } @@ -247,7 +247,7 @@ static void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sgl,  	struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev);  	struct scatterlist *sg; -	BUG_ON(dev->bus != &pci_bus_type); +	BUG_ON(!dev_is_pci(dev));  	for_each_sg(sgl, sg, nhwentries, i) {  		provider->dma_unmap(pdev, sg->dma_address, dir); @@ -284,7 +284,7 @@ static int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl,  	dmabarr = dma_get_attr(DMA_ATTR_WRITE_BARRIER, attrs); -	BUG_ON(dev->bus != &pci_bus_type); +	BUG_ON(!dev_is_pci(dev));  	/*  	 * Setup a DMA address for each entry in the scatterlist. @@ -323,26 +323,26 @@ static int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl,  static void sn_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,  				       size_t size, enum dma_data_direction dir)  { -	BUG_ON(dev->bus != &pci_bus_type); +	BUG_ON(!dev_is_pci(dev));  }  static void sn_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,  					  size_t size,  					  enum dma_data_direction dir)  { -	BUG_ON(dev->bus != &pci_bus_type); +	BUG_ON(!dev_is_pci(dev));  }  static void sn_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,  				   int nelems, enum dma_data_direction dir)  { -	BUG_ON(dev->bus != &pci_bus_type); +	BUG_ON(!dev_is_pci(dev));  }  static void sn_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,  				      int nelems, enum dma_data_direction dir)  { -	BUG_ON(dev->bus != &pci_bus_type); +	BUG_ON(!dev_is_pci(dev));  }  static int sn_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c index 14285caec71a..dba508fe1683 100644 --- a/arch/parisc/kernel/drivers.c +++ b/arch/parisc/kernel/drivers.c @@ -282,18 +282,6 @@ find_pa_parent_type(const struct parisc_device *padev, int type)  	return NULL;  } -#ifdef CONFIG_PCI -static inline int is_pci_dev(struct device *dev) -{ -	return dev->bus == &pci_bus_type; -} -#else -static inline int is_pci_dev(struct device *dev) -{ -	return 0; -} -#endif -  /*   * get_node_path fills in @path with the firmware path to the device.   * Note that if @node is a parisc device, we don't fill in the 'mod' field. 
@@ -306,7 +294,7 @@ static void get_node_path(struct device *dev, struct hardware_path *path)  	int i = 5;  	memset(&path->bc, -1, 6); -	if (is_pci_dev(dev)) { +	if (dev_is_pci(dev)) {  		unsigned int devfn = to_pci_dev(dev)->devfn;  		path->mod = PCI_FUNC(devfn);  		path->bc[i--] = PCI_SLOT(devfn); @@ -314,7 +302,7 @@ static void get_node_path(struct device *dev, struct hardware_path *path)  	}  	while (dev != &root) { -		if (is_pci_dev(dev)) { +		if (dev_is_pci(dev)) {  			unsigned int devfn = to_pci_dev(dev)->devfn;  			path->bc[i--] = PCI_SLOT(devfn) | (PCI_FUNC(devfn)<< 5);  		} else if (dev->bus == &parisc_bus_type) { @@ -695,7 +683,7 @@ static int check_parent(struct device * dev, void * data)  		if (dev->bus == &parisc_bus_type) {  			if (match_parisc_device(dev, d->index, d->modpath))  				d->dev = dev; -		} else if (is_pci_dev(dev)) { +		} else if (dev_is_pci(dev)) {  			if (match_pci_device(dev, d->index, d->modpath))  				d->dev = dev;  		} else if (dev->bus == NULL) { @@ -753,7 +741,7 @@ struct device *hwpath_to_device(struct hardware_path *modpath)  		if (!parent)  			return NULL;  	} -	if (is_pci_dev(parent)) /* pci devices already parse MOD */ +	if (dev_is_pci(parent)) /* pci devices already parse MOD */  		return parent;  	else  		return parse_tree_node(parent, 6, modpath); @@ -772,7 +760,7 @@ void device_to_hwpath(struct device *dev, struct hardware_path *path)  		padev = to_parisc_device(dev);  		get_node_path(dev->parent, path);  		path->mod = padev->hw_path; -	} else if (is_pci_dev(dev)) { +	} else if (dev_is_pci(dev)) {  		get_node_path(dev, path);  	}  } diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c index 070ed141aac7..76663b019eb5 100644 --- a/arch/sparc/kernel/iommu.c +++ b/arch/sparc/kernel/iommu.c @@ -854,7 +854,7 @@ int dma_supported(struct device *dev, u64 device_mask)  		return 1;  #ifdef CONFIG_PCI -	if (dev->bus == &pci_bus_type) +	if (dev_is_pci(dev))  		return pci64_dma_supported(to_pci_dev(dev), device_mask);  #endif diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c index 2096468de9b2..e7e215dfa866 100644 --- a/arch/sparc/kernel/ioport.c +++ b/arch/sparc/kernel/ioport.c @@ -666,10 +666,9 @@ EXPORT_SYMBOL(dma_ops);   */  int dma_supported(struct device *dev, u64 mask)  { -#ifdef CONFIG_PCI -	if (dev->bus == &pci_bus_type) +	if (dev_is_pci(dev))  		return 1; -#endif +  	return 0;  }  EXPORT_SYMBOL(dma_supported); diff --git a/arch/x86/include/asm/pci.h b/arch/x86/include/asm/pci.h index 947b5c417e83..0de52c5bf9a2 100644 --- a/arch/x86/include/asm/pci.h +++ b/arch/x86/include/asm/pci.h @@ -104,7 +104,7 @@ extern void pci_iommu_alloc(void);  struct msi_desc;  int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type);  void native_teardown_msi_irq(unsigned int irq); -void native_restore_msi_irqs(struct pci_dev *dev, int irq); +void native_restore_msi_irqs(struct pci_dev *dev);  int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc,  		  unsigned int irq_base, unsigned int irq_offset);  #else diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h index 0f1be11e43d2..e45e4da96bf1 100644 --- a/arch/x86/include/asm/x86_init.h +++ b/arch/x86/include/asm/x86_init.h @@ -181,7 +181,7 @@ struct x86_msi_ops {  			       u8 hpet_id);  	void (*teardown_msi_irq)(unsigned int irq);  	void (*teardown_msi_irqs)(struct pci_dev *dev); -	void (*restore_msi_irqs)(struct pci_dev *dev, int irq); +	void (*restore_msi_irqs)(struct pci_dev *dev);  	int  (*setup_hpet_msi)(unsigned int irq, unsigned int 
id);  	u32 (*msi_mask_irq)(struct msi_desc *desc, u32 mask, u32 flag);  	u32 (*msix_mask_irq)(struct msi_desc *desc, u32 flag); diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index 6c0b43bd024b..d359d0fffa50 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -1034,9 +1034,7 @@ static int mp_config_acpi_gsi(struct device *dev, u32 gsi, int trigger,  	if (!acpi_ioapic)  		return 0; -	if (!dev) -		return 0; -	if (dev->bus != &pci_bus_type) +	if (!dev || !dev_is_pci(dev))  		return 0;  	pdev = to_pci_dev(dev); diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c index 021783b1f46a..e48b674639cc 100644 --- a/arch/x86/kernel/x86_init.c +++ b/arch/x86/kernel/x86_init.c @@ -136,9 +136,9 @@ void arch_teardown_msi_irq(unsigned int irq)  	x86_msi.teardown_msi_irq(irq);  } -void arch_restore_msi_irqs(struct pci_dev *dev, int irq) +void arch_restore_msi_irqs(struct pci_dev *dev)  { -	x86_msi.restore_msi_irqs(dev, irq); +	x86_msi.restore_msi_irqs(dev);  }  u32 arch_msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)  { diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c index 5eee4959785d..103e702ec5a7 100644 --- a/arch/x86/pci/xen.c +++ b/arch/x86/pci/xen.c @@ -337,7 +337,7 @@ out:  	return ret;  } -static void xen_initdom_restore_msi_irqs(struct pci_dev *dev, int irq) +static void xen_initdom_restore_msi_irqs(struct pci_dev *dev)  {  	int ret = 0; diff --git a/drivers/eisa/eisa-bus.c b/drivers/eisa/eisa-bus.c index 272a3ec35957..8842cde69177 100644 --- a/drivers/eisa/eisa-bus.c +++ b/drivers/eisa/eisa-bus.c @@ -232,8 +232,10 @@ static int __init eisa_init_device(struct eisa_root_device *root,  static int __init eisa_register_device(struct eisa_device *edev)  {  	int rc = device_register(&edev->dev); -	if (rc) +	if (rc) { +		put_device(&edev->dev);  		return rc; +	}  	rc = device_create_file(&edev->dev, &dev_attr_signature);  	if (rc) diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c index c269e430c760..2aa7b77c7c88 100644 --- a/drivers/pci/host/pci-mvebu.c +++ b/drivers/pci/host/pci-mvebu.c @@ -447,6 +447,11 @@ static int mvebu_sw_pci_bridge_read(struct mvebu_pcie_port *port,  		*value = 0;  		break; +	case PCI_INTERRUPT_LINE: +		/* LINE PIN MIN_GNT MAX_LAT */ +		*value = 0; +		break; +  	default:  		*value = 0xffffffff;  		return PCIBIOS_BAD_REGISTER_NUMBER; diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c index 1fe2d6fb19d5..68311ec849ee 100644 --- a/drivers/pci/iov.c +++ b/drivers/pci/iov.c @@ -441,6 +441,7 @@ static int sriov_init(struct pci_dev *dev, int pos)  found:  	pci_write_config_word(dev, pos + PCI_SRIOV_CTRL, ctrl); +	pci_write_config_word(dev, pos + PCI_SRIOV_NUM_VF, 0);  	pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &offset);  	pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &stride);  	if (!offset || (total > 1 && !stride)) diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index 3fcd67a16677..51bf0400a889 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c @@ -116,7 +116,7 @@ void __weak arch_teardown_msi_irqs(struct pci_dev *dev)  	return default_teardown_msi_irqs(dev);  } -void default_restore_msi_irqs(struct pci_dev *dev, int irq) +static void default_restore_msi_irq(struct pci_dev *dev, int irq)  {  	struct msi_desc *entry; @@ -134,9 +134,9 @@ void default_restore_msi_irqs(struct pci_dev *dev, int irq)  		write_msi_msg(irq, &entry->msg);  } -void __weak arch_restore_msi_irqs(struct pci_dev *dev, int irq) +void __weak arch_restore_msi_irqs(struct pci_dev *dev)  { -	
return default_restore_msi_irqs(dev, irq); +	return default_restore_msi_irqs(dev);  }  static void msi_set_enable(struct pci_dev *dev, int enable) @@ -262,6 +262,15 @@ void unmask_msi_irq(struct irq_data *data)  	msi_set_mask_bit(data, 0);  } +void default_restore_msi_irqs(struct pci_dev *dev) +{ +	struct msi_desc *entry; + +	list_for_each_entry(entry, &dev->msi_list, list) { +		default_restore_msi_irq(dev, entry->irq); +	} +} +  void __read_msi_msg(struct msi_desc *entry, struct msi_msg *msg)  {  	BUG_ON(entry->dev->current_state != PCI_D0); @@ -430,7 +439,7 @@ static void __pci_restore_msi_state(struct pci_dev *dev)  	pci_intx_for_msi(dev, 0);  	msi_set_enable(dev, 0); -	arch_restore_msi_irqs(dev, dev->irq); +	arch_restore_msi_irqs(dev);  	pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);  	msi_mask_irq(entry, msi_capable_mask(control), entry->masked); @@ -455,8 +464,8 @@ static void __pci_restore_msix_state(struct pci_dev *dev)  	control |= PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL;  	pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, control); +	arch_restore_msi_irqs(dev);  	list_for_each_entry(entry, &dev->msi_list, list) { -		arch_restore_msi_irqs(dev, entry->irq);  		msix_mask_irq(entry, entry->masked);  	} diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index 577074efbe62..e0431f1af33b 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c @@ -358,7 +358,7 @@ static void pci_acpi_cleanup(struct device *dev)  static bool pci_acpi_bus_match(struct device *dev)  { -	return dev->bus == &pci_bus_type; +	return dev_is_pci(dev);  }  static struct acpi_bus_type acpi_pci_bus = { diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index 9042fdbd7244..25f0bc659164 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -19,6 +19,7 @@  #include <linux/cpu.h>  #include <linux/pm_runtime.h>  #include <linux/suspend.h> +#include <linux/kexec.h>  #include "pci.h"  struct pci_dynid { @@ -288,12 +289,27 @@ static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev,  	int error, node;  	struct drv_dev_and_id ddi = { drv, dev, id }; -	/* Execute driver initialization on node where the device's -	   bus is attached to.  This way the driver likely allocates -	   its local memory on the right node without any need to -	   change it. */ +	/* +	 * Execute driver initialization on node where the device is +	 * attached.  This way the driver likely allocates its local memory +	 * on the right node. +	 */  	node = dev_to_node(&dev->dev); -	if (node >= 0) { + +	/* +	 * On NUMA systems, we are likely to call a PF probe function using +	 * work_on_cpu().  If that probe calls pci_enable_sriov() (which +	 * adds the VF devices via pci_bus_add_device()), we may re-enter +	 * this function to call the VF probe function.  Calling +	 * work_on_cpu() again will cause a lockdep warning.  Since VFs are +	 * always on the same node as the PF, we can work around this by +	 * avoiding work_on_cpu() when we're already on the correct node. +	 * +	 * Preemption is enabled, so it's theoretically unsafe to use +	 * numa_node_id(), but even if we run the probe function on the +	 * wrong node, it should be functionally correct. 
+	 */ +	if (node >= 0 && node != numa_node_id()) {  		int cpu;  		get_online_cpus(); @@ -305,6 +321,7 @@ static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev,  		put_online_cpus();  	} else  		error = local_pci_probe(&ddi); +  	return error;  } @@ -399,12 +416,17 @@ static void pci_device_shutdown(struct device *dev)  	pci_msi_shutdown(pci_dev);  	pci_msix_shutdown(pci_dev); +#ifdef CONFIG_KEXEC  	/* -	 * Turn off Bus Master bit on the device to tell it to not -	 * continue to do DMA. Don't touch devices in D3cold or unknown states. +	 * If this is a kexec reboot, turn off Bus Master bit on the +	 * device to tell it to not continue to do DMA. Don't touch +	 * devices in D3cold or unknown states. +	 * If it is not a kexec reboot, firmware will hit the PCI +	 * devices with big hammer and stop their DMA any way.  	 */ -	if (pci_dev->current_state <= PCI_D3hot) +	if (kexec_in_progress && (pci_dev->current_state <= PCI_D3hot))  		pci_clear_master(pci_dev); +#endif  }  #ifdef CONFIG_PM diff --git a/drivers/pci/pcie/aer/aerdrv_acpi.c b/drivers/pci/pcie/aer/aerdrv_acpi.c index cf611ab2193a..4d6991794fa2 100644 --- a/drivers/pci/pcie/aer/aerdrv_acpi.c +++ b/drivers/pci/pcie/aer/aerdrv_acpi.c @@ -50,14 +50,37 @@ struct aer_hest_parse_info {  	int firmware_first;  }; +static int hest_source_is_pcie_aer(struct acpi_hest_header *hest_hdr) +{ +	if (hest_hdr->type == ACPI_HEST_TYPE_AER_ROOT_PORT || +	    hest_hdr->type == ACPI_HEST_TYPE_AER_ENDPOINT || +	    hest_hdr->type == ACPI_HEST_TYPE_AER_BRIDGE) +		return 1; +	return 0; +} +  static int aer_hest_parse(struct acpi_hest_header *hest_hdr, void *data)  {  	struct aer_hest_parse_info *info = data;  	struct acpi_hest_aer_common *p;  	int ff; +	if (!hest_source_is_pcie_aer(hest_hdr)) +		return 0; +  	p = (struct acpi_hest_aer_common *)(hest_hdr + 1);  	ff = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST); + +	/* +	 * If no specific device is supplied, determine whether +	 * FIRMWARE_FIRST is set for *any* PCIe device. +	 */ +	if (!info->pci_dev) { +		info->firmware_first |= ff; +		return 0; +	} + +	/* Otherwise, check the specific device */  	if (p->flags & ACPI_HEST_GLOBAL) {  		if (hest_match_type(hest_hdr, info->pci_dev))  			info->firmware_first = ff; @@ -97,33 +120,20 @@ int pcie_aer_get_firmware_first(struct pci_dev *dev)  static bool aer_firmware_first; -static int aer_hest_parse_aff(struct acpi_hest_header *hest_hdr, void *data) -{ -	struct acpi_hest_aer_common *p; - -	if (aer_firmware_first) -		return 0; - -	switch (hest_hdr->type) { -	case ACPI_HEST_TYPE_AER_ROOT_PORT: -	case ACPI_HEST_TYPE_AER_ENDPOINT: -	case ACPI_HEST_TYPE_AER_BRIDGE: -		p = (struct acpi_hest_aer_common *)(hest_hdr + 1); -		aer_firmware_first = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST); -	default: -		return 0; -	} -} -  /**   * aer_acpi_firmware_first - Check if APEI should control AER.   
*/  bool aer_acpi_firmware_first(void)  {  	static bool parsed = false; +	struct aer_hest_parse_info info = { +		.pci_dev	= NULL,	/* Check all PCIe devices */ +		.firmware_first	= 0, +	};  	if (!parsed) { -		apei_hest_parse(aer_hest_parse_aff, NULL); +		apei_hest_parse(aer_hest_parse, &info); +		aer_firmware_first = info.firmware_first;  		parsed = true;  	}  	return aer_firmware_first; diff --git a/drivers/pci/pcie/aer/aerdrv_errprint.c b/drivers/pci/pcie/aer/aerdrv_errprint.c index 2c7c9f5f592c..34ff7026440c 100644 --- a/drivers/pci/pcie/aer/aerdrv_errprint.c +++ b/drivers/pci/pcie/aer/aerdrv_errprint.c @@ -124,6 +124,21 @@ static const char *aer_agent_string[] = {  	"Transmitter ID"  }; +static void __print_tlp_header(struct pci_dev *dev, +			       struct aer_header_log_regs *t) +{ +	unsigned char *tlp = (unsigned char *)&t; + +	dev_err(&dev->dev, "  TLP Header:" +		" %02x%02x%02x%02x %02x%02x%02x%02x" +		" %02x%02x%02x%02x %02x%02x%02x%02x\n", +		*(tlp + 3), *(tlp + 2), *(tlp + 1), *tlp, +		*(tlp + 7), *(tlp + 6), *(tlp + 5), *(tlp + 4), +		*(tlp + 11), *(tlp + 10), *(tlp + 9), +		*(tlp + 8), *(tlp + 15), *(tlp + 14), +		*(tlp + 13), *(tlp + 12)); +} +  static void __aer_print_error(struct pci_dev *dev,  			      struct aer_err_info *info)  { @@ -153,48 +168,39 @@ static void __aer_print_error(struct pci_dev *dev,  void aer_print_error(struct pci_dev *dev, struct aer_err_info *info)  { +	int layer, agent;  	int id = ((dev->bus->number << 8) | dev->devfn); -	if (info->status == 0) { +	if (!info->status) {  		dev_err(&dev->dev,  			"PCIe Bus Error: severity=%s, type=Unaccessible, "  			"id=%04x(Unregistered Agent ID)\n",  			aer_error_severity_string[info->severity], id); -	} else { -		int layer, agent; +		goto out; +	} -		layer = AER_GET_LAYER_ERROR(info->severity, info->status); -		agent = AER_GET_AGENT(info->severity, info->status); +	layer = AER_GET_LAYER_ERROR(info->severity, info->status); +	agent = AER_GET_AGENT(info->severity, info->status); -		dev_err(&dev->dev, -			"PCIe Bus Error: severity=%s, type=%s, id=%04x(%s)\n", -			aer_error_severity_string[info->severity], -			aer_error_layer[layer], id, aer_agent_string[agent]); +	dev_err(&dev->dev, +		"PCIe Bus Error: severity=%s, type=%s, id=%04x(%s)\n", +		aer_error_severity_string[info->severity], +		aer_error_layer[layer], id, aer_agent_string[agent]); -		dev_err(&dev->dev, -			"  device [%04x:%04x] error status/mask=%08x/%08x\n", -			dev->vendor, dev->device, -			info->status, info->mask); +	dev_err(&dev->dev, +		"  device [%04x:%04x] error status/mask=%08x/%08x\n", +		dev->vendor, dev->device, +		info->status, info->mask); -		__aer_print_error(dev, info); +	__aer_print_error(dev, info); -		if (info->tlp_header_valid) { -			unsigned char *tlp = (unsigned char *) &info->tlp; -			dev_err(&dev->dev, "  TLP Header:" -				" %02x%02x%02x%02x %02x%02x%02x%02x" -				" %02x%02x%02x%02x %02x%02x%02x%02x\n", -				*(tlp + 3), *(tlp + 2), *(tlp + 1), *tlp, -				*(tlp + 7), *(tlp + 6), *(tlp + 5), *(tlp + 4), -				*(tlp + 11), *(tlp + 10), *(tlp + 9), -				*(tlp + 8), *(tlp + 15), *(tlp + 14), -				*(tlp + 13), *(tlp + 12)); -		} -	} +	if (info->tlp_header_valid) +		__print_tlp_header(dev, &info->tlp); +out:  	if (info->id && info->error_dev_num > 1 && info->id == id) -		dev_err(&dev->dev, -			   "  Error of this Agent(%04x) is reported first\n", -			id); +		dev_err(&dev->dev, "  Error of this Agent(%04x) is reported first\n", id); +  	trace_aer_event(dev_name(&dev->dev), (info->status & ~info->mask),  			info->severity);  } @@ -228,6 +234,7 
@@ void cper_print_aer(struct pci_dev *dev, int cper_severity,  	const char **status_strs;  	aer_severity = cper_severity_to_aer(cper_severity); +  	if (aer_severity == AER_CORRECTABLE) {  		status = aer->cor_status;  		mask = aer->cor_mask; @@ -240,28 +247,22 @@ void cper_print_aer(struct pci_dev *dev, int cper_severity,  		status_strs_size = ARRAY_SIZE(aer_uncorrectable_error_string);  		tlp_header_valid = status & AER_LOG_TLP_MASKS;  	} +  	layer = AER_GET_LAYER_ERROR(aer_severity, status);  	agent = AER_GET_AGENT(aer_severity, status); -	dev_err(&dev->dev, "aer_status: 0x%08x, aer_mask: 0x%08x\n", -	       status, mask); + +	dev_err(&dev->dev, "aer_status: 0x%08x, aer_mask: 0x%08x\n", status, mask);  	cper_print_bits("", status, status_strs, status_strs_size);  	dev_err(&dev->dev, "aer_layer=%s, aer_agent=%s\n", -	       aer_error_layer[layer], aer_agent_string[agent]); +		aer_error_layer[layer], aer_agent_string[agent]); +  	if (aer_severity != AER_CORRECTABLE)  		dev_err(&dev->dev, "aer_uncor_severity: 0x%08x\n", -		       aer->uncor_severity); -	if (tlp_header_valid) { -		const unsigned char *tlp; -		tlp = (const unsigned char *)&aer->header_log; -		dev_err(&dev->dev, "aer_tlp_header:" -			" %02x%02x%02x%02x %02x%02x%02x%02x" -			" %02x%02x%02x%02x %02x%02x%02x%02x\n", -			*(tlp + 3), *(tlp + 2), *(tlp + 1), *tlp, -			*(tlp + 7), *(tlp + 6), *(tlp + 5), *(tlp + 4), -			*(tlp + 11), *(tlp + 10), *(tlp + 9), -			*(tlp + 8), *(tlp + 15), *(tlp + 14), -			*(tlp + 13), *(tlp + 12)); -	} +			aer->uncor_severity); + +	if (tlp_header_valid) +		__print_tlp_header(dev, &aer->header_log); +  	trace_aer_event(dev_name(&dev->dev), (status & ~mask),  			aer_severity);  } diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c index 0b6e76604068..ce9d9ae17bfd 100644 --- a/drivers/pci/pcie/portdrv_core.c +++ b/drivers/pci/pcie/portdrv_core.c @@ -554,7 +554,7 @@ int pcie_port_service_register(struct pcie_port_service_driver *new)  	if (pcie_ports_disabled)  		return -ENODEV; -	new->driver.name = (char *)new->name; +	new->driver.name = new->name;  	new->driver.bus = &pcie_port_bus_type;  	new->driver.probe = pcie_port_probe_service;  	new->driver.remove = pcie_port_remove_service; diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c index 219a4106480a..2e344a5581ae 100644 --- a/drivers/pci/setup-bus.c +++ b/drivers/pci/setup-bus.c @@ -538,7 +538,8 @@ static void pci_setup_bridge_io(struct pci_bus *bus)  	struct pci_bus_region region;  	unsigned long io_mask;  	u8 io_base_lo, io_limit_lo; -	u32 l, io_upper16; +	u16 l; +	u32 io_upper16;  	io_mask = PCI_IO_RANGE_MASK;  	if (bridge->io_window_1k) @@ -548,11 +549,10 @@ static void pci_setup_bridge_io(struct pci_bus *bus)  	res = bus->resource[0];  	pcibios_resource_to_bus(bridge, ®ion, res);  	if (res->flags & IORESOURCE_IO) { -		pci_read_config_dword(bridge, PCI_IO_BASE, &l); -		l &= 0xffff0000; +		pci_read_config_word(bridge, PCI_IO_BASE, &l);  		io_base_lo = (region.start >> 8) & io_mask;  		io_limit_lo = (region.end >> 8) & io_mask; -		l |= ((u32) io_limit_lo << 8) | io_base_lo; +		l = ((u16) io_limit_lo << 8) | io_base_lo;  		/* Set up upper 16 bits of I/O base/limit. */  		io_upper16 = (region.end & 0xffff0000) | (region.start >> 16);  		dev_info(&bridge->dev, "  bridge window %pR\n", res); @@ -564,7 +564,7 @@ static void pci_setup_bridge_io(struct pci_bus *bus)  	/* Temporarily disable the I/O range before updating PCI_IO_BASE. 
*/  	pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, 0x0000ffff);  	/* Update lower 16 bits of I/O base/limit. */ -	pci_write_config_dword(bridge, PCI_IO_BASE, l); +	pci_write_config_word(bridge, PCI_IO_BASE, l);  	/* Update upper 16 bits of I/O base/limit. */  	pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, io_upper16);  } @@ -665,21 +665,23 @@ static void pci_bridge_check_ranges(struct pci_bus *bus)  	pci_read_config_word(bridge, PCI_IO_BASE, &io);  	if (!io) { -		pci_write_config_word(bridge, PCI_IO_BASE, 0xf0f0); +		pci_write_config_word(bridge, PCI_IO_BASE, 0xe0f0);  		pci_read_config_word(bridge, PCI_IO_BASE, &io);  		pci_write_config_word(bridge, PCI_IO_BASE, 0x0);  	}  	if (io)  		b_res[0].flags |= IORESOURCE_IO; +  	/*  DECchip 21050 pass 2 errata: the bridge may miss an address  	    disconnect boundary by one PCI data phase.  	    Workaround: do not use prefetching on this device. */  	if (bridge->vendor == PCI_VENDOR_ID_DEC && bridge->device == 0x0001)  		return; +  	pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem);  	if (!pmem) {  		pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, -					       0xfff0fff0); +					       0xffe0fff0);  		pci_read_config_dword(bridge, PCI_PREF_MEMORY_BASE, &pmem);  		pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, 0x0);  	} diff --git a/include/linux/kexec.h b/include/linux/kexec.h index d78d28a733b1..5fd33dc1fe3a 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h @@ -198,6 +198,9 @@ extern u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];  extern size_t vmcoreinfo_size;  extern size_t vmcoreinfo_max_size; +/* flag to track if kexec reboot is in progress */ +extern bool kexec_in_progress; +  int __init parse_crashkernel(char *cmdline, unsigned long long system_ram,  		unsigned long long *crash_size, unsigned long long *crash_base);  int parse_crashkernel_high(char *cmdline, unsigned long long system_ram, diff --git a/include/linux/msi.h b/include/linux/msi.h index 009b02481436..92a2f991262a 100644 --- a/include/linux/msi.h +++ b/include/linux/msi.h @@ -60,10 +60,10 @@ void arch_teardown_msi_irq(unsigned int irq);  int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type);  void arch_teardown_msi_irqs(struct pci_dev *dev);  int arch_msi_check_device(struct pci_dev* dev, int nvec, int type); -void arch_restore_msi_irqs(struct pci_dev *dev, int irq); +void arch_restore_msi_irqs(struct pci_dev *dev);  void default_teardown_msi_irqs(struct pci_dev *dev); -void default_restore_msi_irqs(struct pci_dev *dev, int irq); +void default_restore_msi_irqs(struct pci_dev *dev);  u32 default_msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag);  u32 default_msix_mask_irq(struct msi_desc *desc, u32 flag); diff --git a/include/linux/pci.h b/include/linux/pci.h index 1084a15175e0..eb8078aeadc8 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -1567,65 +1567,65 @@ enum pci_fixup_pass {  /* Anonymous variables would be nice... 
*/  #define DECLARE_PCI_FIXUP_SECTION(section, name, vendor, device, class,	\  				  class_shift, hook)			\ -	static const struct pci_fixup __pci_fixup_##name __used		\ +	static const struct pci_fixup __PASTE(__pci_fixup_##name,__LINE__) __used	\  	__attribute__((__section__(#section), aligned((sizeof(void *)))))    \  		= { vendor, device, class, class_shift, hook };  #define DECLARE_PCI_FIXUP_CLASS_EARLY(vendor, device, class,		\  					 class_shift, hook)		\  	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early,			\ -		vendor##device##hook, vendor, device, class, class_shift, hook) +		hook, vendor, device, class, class_shift, hook)  #define DECLARE_PCI_FIXUP_CLASS_HEADER(vendor, device, class,		\  					 class_shift, hook)		\  	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header,			\ -		vendor##device##hook, vendor, device, class, class_shift, hook) +		hook, vendor, device, class, class_shift, hook)  #define DECLARE_PCI_FIXUP_CLASS_FINAL(vendor, device, class,		\  					 class_shift, hook)		\  	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final,			\ -		vendor##device##hook, vendor, device, class, class_shift, hook) +		hook, vendor, device, class, class_shift, hook)  #define DECLARE_PCI_FIXUP_CLASS_ENABLE(vendor, device, class,		\  					 class_shift, hook)		\  	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable,			\ -		vendor##device##hook, vendor, device, class, class_shift, hook) +		hook, vendor, device, class, class_shift, hook)  #define DECLARE_PCI_FIXUP_CLASS_RESUME(vendor, device, class,		\  					 class_shift, hook)		\  	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume,			\ -		resume##vendor##device##hook, vendor, device, class,	\ +		resume##hook, vendor, device, class,	\  		class_shift, hook)  #define DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(vendor, device, class,	\  					 class_shift, hook)		\  	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early,		\ -		resume_early##vendor##device##hook, vendor, device,	\ +		resume_early##hook, vendor, device,	\  		class, class_shift, hook)  #define DECLARE_PCI_FIXUP_CLASS_SUSPEND(vendor, device, class,		\  					 class_shift, hook)		\  	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend,			\ -		suspend##vendor##device##hook, vendor, device, class,	\ +		suspend##hook, vendor, device, class,	\  		class_shift, hook)  #define DECLARE_PCI_FIXUP_EARLY(vendor, device, hook)			\  	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early,			\ -		vendor##device##hook, vendor, device, PCI_ANY_ID, 0, hook) +		hook, vendor, device, PCI_ANY_ID, 0, hook)  #define DECLARE_PCI_FIXUP_HEADER(vendor, device, hook)			\  	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header,			\ -		vendor##device##hook, vendor, device, PCI_ANY_ID, 0, hook) +		hook, vendor, device, PCI_ANY_ID, 0, hook)  #define DECLARE_PCI_FIXUP_FINAL(vendor, device, hook)			\  	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final,			\ -		vendor##device##hook, vendor, device, PCI_ANY_ID, 0, hook) +		hook, vendor, device, PCI_ANY_ID, 0, hook)  #define DECLARE_PCI_FIXUP_ENABLE(vendor, device, hook)			\  	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable,			\ -		vendor##device##hook, vendor, device, PCI_ANY_ID, 0, hook) +		hook, vendor, device, PCI_ANY_ID, 0, hook)  #define DECLARE_PCI_FIXUP_RESUME(vendor, device, hook)			\  	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume,			\ -		resume##vendor##device##hook, vendor, device,		\ +		resume##hook, vendor, device,		\  		PCI_ANY_ID, 0, hook)  #define DECLARE_PCI_FIXUP_RESUME_EARLY(vendor, device, hook)		\  	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early,		\ -		resume_early##vendor##device##hook, vendor, device,	\ +		resume_early##hook, vendor, 
device,	\  		PCI_ANY_ID, 0, hook)  #define DECLARE_PCI_FIXUP_SUSPEND(vendor, device, hook)			\  	DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend,			\ -		suspend##vendor##device##hook, vendor, device,		\ +		suspend##hook, vendor, device,		\  		PCI_ANY_ID, 0, hook)  #ifdef CONFIG_PCI_QUIRKS diff --git a/kernel/kexec.c b/kernel/kexec.c index 490afc03627e..d0d8fca54065 100644 --- a/kernel/kexec.c +++ b/kernel/kexec.c @@ -47,6 +47,9 @@ u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];  size_t vmcoreinfo_size;  size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data); +/* Flag to indicate we are going to kexec a new kernel */ +bool kexec_in_progress = false; +  /* Location of the reserved area for the crash kernel */  struct resource crashk_res = {  	.name  = "Crash kernel", @@ -1675,6 +1678,7 @@ int kernel_kexec(void)  	} else  #endif  	{ +		kexec_in_progress = true;  		kernel_restart_prepare(NULL);  		printk(KERN_EMERG "Starting new kernel\n");  		machine_shutdown(); diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 987293d03ebc..5690b8eabfbc 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -2840,19 +2840,6 @@ already_gone:  	return false;  } -static bool __flush_work(struct work_struct *work) -{ -	struct wq_barrier barr; - -	if (start_flush_work(work, &barr)) { -		wait_for_completion(&barr.done); -		destroy_work_on_stack(&barr.work); -		return true; -	} else { -		return false; -	} -} -  /**   * flush_work - wait for a work to finish executing the last queueing instance   * @work: the work to flush @@ -2866,10 +2853,18 @@ static bool __flush_work(struct work_struct *work)   */  bool flush_work(struct work_struct *work)  { +	struct wq_barrier barr; +  	lock_map_acquire(&work->lockdep_map);  	lock_map_release(&work->lockdep_map); -	return __flush_work(work); +	if (start_flush_work(work, &barr)) { +		wait_for_completion(&barr.done); +		destroy_work_on_stack(&barr.work); +		return true; +	} else { +		return false; +	}  }  EXPORT_SYMBOL_GPL(flush_work); @@ -4814,14 +4809,7 @@ long work_on_cpu(int cpu, long (*fn)(void *), void *arg)  	INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn);  	schedule_work_on(cpu, &wfc.work); - -	/* -	 * The work item is on-stack and can't lead to deadlock through -	 * flushing.  Use __flush_work() to avoid spurious lockdep warnings -	 * when work_on_cpu()s are nested. -	 */ -	__flush_work(&wfc.work); - +	flush_work(&wfc.work);  	return wfc.ret;  }  EXPORT_SYMBOL_GPL(work_on_cpu); | 
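The MSI hunks above change arch_restore_msi_irqs() to take only the pci_dev, so restoring every vector of a device is now the hook's job rather than the caller's. A minimal sketch of an architecture override under the new signature (hypothetical arch code; the weak default in drivers/pci/msi.c above does the equivalent by iterating the device's msi_list and restoring each entry):

/*
 * Hypothetical override of the weak arch hook, shown only to illustrate
 * the new per-device contract: walk msi_list and rewrite every vector.
 */
#include <linux/msi.h>
#include <linux/pci.h>

void arch_restore_msi_irqs(struct pci_dev *dev)
{
	struct msi_desc *entry;

	list_for_each_entry(entry, &dev->msi_list, list)
		write_msi_msg(entry->irq, &entry->msg);
}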
