Fixes:
- CVE-2020-10757

The "mtd: rawnand: Pass a nand_chip object to nand_release()" commit was
backported, which needed some adaptations to other code.

Run tested: ath79
Build tested: ath79

Signed-off-by: Hauke Mehrtens <hauke@hauke-m.de>
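For context, the adaptation mentioned above follows from the upstream change that made nand_release() take a struct nand_chip * instead of a struct mtd_info *. A minimal sketch of what an affected caller typically looks like after the backport is shown below; the remove() function and the driver-data layout are hypothetical and not taken from this patch.

	#include <linux/mtd/rawnand.h>
	#include <linux/platform_device.h>

	/* Hypothetical driver remove path, only to illustrate the new
	 * nand_release() signature introduced by the backported commit. */
	static int example_nand_remove(struct platform_device *pdev)
	{
		struct mtd_info *mtd = platform_get_drvdata(pdev);

		/* Previously: nand_release(mtd);
		 * After the backport the nand_chip is passed instead: */
		nand_release(mtd_to_nand(mtd));

		return 0;
	}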
--- a/drivers/clk/clk-devres.c
+++ b/drivers/clk/clk-devres.c
@@ -34,6 +34,17 @@ struct clk *devm_clk_get(struct device *
 }
 EXPORT_SYMBOL(devm_clk_get);
 
+struct clk *devm_clk_get_optional(struct device *dev, const char *id)
+{
+	struct clk *clk = devm_clk_get(dev, id);
+
+	if (clk == ERR_PTR(-ENOENT))
+		return NULL;
+
+	return clk;
+}
+EXPORT_SYMBOL(devm_clk_get_optional);
+
 struct clk_bulk_devres {
 	struct clk_bulk_data *clks;
 	int num_clks;
--- a/drivers/pci/controller/pcie-mediatek.c
+++ b/drivers/pci/controller/pcie-mediatek.c
@@ -15,6 +15,7 @@
 #include <linux/irqdomain.h>
 #include <linux/kernel.h>
 #include <linux/msi.h>
+#include <linux/module.h>
 #include <linux/of_address.h>
 #include <linux/of_pci.h>
 #include <linux/of_platform.h>
@@ -167,6 +168,7 @@ struct mtk_pcie_soc {
  * @phy: pointer to PHY control block
  * @lane: lane count
  * @slot: port slot
+ * @irq: GIC irq
  * @irq_domain: legacy INTx IRQ domain
  * @inner_domain: inner IRQ domain
  * @msi_domain: MSI IRQ domain
@@ -187,6 +189,7 @@ struct mtk_pcie_port {
 	struct phy *phy;
 	u32 lane;
 	u32 slot;
+	int irq;
 	struct irq_domain *irq_domain;
 	struct irq_domain *inner_domain;
 	struct irq_domain *msi_domain;
@@ -230,10 +233,8 @@ static void mtk_pcie_subsys_powerdown(st
 
 	clk_disable_unprepare(pcie->free_ck);
 
-	if (dev->pm_domain) {
-		pm_runtime_put_sync(dev);
-		pm_runtime_disable(dev);
-	}
+	pm_runtime_put_sync(dev);
+	pm_runtime_disable(dev);
 }
 
 static void mtk_pcie_port_free(struct mtk_pcie_port *port)
@@ -537,6 +538,27 @@ static void mtk_pcie_enable_msi(struct m
 	writel(val, port->base + PCIE_INT_MASK);
 }
 
+static void mtk_pcie_irq_teardown(struct mtk_pcie *pcie)
+{
+	struct mtk_pcie_port *port, *tmp;
+
+	list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
+		irq_set_chained_handler_and_data(port->irq, NULL, NULL);
+
+		if (port->irq_domain)
+			irq_domain_remove(port->irq_domain);
+
+		if (IS_ENABLED(CONFIG_PCI_MSI)) {
+			if (port->msi_domain)
+				irq_domain_remove(port->msi_domain);
+			if (port->inner_domain)
+				irq_domain_remove(port->inner_domain);
+		}
+
+		irq_dispose_mapping(port->irq);
+	}
+}
+
 static int mtk_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
 			     irq_hw_number_t hwirq)
 {
@@ -566,6 +588,7 @@ static int mtk_pcie_init_irq_domain(stru
 
 	port->irq_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
 						 &intx_domain_ops, port);
+	of_node_put(pcie_intc_node);
 	if (!port->irq_domain) {
 		dev_err(dev, "failed to get INTx IRQ domain\n");
 		return -ENODEV;
@@ -627,7 +650,7 @@ static int mtk_pcie_setup_irq(struct mtk
 	struct mtk_pcie *pcie = port->pcie;
 	struct device *dev = pcie->dev;
 	struct platform_device *pdev = to_platform_device(dev);
-	int err, irq;
+	int err;
 
 	err = mtk_pcie_init_irq_domain(port, node);
 	if (err) {
@@ -635,8 +658,9 @@ static int mtk_pcie_setup_irq(struct mtk
 		return err;
 	}
 
-	irq = platform_get_irq(pdev, port->slot);
-	irq_set_chained_handler_and_data(irq, mtk_pcie_intr_handler, port);
+	port->irq = platform_get_irq(pdev, port->slot);
+	irq_set_chained_handler_and_data(port->irq,
+					 mtk_pcie_intr_handler, port);
 
 	return 0;
 }
@@ -912,49 +936,29 @@ static int mtk_pcie_parse_port(struct mt
 
 	/* sys_ck might be divided into the following parts in some chips */
 	snprintf(name, sizeof(name), "ahb_ck%d", slot);
-	port->ahb_ck = devm_clk_get(dev, name);
-	if (IS_ERR(port->ahb_ck)) {
-		if (PTR_ERR(port->ahb_ck) == -EPROBE_DEFER)
-			return -EPROBE_DEFER;
-
-		port->ahb_ck = NULL;
-	}
+	port->ahb_ck = devm_clk_get_optional(dev, name);
+	if (IS_ERR(port->ahb_ck))
+		return PTR_ERR(port->ahb_ck);
 
 	snprintf(name, sizeof(name), "axi_ck%d", slot);
-	port->axi_ck = devm_clk_get(dev, name);
-	if (IS_ERR(port->axi_ck)) {
-		if (PTR_ERR(port->axi_ck) == -EPROBE_DEFER)
-			return -EPROBE_DEFER;
-
-		port->axi_ck = NULL;
-	}
+	port->axi_ck = devm_clk_get_optional(dev, name);
+	if (IS_ERR(port->axi_ck))
+		return PTR_ERR(port->axi_ck);
 
 	snprintf(name, sizeof(name), "aux_ck%d", slot);
-	port->aux_ck = devm_clk_get(dev, name);
-	if (IS_ERR(port->aux_ck)) {
-		if (PTR_ERR(port->aux_ck) == -EPROBE_DEFER)
-			return -EPROBE_DEFER;
-
-		port->aux_ck = NULL;
-	}
+	port->aux_ck = devm_clk_get_optional(dev, name);
+	if (IS_ERR(port->aux_ck))
+		return PTR_ERR(port->aux_ck);
 
 	snprintf(name, sizeof(name), "obff_ck%d", slot);
-	port->obff_ck = devm_clk_get(dev, name);
-	if (IS_ERR(port->obff_ck)) {
-		if (PTR_ERR(port->obff_ck) == -EPROBE_DEFER)
-			return -EPROBE_DEFER;
-
-		port->obff_ck = NULL;
-	}
+	port->obff_ck = devm_clk_get_optional(dev, name);
+	if (IS_ERR(port->obff_ck))
+		return PTR_ERR(port->obff_ck);
 
 	snprintf(name, sizeof(name), "pipe_ck%d", slot);
-	port->pipe_ck = devm_clk_get(dev, name);
-	if (IS_ERR(port->pipe_ck)) {
-		if (PTR_ERR(port->pipe_ck) == -EPROBE_DEFER)
-			return -EPROBE_DEFER;
-
-		port->pipe_ck = NULL;
-	}
+	port->pipe_ck = devm_clk_get_optional(dev, name);
+	if (IS_ERR(port->pipe_ck))
+		return PTR_ERR(port->pipe_ck);
 
 	snprintf(name, sizeof(name), "pcie-rst%d", slot);
 	port->reset = devm_reset_control_get_optional_exclusive(dev, name);
@@ -1007,10 +1011,8 @@ static int mtk_pcie_subsys_powerup(struc
 		pcie->free_ck = NULL;
 	}
 
-	if (dev->pm_domain) {
-		pm_runtime_enable(dev);
-		pm_runtime_get_sync(dev);
-	}
+	pm_runtime_enable(dev);
+	pm_runtime_get_sync(dev);
 
 	/* enable top level clock */
 	err = clk_prepare_enable(pcie->free_ck);
@@ -1022,10 +1024,8 @@ static int mtk_pcie_subsys_powerup(struc
 	return 0;
 
 err_free_ck:
-	if (dev->pm_domain) {
-		pm_runtime_put_sync(dev);
-		pm_runtime_disable(dev);
-	}
+	pm_runtime_put_sync(dev);
+	pm_runtime_disable(dev);
 
 	return err;
 }
@@ -1130,36 +1130,6 @@ static int mtk_pcie_request_resources(st
 		return err;
 
 	err = devm_pci_remap_iospace(dev, &pcie->pio, pcie->io.start);
-	if (err)
-		return err;
-
-	return 0;
-}
-
-static int mtk_pcie_register_host(struct pci_host_bridge *host)
-{
-	struct mtk_pcie *pcie = pci_host_bridge_priv(host);
-	struct pci_bus *child;
-	int err;
-
-	host->busnr = pcie->busn.start;
-	host->dev.parent = pcie->dev;
-	host->ops = pcie->soc->ops;
-	host->map_irq = of_irq_parse_and_map_pci;
-	host->swizzle_irq = pci_common_swizzle;
-	host->sysdata = pcie;
-
-	err = pci_scan_root_bus_bridge(host);
-	if (err < 0)
-		return err;
-
-	pci_bus_size_bridges(host->bus);
-	pci_bus_assign_resources(host->bus);
-
-	list_for_each_entry(child, &host->bus->children, node)
-		pcie_bus_configure_settings(child);
-
-	pci_bus_add_devices(host->bus);
 
 	return 0;
 }
@@ -1190,7 +1160,14 @@ static int mtk_pcie_probe(struct platfor
 	if (err)
 		goto put_resources;
 
-	err = mtk_pcie_register_host(host);
+	host->busnr = pcie->busn.start;
+	host->dev.parent = pcie->dev;
+	host->ops = pcie->soc->ops;
+	host->map_irq = of_irq_parse_and_map_pci;
+	host->swizzle_irq = pci_common_swizzle;
+	host->sysdata = pcie;
+
+	err = pci_host_probe(host);
 	if (err)
 		goto put_resources;
 
@@ -1203,6 +1180,80 @@ put_resources:
 	return err;
 }
 
+
+static void mtk_pcie_free_resources(struct mtk_pcie *pcie)
+{
+	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
+	struct list_head *windows = &host->windows;
+
+	pci_free_resource_list(windows);
+}
+
+static int mtk_pcie_remove(struct platform_device *pdev)
+{
+	struct mtk_pcie *pcie = platform_get_drvdata(pdev);
+	struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
+
+	pci_stop_root_bus(host->bus);
+	pci_remove_root_bus(host->bus);
+	mtk_pcie_free_resources(pcie);
+
+	mtk_pcie_irq_teardown(pcie);
+
+	mtk_pcie_put_resources(pcie);
+
+	return 0;
+}
+
+static int __maybe_unused mtk_pcie_suspend_noirq(struct device *dev)
+{
+	struct mtk_pcie *pcie = dev_get_drvdata(dev);
+	struct mtk_pcie_port *port;
+
+	if (list_empty(&pcie->ports))
+		return 0;
+
+	list_for_each_entry(port, &pcie->ports, list) {
+		clk_disable_unprepare(port->pipe_ck);
+		clk_disable_unprepare(port->obff_ck);
+		clk_disable_unprepare(port->axi_ck);
+		clk_disable_unprepare(port->aux_ck);
+		clk_disable_unprepare(port->ahb_ck);
+		clk_disable_unprepare(port->sys_ck);
+		phy_power_off(port->phy);
+		phy_exit(port->phy);
+	}
+
+	clk_disable_unprepare(pcie->free_ck);
+
+	return 0;
+}
+
+static int __maybe_unused mtk_pcie_resume_noirq(struct device *dev)
+{
+	struct mtk_pcie *pcie = dev_get_drvdata(dev);
+	struct mtk_pcie_port *port, *tmp;
+
+	if (list_empty(&pcie->ports))
+		return 0;
+
+	clk_prepare_enable(pcie->free_ck);
+
+	list_for_each_entry_safe(port, tmp, &pcie->ports, list)
+		mtk_pcie_enable_port(port);
+
+	/* In case of EP was removed while system suspend. */
+	if (list_empty(&pcie->ports))
+		clk_disable_unprepare(pcie->free_ck);
+
+	return 0;
+}
+
+static const struct dev_pm_ops mtk_pcie_pm_ops = {
+	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_pcie_suspend_noirq,
+				      mtk_pcie_resume_noirq)
+};
+
 static const struct mtk_pcie_soc mtk_pcie_soc_v1 = {
 	.ops = &mtk_pcie_ops,
 	.startup = mtk_pcie_startup_port,
@@ -1241,10 +1292,13 @@ static const struct of_device_id mtk_pci
 
 static struct platform_driver mtk_pcie_driver = {
 	.probe = mtk_pcie_probe,
+	.remove = mtk_pcie_remove,
 	.driver = {
 		.name = "mtk-pcie",
 		.of_match_table = mtk_pcie_ids,
 		.suppress_bind_attrs = true,
+		.pm = &mtk_pcie_pm_ops,
 	},
 };
-builtin_platform_driver(mtk_pcie_driver);
+module_platform_driver(mtk_pcie_driver);
+MODULE_LICENSE("GPL v2");
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -349,6 +349,17 @@ int __must_check devm_clk_bulk_get(struc
 struct clk *devm_clk_get(struct device *dev, const char *id);
 
 /**
+ * devm_clk_get_optional - lookup and obtain a managed reference to an optional
+ *                         clock producer.
+ * @dev: device for clock "consumer"
+ * @id: clock consumer ID
+ *
+ * Behaves the same as devm_clk_get() except where there is no clock producer.
+ * In this case, instead of returning -ENOENT, the function returns NULL.
+ */
+struct clk *devm_clk_get_optional(struct device *dev, const char *id);
+
+/**
  * devm_get_clk_from_child - lookup and obtain a managed reference to a
  *			     clock producer from child node.
  * @dev: device for clock "consumer"