Manually merged:
  backport-5.4
    011-kbuild-export-SUBARCH.patch
  layerscape
    701-net-0262-net-dsa-ocelot-add-tagger-for-Ocelot-Felix-switches.patch
All other modifications made by update_kernel.sh
Build-tested: x86/64, lantiq/xrx200, ramips/mt7621
Run-tested: ipq806x (R7800), lantiq/xrx200, x86/64, ramips (RT-AC57U)
No dmesg regressions, everything functional
Signed-off-by: John Audia <graysky@archlinux.us>
[minor commit message adjustments]
Signed-off-by: Adrian Schmutzler <freifunk@adrianschmutzler.de>

From b763f24aed409296eb76d085c279b2c40462f8a1 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig <hch@lst.de>
Date: Tue, 19 Nov 2019 17:38:58 +0100
Subject: [PATCH] dma-direct: exclude dma_direct_map_resource from the
 min_low_pfn check

commit 68a33b1794665ba8a1d1ef1d3bfcc7c587d380a6 upstream.

The valid memory address check in dma_capable only makes sense when mapping
normal memory, not when using dma_map_resource to map a device resource.
Add a new boolean argument to dma_capable to exclude that check for the
dma_map_resource case.
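
A minimal sketch of the resulting call pattern (illustrative only, not part
of the upstream change; it mirrors the hunks below):

	/* normal memory (e.g. the map_page/swiotlb paths): pass true so
	 * the min_low_pfn sanity check still applies */
	if (unlikely(!dma_capable(dev, dma_addr, size, true)))
		return DMA_MAPPING_ERROR;

	/* device resource (dma_direct_map_resource): pass false to skip
	 * the check, since an MMIO address may legitimately sit below
	 * the first RAM pfn */
	if (unlikely(!dma_capable(dev, dma_addr, size, false)))
		return DMA_MAPPING_ERROR;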

Fixes: b12d66278dd6 ("dma-direct: check for overflows on 32 bit DMA addresses")
Reported-by: Marek Szyprowski <m.szyprowski@samsung.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Marek Szyprowski <m.szyprowski@samsung.com>
Tested-by: Marek Szyprowski <m.szyprowski@samsung.com>
---
 arch/x86/kernel/amd_gart_64.c | 4 ++--
 drivers/xen/swiotlb-xen.c     | 4 ++--
 include/linux/dma-direct.h    | 5 +++--
 kernel/dma/direct.c           | 4 ++--
 kernel/dma/swiotlb.c          | 2 +-
 5 files changed, 10 insertions(+), 9 deletions(-)

--- a/arch/x86/kernel/amd_gart_64.c
+++ b/arch/x86/kernel/amd_gart_64.c
@@ -185,13 +185,13 @@ static void iommu_full(struct device *de
 static inline int
 need_iommu(struct device *dev, unsigned long addr, size_t size)
 {
-	return force_iommu || !dma_capable(dev, addr, size);
+	return force_iommu || !dma_capable(dev, addr, size, true);
 }
 
 static inline int
 nonforced_iommu(struct device *dev, unsigned long addr, size_t size)
 {
-	return !dma_capable(dev, addr, size);
+	return !dma_capable(dev, addr, size, true);
 }
 
 /* Map a single continuous physical area into the IOMMU.
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -381,7 +381,7 @@ static dma_addr_t xen_swiotlb_map_page(s
 	 * we can safely return the device addr and not worry about bounce
 	 * buffering it.
 	 */
-	if (dma_capable(dev, dev_addr, size) &&
+	if (dma_capable(dev, dev_addr, size, true) &&
 	    !range_straddles_page_boundary(phys, size) &&
 		!xen_arch_need_swiotlb(dev, phys, dev_addr) &&
 		swiotlb_force != SWIOTLB_FORCE)
@@ -403,7 +403,7 @@ static dma_addr_t xen_swiotlb_map_page(s
 	/*
 	 * Ensure that the address returned is DMA'ble
 	 */
-	if (unlikely(!dma_capable(dev, dev_addr, size))) {
+	if (unlikely(!dma_capable(dev, dev_addr, size, true))) {
 		swiotlb_tbl_unmap_single(dev, map, size, size, dir,
 				attrs | DMA_ATTR_SKIP_CPU_SYNC);
 		return DMA_MAPPING_ERROR;
--- a/include/linux/dma-direct.h
+++ b/include/linux/dma-direct.h
@@ -51,14 +51,15 @@ static inline phys_addr_t dma_to_phys(st
 	return __sme_clr(__dma_to_phys(dev, daddr));
 }
 
-static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
+static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size,
+		bool is_ram)
 {
 	dma_addr_t end = addr + size - 1;
 
 	if (!dev->dma_mask)
 		return false;
 
-	if (!IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) &&
+	if (is_ram && !IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) &&
 	    min(addr, end) < phys_to_dma(dev, PFN_PHYS(min_low_pfn)))
 		return false;
 
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -326,7 +326,7 @@ static inline bool dma_direct_possible(s
 		size_t size)
 {
 	return swiotlb_force != SWIOTLB_FORCE &&
-		dma_capable(dev, dma_addr, size);
+		dma_capable(dev, dma_addr, size, true);
 }
 
 dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
@@ -375,7 +375,7 @@ dma_addr_t dma_direct_map_resource(struc
 {
 	dma_addr_t dma_addr = paddr;
 
-	if (unlikely(!dma_capable(dev, dma_addr, size))) {
+	if (unlikely(!dma_capable(dev, dma_addr, size, false))) {
 		report_addr(dev, dma_addr, size);
 		return DMA_MAPPING_ERROR;
 	}
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -678,7 +678,7 @@ bool swiotlb_map(struct device *dev, phy
 
 	/* Ensure that the address returned is DMA'ble */
 	*dma_addr = __phys_to_dma(dev, *phys);
-	if (unlikely(!dma_capable(dev, *dma_addr, size))) {
+	if (unlikely(!dma_capable(dev, *dma_addr, size, true))) {
 		swiotlb_tbl_unmap_single(dev, *phys, size, size, dir,
 			attrs | DMA_ATTR_SKIP_CPU_SYNC);
 		return false;