mirror of git://git.openwrt.org/openwrt/openwrt.git
Refreshed all patches.

Compile-tested on: cns3xxx
Runtime-tested on: cns3xxx

Signed-off-by: Koen Vandeputte <koen.vandeputte@ncentric.com>
--- a/arch/arm/kernel/atags.h
+++ b/arch/arm/kernel/atags.h
@@ -5,7 +5,7 @@ void convert_to_tag_list(struct tag *tag
 const struct machine_desc *setup_machine_tags(phys_addr_t __atags_pointer,
 	unsigned int machine_nr);
 #else
-static inline const struct machine_desc *
+static inline const struct machine_desc * __init __noreturn
 setup_machine_tags(phys_addr_t __atags_pointer, unsigned int machine_nr)
 {
 	early_print("no ATAGS support: can't continue\n");
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -347,7 +347,7 @@ static inline bool cpu_have_feature(unsi
 }
 
 /* System capability check for constant caps */
-static inline bool __cpus_have_const_cap(int num)
+static __always_inline bool __cpus_have_const_cap(int num)
 {
 	if (num >= ARM64_NCAPS)
 		return false;
@@ -361,7 +361,7 @@ static inline bool cpus_have_cap(unsigne
 	return test_bit(num, cpu_hwcaps);
 }
 
-static inline bool cpus_have_const_cap(int num)
+static __always_inline bool cpus_have_const_cap(int num)
 {
 	if (static_branch_likely(&arm64_const_caps_ready))
 		return __cpus_have_const_cap(num);
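Why __always_inline here: these arm64 capability helpers are, presumably, expected to be expanded at every call site so that a constant capability number can be folded by the optimizer (the "constant caps" comment above points the same way); once CONFIG_OPTIMIZE_INLINING lets the compiler leave plain `inline` functions out of line, that expectation has to be stated explicitly. A minimal user-space sketch of the pattern, with hypothetical names rather than the kernel's:

#include <stdbool.h>

#define NCAPS 64
#define LONG_BITS (8 * sizeof(unsigned long))

static unsigned long cap_bits[NCAPS / 64];	/* stand-in for cpu_hwcaps */

/* Forced inlining: with a constant `num`, the bounds check and the bit index
 * fold away in the caller, which plain `inline` no longer guarantees once the
 * compiler is allowed to uninline. */
static inline __attribute__((always_inline)) bool caps_have(unsigned int num)
{
	if (num >= NCAPS)
		return false;
	return cap_bits[num / LONG_BITS] & (1UL << (num % LONG_BITS));
}

bool have_cap3(void)
{
	return caps_have(3);	/* expanded inline, folds to a single bit test */
}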
--- a/arch/mips/include/asm/bitops.h
+++ b/arch/mips/include/asm/bitops.h
@@ -462,7 +462,7 @@ static inline void __clear_bit_unlock(un
  * Return the bit position (0..63) of the most significant 1 bit in a word
  * Returns -1 if no 1 bit exists
  */
-static inline unsigned long __fls(unsigned long word)
+static __always_inline unsigned long __fls(unsigned long word)
 {
 	int num;
 
@@ -528,7 +528,7 @@ static inline unsigned long __fls(unsign
  * Returns 0..SZLONG-1
  * Undefined if no bit exists, so code should check against 0 first.
  */
-static inline unsigned long __ffs(unsigned long word)
+static __always_inline unsigned long __ffs(unsigned long word)
 {
 	return __fls(word & -word);
 }
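The comments in the two hunks above spell out the contract (__fls returns the position of the most significant set bit, __ffs the least significant, undefined for zero), and the __ffs body shows the identity the kernel leans on: word & -word isolates the lowest set bit. A tiny stand-alone check of that identity (plain C, names are illustrative, not the kernel's):

#include <assert.h>

/* Position of the most significant set bit, 0-based. */
static unsigned long my_fls(unsigned long word)
{
	return (8 * sizeof(unsigned long)) - 1 - __builtin_clzl(word);
}

/* Least significant set bit: isolate it with two's complement, then reuse
 * my_fls(), mirroring __ffs(word) = __fls(word & -word) from the hunk above. */
static unsigned long my_ffs(unsigned long word)
{
	return my_fls(word & -word);
}

int main(void)
{
	assert(my_fls(0x28UL) == 5);	/* 0x28 = 101000b, highest set bit is bit 5 */
	assert(my_ffs(0x28UL) == 3);	/* lowest set bit is bit 3 */
	return 0;
}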
--- a/arch/mips/kernel/cpu-bugs64.c
+++ b/arch/mips/kernel/cpu-bugs64.c
@@ -42,8 +42,8 @@ static inline void align_mod(const int a
 		: GCC_IMM_ASM() (align), GCC_IMM_ASM() (mod));
 }
 
-static inline void mult_sh_align_mod(long *v1, long *v2, long *w,
-				     const int align, const int mod)
+static __always_inline void mult_sh_align_mod(long *v1, long *v2, long *w,
+					      const int align, const int mod)
 {
 	unsigned long flags;
 	int m1, m2;
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -474,14 +474,14 @@ static int __init prom_next_node(phandle
 	}
 }
 
-static inline int prom_getprop(phandle node, const char *pname,
-			       void *value, size_t valuelen)
+static inline int __init prom_getprop(phandle node, const char *pname,
+				      void *value, size_t valuelen)
 {
 	return call_prom("getprop", 4, 1, node, ADDR(pname),
 			 (u32)(unsigned long) value, (u32) valuelen);
 }
 
-static inline int prom_getproplen(phandle node, const char *pname)
+static inline int __init prom_getproplen(phandle node, const char *pname)
 {
 	return call_prom("getproplen", 2, 1, node, ADDR(pname));
 }
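These two prom helpers pick up __init rather than __always_inline: they are only used on the early boot path, so once the compiler is free to emit them out of line they should, presumably, land in init text next to their __init callers instead of surviving after init memory is freed (and to avoid section-mismatch warnings). A rough user-space analogue of pinning a function into a named section, purely illustrative:

/* The kernel's __init marker is (roughly) a __section(".init.text") placement
 * plus a few extra attributes; only the section placement is sketched here,
 * and the section name is illustrative. */
static int __attribute__((section(".init.text"))) early_only_helper(int x)
{
	return x * 2;			/* pretend this only matters during boot */
}

int boot_entry(void)
{
	return early_only_helper(21);
}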
--- a/arch/s390/include/asm/cpacf.h
+++ b/arch/s390/include/asm/cpacf.h
@@ -184,7 +184,7 @@ static inline int __cpacf_check_opcode(u
 	}
 }
 
-static inline int cpacf_query(unsigned int opcode, cpacf_mask_t *mask)
+static __always_inline int cpacf_query(unsigned int opcode, cpacf_mask_t *mask)
 {
 	if (__cpacf_check_opcode(opcode)) {
 		__cpacf_query(opcode, mask);
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -284,20 +284,6 @@ config CPA_DEBUG
 	---help---
 	  Do change_page_attr() self-tests every 30 seconds.
 
-config OPTIMIZE_INLINING
-	bool "Allow gcc to uninline functions marked 'inline'"
-	---help---
-	  This option determines if the kernel forces gcc to inline the functions
-	  developers have marked 'inline'. Doing so takes away freedom from gcc to
-	  do what it thinks is best, which is desirable for the gcc 3.x series of
-	  compilers. The gcc 4.x series have a rewritten inlining algorithm and
-	  enabling this option will generate a smaller kernel there. Hopefully
-	  this algorithm is so good that allowing gcc 4.x and above to make the
-	  decision will become the default in the future. Until then this option
-	  is there to test gcc for this.
-
-	  If unsure, say N.
-
 config DEBUG_ENTRY
 	bool "Debug low-level entry code"
 	depends on DEBUG_KERNEL
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -305,6 +305,20 @@ config HEADERS_CHECK
 	  exported to $(INSTALL_HDR_PATH) (usually 'usr/include' in
 	  your build tree), to make sure they're suitable.
 
+config OPTIMIZE_INLINING
+	bool "Allow compiler to uninline functions marked 'inline'"
+	help
+	  This option determines if the kernel forces gcc to inline the functions
+	  developers have marked 'inline'. Doing so takes away freedom from gcc to
+	  do what it thinks is best, which is desirable for the gcc 3.x series of
+	  compilers. The gcc 4.x series have a rewritten inlining algorithm and
+	  enabling this option will generate a smaller kernel there. Hopefully
+	  this algorithm is so good that allowing gcc 4.x and above to make the
+	  decision will become the default in the future. Until then this option
+	  is there to test gcc for this.
+
+	  If unsure, say N.
+
 config DEBUG_SECTION_MISMATCH
 	bool "Enable full Section mismatch analysis"
 	help
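With the option relocated to lib/Kconfig.debug it becomes selectable on every architecture instead of x86 only, which is presumably why the earlier hunks pin the inlining-sensitive helpers with __always_inline or __init first. What the option itself changes can be sketched in a few lines of plain C (illustrative only; the real switch is the `inline` macro in the next hunk):

/* With OPTIMIZE_INLINING disabled the kernel rewrites `inline` so both helpers
 * behave like the second one; with it enabled, only the second one is
 * guaranteed to be expanded at the call site. */
static inline int maybe_inlined(int x)		/* compiler may emit this out of line */
{
	return x + 1;
}

static inline __attribute__((always_inline)) int always_inlined(int x)
{
	return x + 1;				/* always expanded in callers */
}

int caller(int v)
{
	return maybe_inlined(v) + always_inlined(v);
}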
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -296,9 +296,6 @@ config ZONE_DMA32
 config AUDIT_ARCH
 	def_bool y if X86_64
 
-config ARCH_SUPPORTS_OPTIMIZED_INLINING
-	def_bool y
-
 config ARCH_SUPPORTS_DEBUG_PAGEALLOC
 	def_bool y
 
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -90,8 +90,7 @@
  * of extern inline functions at link time.
  * A lot of inline functions can cause havoc with function tracing.
  */
-#if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) ||		\
-    !defined(CONFIG_OPTIMIZE_INLINING) || (__GNUC__ < 4)
+#if !defined(CONFIG_OPTIMIZE_INLINING) || (__GNUC__ < 4)
 #define inline \
 	inline __attribute__((always_inline, unused)) notrace __gnu_inline
 #else
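For reference, when CONFIG_OPTIMIZE_INLINING is unset (or gcc is older than 4) the block above redefines `inline` itself, so every `inline` in kernel code is forced and kept out of function tracing. A self-contained sketch of that spelling, with the kernel-specific shorthands expanded to the underlying GCC attributes (an approximation, not the full kernel definitions):

/* Rough equivalents of the kernel shorthands used in the hunk above. */
#define notrace      __attribute__((no_instrument_function))
#define __gnu_inline __attribute__((gnu_inline))

/* Self-referential macro, as in compiler-gcc.h: the `inline` inside the
 * replacement list is not re-expanded, so this simply decorates the keyword. */
#define inline \
	inline __attribute__((always_inline, unused)) notrace __gnu_inline

static inline int add_one(int x)	/* now guaranteed to be inlined, never traced */
{
	return x + 1;
}

int demo(void)
{
	return add_one(41);
}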