Diffstat (limited to 'arch/arm/mm')

 arch/arm/mm/Makefile     |   1
 arch/arm/mm/cache-l2x0.c |  76
 arch/arm/mm/cache-v6.S   |  17
 arch/arm/mm/fault.c      |   4
 arch/arm/mm/mmu.c        | 106
 arch/arm/mm/rodata.c     | 159
 6 files changed, 322 insertions(+), 41 deletions(-)
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile index 9e51be96f63..8045a48c847 100644 --- a/arch/arm/mm/Makefile +++ b/arch/arm/mm/Makefile @@ -7,6 +7,7 @@ obj-y				:= dma-mapping.o extable.o fault.o init.o \  obj-$(CONFIG_MMU)		+= fault-armv.o flush.o idmap.o ioremap.o \  				   mmap.o pgd.o mmu.o +obj-$(CONFIG_DEBUG_RODATA)	+= rodata.o  ifneq ($(CONFIG_MMU),y)  obj-y				+= nommu.o diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c index c465faca51b..90a130f98ac 100644 --- a/arch/arm/mm/cache-l2x0.c +++ b/arch/arm/mm/cache-l2x0.c @@ -33,6 +33,9 @@ static void __iomem *l2x0_base;  static DEFINE_RAW_SPINLOCK(l2x0_lock);  static u32 l2x0_way_mask;	/* Bitmask of active ways */  static u32 l2x0_size; +static u32 l2x0_cache_id; +static unsigned int l2x0_sets; +static unsigned int l2x0_ways;  static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;  /* Aurora don't have the cache ID register available, so we have to @@ -49,6 +52,13 @@ struct l2x0_of_data {  static bool of_init = false; +static inline bool is_pl310_rev(int rev) +{ +	return (l2x0_cache_id & +		(L2X0_CACHE_ID_PART_MASK | L2X0_CACHE_ID_REV_MASK)) == +			(L2X0_CACHE_ID_PART_L310 | rev); +} +  static inline void cache_wait_way(void __iomem *reg, unsigned long mask)  {  	/* wait for cache operation by line or way to complete */ @@ -137,6 +147,23 @@ static void l2x0_cache_sync(void)  	raw_spin_unlock_irqrestore(&l2x0_lock, flags);  } +#ifdef CONFIG_PL310_ERRATA_727915 +static void l2x0_for_each_set_way(void __iomem *reg) +{ +	int set; +	int way; +	unsigned long flags; + +	for (way = 0; way < l2x0_ways; way++) { +		raw_spin_lock_irqsave(&l2x0_lock, flags); +		for (set = 0; set < l2x0_sets; set++) +			writel_relaxed((way << 28) | (set << 5), reg); +		cache_sync(); +		raw_spin_unlock_irqrestore(&l2x0_lock, flags); +	} +} +#endif +  static void __l2x0_flush_all(void)  {  	debug_writel(0x03); @@ -150,6 +177,13 @@ static void l2x0_flush_all(void)  {  	unsigned long flags; +#ifdef CONFIG_PL310_ERRATA_727915 +	if (is_pl310_rev(REV_PL310_R2P0)) { +		l2x0_for_each_set_way(l2x0_base + L2X0_CLEAN_INV_LINE_IDX); +		return; +	} +#endif +  	/* clean all ways */  	raw_spin_lock_irqsave(&l2x0_lock, flags);  	__l2x0_flush_all(); @@ -160,11 +194,20 @@ static void l2x0_clean_all(void)  {  	unsigned long flags; +#ifdef CONFIG_PL310_ERRATA_727915 +	if (is_pl310_rev(REV_PL310_R2P0)) { +		l2x0_for_each_set_way(l2x0_base + L2X0_CLEAN_LINE_IDX); +		return; +	} +#endif +  	/* clean all ways */  	raw_spin_lock_irqsave(&l2x0_lock, flags); +	debug_writel(0x03);  	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_WAY);  	cache_wait_way(l2x0_base + L2X0_CLEAN_WAY, l2x0_way_mask);  	cache_sync(); +	debug_writel(0x00);  	raw_spin_unlock_irqrestore(&l2x0_lock, flags);  } @@ -323,65 +366,64 @@ static void l2x0_unlock(u32 cache_id)  void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)  {  	u32 aux; -	u32 cache_id;  	u32 way_size = 0; -	int ways;  	int way_size_shift = L2X0_WAY_SIZE_SHIFT;  	const char *type;  	l2x0_base = base;  	if (cache_id_part_number_from_dt) -		cache_id = cache_id_part_number_from_dt; +		l2x0_cache_id = cache_id_part_number_from_dt;  	else -		cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID); +		l2x0_cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);  	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);  	aux &= aux_mask;  	aux |= aux_val;  	/* Determine the number of ways */ -	switch (cache_id & L2X0_CACHE_ID_PART_MASK) { +	switch (l2x0_cache_id & L2X0_CACHE_ID_PART_MASK) {  	case L2X0_CACHE_ID_PART_L310:  		
if (aux & (1 << 16)) -			ways = 16; +			l2x0_ways = 16;  		else -			ways = 8; +			l2x0_ways = 8;  		type = "L310";  #ifdef CONFIG_PL310_ERRATA_753970  		/* Unmapped register. */  		sync_reg_offset = L2X0_DUMMY_REG;  #endif -		if ((cache_id & L2X0_CACHE_ID_RTL_MASK) <= L2X0_CACHE_ID_RTL_R3P0) +		if ((l2x0_cache_id & L2X0_CACHE_ID_RTL_MASK) <= L2X0_CACHE_ID_RTL_R3P0)  			outer_cache.set_debug = pl310_set_debug;  		break;  	case L2X0_CACHE_ID_PART_L210: -		ways = (aux >> 13) & 0xf; +		l2x0_ways = (aux >> 13) & 0xf;  		type = "L210";  		break;  	case AURORA_CACHE_ID:  		sync_reg_offset = AURORA_SYNC_REG; -		ways = (aux >> 13) & 0xf; -		ways = 2 << ((ways + 1) >> 2); +		l2x0_ways = (aux >> 13) & 0xf; +		l2x0_ways = 2 << ((l2x0_ways + 1) >> 2);  		way_size_shift = AURORA_WAY_SIZE_SHIFT;  		type = "Aurora";  		break;  	default:  		/* Assume unknown chips have 8 ways */ -		ways = 8; +		l2x0_ways = 8;  		type = "L2x0 series";  		break;  	} -	l2x0_way_mask = (1 << ways) - 1; +	l2x0_way_mask = (1 << l2x0_ways) - 1;  	/*  	 * L2 cache Size =  Way size * Number of ways  	 */  	way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17; -	way_size = 1 << (way_size + way_size_shift); +	way_size = SZ_1K << (way_size + way_size_shift); -	l2x0_size = ways * way_size * SZ_1K; +	l2x0_size = l2x0_ways * way_size; +	l2x0_sets = way_size / CACHE_LINE_SIZE;  	/*  	 * Check if l2x0 controller is already enabled. @@ -390,7 +432,7 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)  	 */  	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {  		/* Make sure that I&D is not locked down when starting */ -		l2x0_unlock(cache_id); +		l2x0_unlock(l2x0_cache_id);  		/* l2x0 controller is disabled */  		writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL); @@ -419,7 +461,7 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)  	printk(KERN_INFO "%s cache controller enabled\n", type);  	printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n", -			ways, cache_id, aux, l2x0_size); +			l2x0_ways, l2x0_cache_id, aux, l2x0_size);  }  #ifdef CONFIG_OF diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S index d8fd4d4bd3d..7a3d3d8d98d 100644 --- a/arch/arm/mm/cache-v6.S +++ b/arch/arm/mm/cache-v6.S @@ -270,6 +270,11 @@ v6_dma_clean_range:   *	- end     - virtual end address of region   */  ENTRY(v6_dma_flush_range) +#ifdef CONFIG_CACHE_FLUSH_RANGE_LIMIT +	sub	r2, r1, r0 +	cmp	r2, #CONFIG_CACHE_FLUSH_RANGE_LIMIT +	bhi	v6_dma_flush_dcache_all +#endif  #ifdef CONFIG_DMA_CACHE_RWFO  	ldrb	r2, [r0]		@ read for ownership  	strb	r2, [r0]		@ write for ownership @@ -292,6 +297,18 @@ ENTRY(v6_dma_flush_range)  	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer  	mov	pc, lr +#ifdef CONFIG_CACHE_FLUSH_RANGE_LIMIT +v6_dma_flush_dcache_all: +	mov	r0, #0 +#ifdef HARVARD_CACHE +	mcr	p15, 0, r0, c7, c14, 0		@ D cache clean+invalidate +#else +	mcr	p15, 0, r0, c7, c15, 0		@ Cache clean+invalidate +#endif +	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer +	mov	pc, lr +#endif +  /*   *	dma_map_area(start, size, dir)   *	- start	- kernel virtual start address diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index 5dbf13f954f..b835c9e3b77 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c @@ -276,10 +276,10 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)  		local_irq_enable();  	/* -	 * If we're in an interrupt or have no user +	 * If we're in an interrupt, or have no irqs, or have no user  	 * context, we must not take the fault..  	 
*/ -	if (in_atomic() || !mm) +	if (in_atomic() || irqs_disabled() || !mm)  		goto no_context;  	/* diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 4d409e6a552..bcfc6ffb90c 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -595,25 +595,47 @@ static void __init *early_alloc(unsigned long sz)  	return early_alloc_aligned(sz, sz);  } -static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr, unsigned long prot) +static pte_t * __init early_pte_alloc(pmd_t *pmd) +{ +	if (pmd_none(*pmd) || pmd_bad(*pmd)) +		return early_alloc(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE); +	return pmd_page_vaddr(*pmd); +} + +static void __init early_pte_install(pmd_t *pmd, pte_t *pte, unsigned long prot) +{ +	__pmd_populate(pmd, __pa(pte), prot); +	BUG_ON(pmd_bad(*pmd)); +} + +#ifdef CONFIG_HIGHMEM +static pte_t * __init early_pte_alloc_and_install(pmd_t *pmd, +	unsigned long addr, unsigned long prot)  {  	if (pmd_none(*pmd)) { -		pte_t *pte = early_alloc(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE); -		__pmd_populate(pmd, __pa(pte), prot); +		pte_t *pte = early_pte_alloc(pmd); +		early_pte_install(pmd, pte, prot);  	}  	BUG_ON(pmd_bad(*pmd));  	return pte_offset_kernel(pmd, addr);  } +#endif  static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,  				  unsigned long end, unsigned long pfn,  				  const struct mem_type *type)  { -	pte_t *pte = early_pte_alloc(pmd, addr, type->prot_l1); +	pte_t *start_pte = early_pte_alloc(pmd); +	pte_t *pte = start_pte + pte_index(addr); + +	/* If replacing a section mapping, the whole section must be replaced */ +	BUG_ON(!pmd_none(*pmd) && pmd_bad(*pmd) && ((addr | end) & ~PMD_MASK)); +  	do {  		set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), 0);  		pfn++;  	} while (pte++, addr += PAGE_SIZE, addr != end); +	early_pte_install(pmd, start_pte, type->prot_l1);  }  static void __init __map_init_section(pmd_t *pmd, unsigned long addr, @@ -645,7 +667,8 @@ static void __init __map_init_section(pmd_t *pmd, unsigned long addr,  static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,  				      unsigned long end, phys_addr_t phys, -				      const struct mem_type *type) +				      const struct mem_type *type, +				      bool force_pages)  {  	pmd_t *pmd = pmd_offset(pud, addr);  	unsigned long next; @@ -662,7 +685,8 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,  		 * aligned to a section boundary.  		 */  		if (type->prot_sect && -				((addr | next | phys) & ~SECTION_MASK) == 0) { +				((addr | next | phys) & ~SECTION_MASK) == 0 && +				!force_pages) {  			__map_init_section(pmd, addr, next, phys, type);  		} else {  			alloc_init_pte(pmd, addr, next, @@ -675,14 +699,15 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,  }  static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr, -	unsigned long end, unsigned long phys, const struct mem_type *type) +	unsigned long end, unsigned long phys, const struct mem_type *type, +	bool force_pages)  {  	pud_t *pud = pud_offset(pgd, addr);  	unsigned long next;  	do {  		next = pud_addr_end(addr, end); -		alloc_init_pmd(pud, addr, next, phys, type); +		alloc_init_pmd(pud, addr, next, phys, type, force_pages);  		phys += next - addr;  	} while (pud++, addr = next, addr != end);  } @@ -756,7 +781,7 @@ static void __init create_36bit_mapping(struct map_desc *md,   * offsets, and we take full advantage of sections and   * supersections.   
*/ -static void __init create_mapping(struct map_desc *md) +static void __init create_mapping(struct map_desc *md, bool force_pages)  {  	unsigned long addr, length, end;  	phys_addr_t phys; @@ -806,7 +831,7 @@ static void __init create_mapping(struct map_desc *md)  	do {  		unsigned long next = pgd_addr_end(addr, end); -		alloc_init_pud(pgd, addr, next, phys, type); +		alloc_init_pud(pgd, addr, next, phys, type, force_pages);  		phys += next - addr;  		addr = next; @@ -828,7 +853,7 @@ void __init iotable_init(struct map_desc *io_desc, int nr)  	svm = early_alloc_aligned(sizeof(*svm) * nr, __alignof__(*svm));  	for (md = io_desc; nr; md++, nr--) { -		create_mapping(md); +		create_mapping(md, false);  		vm = &svm->vm;  		vm->addr = (void *)(md->virtual & PAGE_MASK); @@ -949,7 +974,7 @@ void __init debug_ll_io_init(void)  	map.virtual &= PAGE_MASK;  	map.length = PAGE_SIZE;  	map.type = MT_DEVICE; -	create_mapping(&map); +	create_mapping(&map, false);  }  #endif @@ -994,6 +1019,28 @@ void __init sanity_check_meminfo(void)  		struct membank *bank = &meminfo.bank[j];  		*bank = meminfo.bank[i]; +#ifdef CONFIG_SPARSEMEM +		if (pfn_to_section_nr(bank_pfn_start(bank)) != +		    pfn_to_section_nr(bank_pfn_end(bank) - 1)) { +			phys_addr_t sz; +			unsigned long start_pfn = bank_pfn_start(bank); +			unsigned long end_pfn = SECTION_ALIGN_UP(start_pfn + 1); +			sz = ((phys_addr_t)(end_pfn - start_pfn) << PAGE_SHIFT); + +			if (meminfo.nr_banks >= NR_BANKS) { +				pr_crit("NR_BANKS too low, ignoring %lld bytes of memory\n", +					(unsigned long long)(bank->size - sz)); +			} else { +				memmove(bank + 1, bank, +					(meminfo.nr_banks - i) * sizeof(*bank)); +				meminfo.nr_banks++; +				bank[1].size -= sz; +				bank[1].start = __pfn_to_phys(end_pfn); +			} +			bank->size = sz; +		} +#endif +  		if (bank->start > ULONG_MAX)  			highmem = 1; @@ -1191,7 +1238,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)  	map.virtual = MODULES_VADDR;  	map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;  	map.type = MT_ROM; -	create_mapping(&map); +	create_mapping(&map, false);  #endif  	/* @@ -1202,14 +1249,14 @@ static void __init devicemaps_init(struct machine_desc *mdesc)  	map.virtual = FLUSH_BASE;  	map.length = SZ_1M;  	map.type = MT_CACHECLEAN; -	create_mapping(&map); +	create_mapping(&map, false);  #endif  #ifdef FLUSH_BASE_MINICACHE  	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);  	map.virtual = FLUSH_BASE_MINICACHE;  	map.length = SZ_1M;  	map.type = MT_MINICLEAN; -	create_mapping(&map); +	create_mapping(&map, false);  #endif  	/* @@ -1221,12 +1268,12 @@ static void __init devicemaps_init(struct machine_desc *mdesc)  	map.virtual = 0xffff0000;  	map.length = PAGE_SIZE;  	map.type = MT_HIGH_VECTORS; -	create_mapping(&map); +	create_mapping(&map, false);  	if (!vectors_high()) {  		map.virtual = 0;  		map.type = MT_LOW_VECTORS; -		create_mapping(&map); +		create_mapping(&map, false);  	}  	/* @@ -1252,20 +1299,23 @@ static void __init devicemaps_init(struct machine_desc *mdesc)  static void __init kmap_init(void)  {  #ifdef CONFIG_HIGHMEM -	pkmap_page_table = early_pte_alloc(pmd_off_k(PKMAP_BASE), +	pkmap_page_table = early_pte_alloc_and_install(pmd_off_k(PKMAP_BASE),  		PKMAP_BASE, _PAGE_KERNEL_TABLE);  #endif  } +  static void __init map_lowmem(void)  {  	struct memblock_region *reg; +	phys_addr_t start; +	phys_addr_t end; +	struct map_desc map;  	/* Map all the lowmem memory banks. 
*/  	for_each_memblock(memory, reg) { -		phys_addr_t start = reg->base; -		phys_addr_t end = start + reg->size; -		struct map_desc map; +		start = reg->base; +		end = start + reg->size;  		if (end > arm_lowmem_limit)  			end = arm_lowmem_limit; @@ -1277,8 +1327,20 @@ static void __init map_lowmem(void)  		map.length = end - start;  		map.type = MT_MEMORY; -		create_mapping(&map); +		create_mapping(&map, false);  	} + +#ifdef CONFIG_DEBUG_RODATA +	start = __pa(_stext) & PMD_MASK; +	end = ALIGN(__pa(__end_rodata), PMD_SIZE); + +	map.pfn = __phys_to_pfn(start); +	map.virtual = __phys_to_virt(start); +	map.length = end - start; +	map.type = MT_MEMORY; + +	create_mapping(&map, true); +#endif  }  /* diff --git a/arch/arm/mm/rodata.c b/arch/arm/mm/rodata.c new file mode 100644 index 00000000000..9a8eb841c42 --- /dev/null +++ b/arch/arm/mm/rodata.c @@ -0,0 +1,159 @@ +/* + *  linux/arch/arm/mm/rodata.c + * + *  Copyright (C) 2011 Google, Inc. + * + *  Author: Colin Cross <ccross@android.com> + * + *  Based on x86 implementation in arch/x86/mm/init_32.c + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/kernel.h> +#include <linux/mm.h> +#include <linux/module.h> + +#include <asm/cache.h> +#include <asm/pgtable.h> +#include <asm/rodata.h> +#include <asm/sections.h> +#include <asm/tlbflush.h> + +#include "mm.h" + +static int kernel_set_to_readonly __read_mostly; + +#ifdef CONFIG_DEBUG_RODATA_TEST +static const int rodata_test_data = 0xC3; + +static noinline void rodata_test(void) +{ +	int result; + +	pr_info("%s: attempting to write to read-only section:\n", __func__); + +	if (*(volatile int *)&rodata_test_data != 0xC3) { +		pr_err("read only data changed before test\n"); +		return; +	} + +	/* +	 * Attempt to to write to rodata_test_data, trapping the expected +	 * data abort.  If the trap executed, result will be 1.  If it didn't, +	 * result will be 0xFF. 
+	 */ +	asm volatile( +		"0:	str	%[zero], [%[rodata_test_data]]\n" +		"	mov	%[result], #0xFF\n" +		"	b	2f\n" +		"1:	mov	%[result], #1\n" +		"2:\n" + +		/* Exception fixup - if store at label 0 faults, jumps to 1 */ +		".pushsection __ex_table, \"a\"\n" +		"	.long	0b, 1b\n" +		".popsection\n" + +		: [result] "=r" (result) +		: [rodata_test_data] "r" (&rodata_test_data), [zero] "r" (0) +		: "memory" +	); + +	if (result == 1) +		pr_info("write to read-only section trapped, success\n"); +	else +		pr_err("write to read-only section NOT trapped, test failed\n"); + +	if (*(volatile int *)&rodata_test_data != 0xC3) +		pr_err("read only data changed during write\n"); +} +#else +static inline void rodata_test(void) { } +#endif + +static int set_page_attributes(unsigned long virt, int numpages, +	pte_t (*f)(pte_t)) +{ +	pmd_t *pmd; +	pte_t *pte; +	unsigned long start = virt; +	unsigned long end = virt + (numpages << PAGE_SHIFT); +	unsigned long pmd_end; + +	while (virt < end) { +		pmd = pmd_off_k(virt); +		pmd_end = min(ALIGN(virt + 1, PMD_SIZE), end); + +		if ((pmd_val(*pmd) & PMD_TYPE_MASK) != PMD_TYPE_TABLE) { +			pr_err("%s: pmd %p=%08lx for %08lx not page table\n", +				__func__, pmd, pmd_val(*pmd), virt); +			virt = pmd_end; +			continue; +		} + +		while (virt < pmd_end) { +			pte = pte_offset_kernel(pmd, virt); +			set_pte_ext(pte, f(*pte), 0); +			virt += PAGE_SIZE; +		} +	} + +	flush_tlb_kernel_range(start, end); + +	return 0; +} + +int set_memory_ro(unsigned long virt, int numpages) +{ +	return set_page_attributes(virt, numpages, pte_wrprotect); +} +EXPORT_SYMBOL(set_memory_ro); + +int set_memory_rw(unsigned long virt, int numpages) +{ +	return set_page_attributes(virt, numpages, pte_mkwrite); +} +EXPORT_SYMBOL(set_memory_rw); + +void set_kernel_text_rw(void) +{ +	unsigned long start = PAGE_ALIGN((unsigned long)_text); +	unsigned long size = PAGE_ALIGN((unsigned long)__end_rodata) - start; + +	if (!kernel_set_to_readonly) +		return; + +	pr_debug("Set kernel text: %lx - %lx to read-write\n", +		 start, start + size); + +	set_memory_rw(start, size >> PAGE_SHIFT); +} + +void set_kernel_text_ro(void) +{ +	unsigned long start = PAGE_ALIGN((unsigned long)_text); +	unsigned long size = PAGE_ALIGN((unsigned long)__end_rodata) - start; + +	if (!kernel_set_to_readonly) +		return; + +	pr_info_once("Write protecting the kernel text section %lx - %lx\n", +		start, start + size); + +	pr_debug("Set kernel text: %lx - %lx to read only\n", +		 start, start + size); + +	set_memory_ro(start, size >> PAGE_SHIFT); +} + +void mark_rodata_ro(void) +{ +	kernel_set_to_readonly = 1; + +	set_kernel_text_ro(); + +	rodata_test(); +}  |
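
Some notes on the mechanics this diff introduces follow; the snippets are illustrative sketches written against the code shown above, not part of the patch.

The cache-l2x0.c changes compute the way size directly in bytes (SZ_1K << (field + shift)) and derive the number of sets from it, which the errata 727915 workaround needs in order to walk the cache by set and way. A worked example, assuming a common PL310 configuration (16 ways, AUX_CTRL way-size field of 3, 32-byte lines):

/* Hypothetical PL310: way-size field = 3, L2X0_WAY_SIZE_SHIFT = 3,
 * 16 ways, CACHE_LINE_SIZE = 32. */
unsigned int way_size = 1024 << (3 + 3); /* SZ_1K << 6 = 65536 bytes per way */
unsigned int sets     = way_size / 32;   /* 65536 / 32 = 2048 sets           */
unsigned int size     = 16 * way_size;   /* 16 ways    = 1 MiB of L2         */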
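
The errata 727915 path avoids the background clean/invalidate-by-way operation on PL310 r2p0 and instead touches every line through the by-index registers. The value written by l2x0_for_each_set_way() places the way number in the top bits and the set index starting at bit 5 (the line-offset bits of a 32-byte line); a hypothetical helper spells out the encoding:

/* Illustration only: how l2x0_for_each_set_way() composes the value written
 * to L2X0_CLEAN_LINE_IDX / L2X0_CLEAN_INV_LINE_IDX. */
static inline u32 l2x0_way_set_index(unsigned int way, unsigned int set)
{
	return (way << 28) | (set << 5); /* way in bits [31:28], set from bit 5 up */
}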
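
In cache-v6.S, v6_dma_flush_range() now falls back to a whole-D-cache clean+invalidate when the requested range exceeds CONFIG_CACHE_FLUSH_RANGE_LIMIT, on the reasoning that walking a very large range line by line costs more than flushing the entire cache. A C rendering of that check, with a hypothetical stand-in for the v6_dma_flush_dcache_all fallback, might look like:

/* Sketch only: C equivalent of the new early-out in v6_dma_flush_range(). */
static void v6_dma_flush_range_sketch(unsigned long start, unsigned long end)
{
	if (end - start > CONFIG_CACHE_FLUSH_RANGE_LIMIT) {
		flush_whole_dcache(); /* hypothetical stand-in for v6_dma_flush_dcache_all */
		return;
	}
	/* ... otherwise clean+invalidate each line from start to end, as before ... */
}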
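
The new CONFIG_SPARSEMEM block in sanity_check_meminfo() splits any memory bank that crosses a sparsemem section boundary so that each bank sits inside a single section; the remainder is checked again on a later pass if it still crosses a boundary. As a worked example with hypothetical 256 MiB sections, a bank at 0x28000000 of size 0x30000000 (768 MiB) becomes a 128 MiB bank ending at the section boundary 0x30000000 plus a 640 MiB bank starting there.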
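
With CONFIG_DEBUG_RODATA, map_lowmem() remaps the _stext..__end_rodata region with small (4 KiB) pages (create_mapping(&map, true)), which is what allows rodata.c to change protections on individual pages later; mark_rodata_ro() then write-protects the kernel text and runs the optional write test. Since the exported set_memory_ro()/set_memory_rw() take a page count, a hypothetical caller that needs to patch kernel text could do something like:

/* Hypothetical caller, not part of this diff: make one page of kernel text
 * writable, patch a word, then restore the read-only mapping. */
static void patch_text_word(unsigned long addr, u32 insn)
{
	unsigned long page = addr & PAGE_MASK;

	set_memory_rw(page, 1);                        /* one page writable     */
	*(u32 *)addr = insn;
	flush_icache_range(addr, addr + sizeof(insn)); /* keep I-cache coherent */
	set_memory_ro(page, 1);                        /* back to read-only     */
}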