| field | value | date |
|---|---|---|
| author | Rusty Russell <rusty@rustcorp.com.au> | 2013-04-22 14:10:39 +0930 |
| committer | Rusty Russell <rusty@rustcorp.com.au> | 2013-04-22 15:31:39 +0930 |
| commit | e1d12606f756bdb8328a66a2873dca6c46bcb4e5 | |
| tree | 25464220a1fe74504cd5a8d0801913594e25539d | |
| parent | 93a2cdff98243df06bafd3c4f3b31b38f0d0fe3e | |
lguest: make check_gpte et al. return bool.
This is a bit neater: we can return immediately when a PTE/PGD/PMD entry
is invalid (which also kills the guest), so we don't risk using invalid
entries as the code is reshuffled. A standalone sketch of the pattern
follows the diff below.
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | drivers/lguest/page_tables.c | 33 |

1 file changed, 23 insertions(+), 10 deletions(-)
```diff
diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
index 758466299b0..f074f34acb8 100644
--- a/drivers/lguest/page_tables.c
+++ b/drivers/lguest/page_tables.c
@@ -259,26 +259,35 @@ static void release_pte(pte_t pte)
 }
 /*:*/
 
-static void check_gpte(struct lg_cpu *cpu, pte_t gpte)
+static bool check_gpte(struct lg_cpu *cpu, pte_t gpte)
 {
 	if ((pte_flags(gpte) & _PAGE_PSE) ||
-	    pte_pfn(gpte) >= cpu->lg->pfn_limit)
+	    pte_pfn(gpte) >= cpu->lg->pfn_limit) {
 		kill_guest(cpu, "bad page table entry");
+		return false;
+	}
+	return true;
 }
 
-static void check_gpgd(struct lg_cpu *cpu, pgd_t gpgd)
+static bool check_gpgd(struct lg_cpu *cpu, pgd_t gpgd)
 {
 	if ((pgd_flags(gpgd) & ~CHECK_GPGD_MASK) ||
-	   (pgd_pfn(gpgd) >= cpu->lg->pfn_limit))
+	    (pgd_pfn(gpgd) >= cpu->lg->pfn_limit)) {
 		kill_guest(cpu, "bad page directory entry");
+		return false;
+	}
+	return true;
 }
 
 #ifdef CONFIG_X86_PAE
-static void check_gpmd(struct lg_cpu *cpu, pmd_t gpmd)
+static bool check_gpmd(struct lg_cpu *cpu, pmd_t gpmd)
 {
 	if ((pmd_flags(gpmd) & ~_PAGE_TABLE) ||
-	   (pmd_pfn(gpmd) >= cpu->lg->pfn_limit))
+	    (pmd_pfn(gpmd) >= cpu->lg->pfn_limit)) {
 		kill_guest(cpu, "bad page middle directory entry");
+		return false;
+	}
+	return true;
 }
 #endif
 
@@ -336,7 +345,8 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
 			return false;
 		}
 		/* We check that the Guest pgd is OK. */
-		check_gpgd(cpu, gpgd);
+		if (!check_gpgd(cpu, gpgd))
+			return false;
 		/*
 		 * And we copy the flags to the shadow PGD entry.  The page
 		 * number in the shadow PGD is the page we just allocated.
@@ -372,7 +382,8 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
 		}
 
 		/* We check that the Guest pmd is OK. */
-		check_gpmd(cpu, gpmd);
+		if (!check_gpmd(cpu, gpmd))
+			return false;
 
 		/*
 		 * And we copy the flags to the shadow PMD entry.  The page
@@ -421,7 +432,8 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode)
 	 * Check that the Guest PTE flags are OK, and the page number is below
 	 * the pfn_limit (ie. not mapping the Launcher binary).
 	 */
-	check_gpte(cpu, gpte);
+	if (!check_gpte(cpu, gpte))
+		return false;
 
 	/* Add the _PAGE_ACCESSED and (for a write) _PAGE_DIRTY flag */
 	gpte = pte_mkyoung(gpte);
@@ -857,7 +869,8 @@ static void do_set_pte(struct lg_cpu *cpu, int idx,
 			 * micro-benchmark.
 			 */
 			if (pte_flags(gpte) & (_PAGE_DIRTY | _PAGE_ACCESSED)) {
-				check_gpte(cpu, gpte);
+				if (!check_gpte(cpu, gpte))
+					return;
 				set_pte(spte,
 					gpte_to_spte(cpu, gpte,
 						pte_flags(gpte) & _PAGE_DIRTY));
```
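The refactor above is a general idiom: a validation helper that used to signal failure only through a side effect (killing the guest) now also reports it to its caller, so the caller can bail out before touching the entry that failed validation. Here is a minimal, self-contained sketch of the same void-to-bool pattern; the types and names (`struct guest`, `entry_t`, `check_entry`, `map_entry`, `BAD_FLAG`) are hypothetical stand-ins for illustration, not the lguest API.

```c
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the guest state and a page-table entry. */
struct guest { unsigned long pfn_limit; };
typedef struct { unsigned long pfn; unsigned long flags; } entry_t;

#define BAD_FLAG 0x80UL	/* illustrative analogue of _PAGE_PSE */

static void kill_guest(struct guest *g, const char *why)
{
	/* In lguest this marks the guest dead; here we just report it. */
	fprintf(stderr, "killing guest (pfn_limit %lu): %s\n",
		g->pfn_limit, why);
}

/*
 * Before the patch, the checker returned void: it killed the guest but
 * the caller could not tell and might keep using the bad entry.
 * Returning bool lets the caller return immediately on failure.
 */
static bool check_entry(struct guest *g, entry_t e)
{
	if ((e.flags & BAD_FLAG) || e.pfn >= g->pfn_limit) {
		kill_guest(g, "bad entry");
		return false;	/* caller must not use this entry */
	}
	return true;
}

static bool map_entry(struct guest *g, entry_t e)
{
	if (!check_entry(g, e))
		return false;	/* early return: never touch invalid entries */

	/* ... safe to install the entry here ... */
	return true;
}

int main(void)
{
	struct guest g = { .pfn_limit = 1024 };
	entry_t good = { .pfn = 10,   .flags = 0 };
	entry_t bad  = { .pfn = 4096, .flags = 0 };

	printf("good mapped: %d\n", map_entry(&g, good));	/* prints 1 */
	printf("bad mapped:  %d\n", map_entry(&g, bad));	/* prints 0 */
	return 0;
}
```

The key property, the same one the patch buys: after a failed check the caller returns before dereferencing or installing the entry, so "kill the guest" and "stop using the entry" can no longer drift apart as the surrounding code is reorganized.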