Diffstat (limited to 'arch/tile/mm/pgtable.c')
-rw-r--r--   arch/tile/mm/pgtable.c   |   38
1 file changed, 24 insertions, 14 deletions
diff --git a/arch/tile/mm/pgtable.c b/arch/tile/mm/pgtable.c
index 87303693a07..2410aa899b3 100644
--- a/arch/tile/mm/pgtable.c
+++ b/arch/tile/mm/pgtable.c
@@ -177,14 +177,10 @@ void shatter_huge_page(unsigned long addr)
 	if (!pmd_huge_page(*pmd))
 		return;

-	/*
-	 * Grab the pgd_lock, since we may need it to walk the pgd_list,
-	 * and since we need some kind of lock here to avoid races.
-	 */
-	spin_lock_irqsave(&pgd_lock, flags);
+	spin_lock_irqsave(&init_mm.page_table_lock, flags);
 	if (!pmd_huge_page(*pmd)) {
 		/* Lost the race to convert the huge page. */
-		spin_unlock_irqrestore(&pgd_lock, flags);
+		spin_unlock_irqrestore(&init_mm.page_table_lock, flags);
 		return;
 	}

@@ -194,6 +190,7 @@ void shatter_huge_page(unsigned long addr)

 #ifdef __PAGETABLE_PMD_FOLDED
 	/* Walk every pgd on the system and update the pmd there. */
+	spin_lock(&pgd_lock);
 	list_for_each(pos, &pgd_list) {
 		pmd_t *copy_pmd;
 		pgd = list_to_pgd(pos) + pgd_index(addr);
@@ -201,6 +198,7 @@ void shatter_huge_page(unsigned long addr)
 		copy_pmd = pmd_offset(pud, addr);
 		__set_pmd(copy_pmd, *pmd);
 	}
+	spin_unlock(&pgd_lock);
 #endif

 	/* Tell every cpu to notice the change. */
@@ -208,7 +206,7 @@ void shatter_huge_page(unsigned long addr)
 		     cpu_possible_mask, NULL, 0);

 	/* Hold the lock until the TLB flush is finished to avoid races. */
-	spin_unlock_irqrestore(&pgd_lock, flags);
+	spin_unlock_irqrestore(&init_mm.page_table_lock, flags);
 }

 /*
@@ -217,9 +215,13 @@ void shatter_huge_page(unsigned long addr)
  * against pageattr.c; it is the unique case in which a valid change
  * of kernel pagetables can't be lazily synchronized by vmalloc faults.
  * vmalloc faults work because attached pagetables are never freed.
- * The locking scheme was chosen on the basis of manfred's
- * recommendations and having no core impact whatsoever.
- * -- wli
+ *
+ * The lock is always taken with interrupts disabled, unlike on x86
+ * and other platforms, because we need to take the lock in
+ * shatter_huge_page(), which may be called from an interrupt context.
+ * We are not at risk from the tlbflush IPI deadlock that was seen on
+ * x86, since we use the flush_remote() API to have the hypervisor do
+ * the TLB flushes regardless of irq disabling.
  */
 DEFINE_SPINLOCK(pgd_lock);
 LIST_HEAD(pgd_list);
@@ -469,10 +471,18 @@ void __set_pte(pte_t *ptep, pte_t pte)

 void set_pte(pte_t *ptep, pte_t pte)
 {
-	struct page *page = pfn_to_page(pte_pfn(pte));
-
-	/* Update the home of a PTE if necessary */
-	pte = pte_set_home(pte, page_home(page));
+	if (pte_present(pte) &&
+	    (!CHIP_HAS_MMIO() || hv_pte_get_mode(pte) != HV_PTE_MODE_MMIO)) {
+		/* The PTE actually references physical memory. */
+		unsigned long pfn = pte_pfn(pte);
+		if (pfn_valid(pfn)) {
+			/* Update the home of the PTE from the struct page. */
+			pte = pte_set_home(pte, page_home(pfn_to_page(pfn)));
+		} else if (hv_pte_get_mode(pte) == 0) {
+			/* remap_pfn_range(), etc, must supply PTE mode. */
+			panic("set_pte(): out-of-range PFN and mode 0\n");
+		}
+	}

 	__set_pte(ptep, pte);
 }
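For reference, the lock ordering this change establishes in shatter_huge_page() can be summarized as a simplified sketch, reconstructed only from the hunks above rather than the full kernel source: init_mm.page_table_lock is taken with interrupts disabled and held across the remote TLB flush, while pgd_lock is taken nested inside it solely for the pgd_list walk in the __PAGETABLE_PMD_FOLDED case.

	/* Sketch of the post-patch locking, not the literal function body. */
	spin_lock_irqsave(&init_mm.page_table_lock, flags);
	if (pmd_huge_page(*pmd)) {
		/* ... shatter the huge PMD into the preallocated L2 table ... */
#ifdef __PAGETABLE_PMD_FOLDED
		/* Nested lock: only protects the pgd_list walk. */
		spin_lock(&pgd_lock);
		/* ... copy the new PMD into every pgd on the list ... */
		spin_unlock(&pgd_lock);
#endif
		/* Hypervisor-driven TLB flush, so irqs may stay disabled. */
		flush_remote(0, 0, NULL, addr, HPAGE_SIZE, HPAGE_SIZE,
			     cpu_possible_mask, NULL, 0);
	}
	spin_unlock_irqrestore(&init_mm.page_table_lock, flags);

The set_pte() hunk is independent of this locking: it rehomes only PTEs whose PFN is backed by a struct page, and otherwise requires callers such as remap_pfn_range() to have supplied an explicit PTE mode.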