| | | |
|---|---|---|
| author | Grant Likely <grant.likely@secretlab.ca> | 2012-05-08 11:35:37 -0600 |
| committer | Grant Likely <grant.likely@secretlab.ca> | 2012-05-08 11:35:37 -0600 |
| commit | 7b96c686223a5c902d6a59c7d178f3904f0ab757 (patch) | |
| tree | fe328ed56ad3719de3cfebad72ef74e34f1ed92b /arch/s390/mm/pgtable.c | |
| parent | f141ed65f256ec036c7fba604da6b7c448096ef9 (diff) | |
| parent | d48b97b403d23f6df0b990cee652bdf9a52337a3 (diff) | |
Merge tag 'v3.4-rc6' into gpio/next
Linux 3.4-rc6
Diffstat (limited to 'arch/s390/mm/pgtable.c')
| | | |
|---|---|---|
| -rw-r--r-- | arch/s390/mm/pgtable.c | 63 |
1 files changed, 60 insertions, 3 deletions
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 373adf69b01..6e765bf0067 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -678,8 +678,6 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
 	}
 }
 
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
-
 static void __page_table_free_rcu(void *table, unsigned bit)
 {
 	struct page *page;
@@ -733,7 +731,66 @@ void __tlb_remove_table(void *_table)
 		free_pages((unsigned long) table, ALLOC_ORDER);
 }
 
-#endif
+static void tlb_remove_table_smp_sync(void *arg)
+{
+	/* Simply deliver the interrupt */
+}
+
+static void tlb_remove_table_one(void *table)
+{
+	/*
+	 * This isn't an RCU grace period and hence the page-tables cannot be
+	 * assumed to be actually RCU-freed.
+	 *
+	 * It is however sufficient for software page-table walkers that rely
+	 * on IRQ disabling. See the comment near struct mmu_table_batch.
+	 */
+	smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
+	__tlb_remove_table(table);
+}
+
+static void tlb_remove_table_rcu(struct rcu_head *head)
+{
+	struct mmu_table_batch *batch;
+	int i;
+
+	batch = container_of(head, struct mmu_table_batch, rcu);
+
+	for (i = 0; i < batch->nr; i++)
+		__tlb_remove_table(batch->tables[i]);
+
+	free_page((unsigned long)batch);
+}
+
+void tlb_table_flush(struct mmu_gather *tlb)
+{
+	struct mmu_table_batch **batch = &tlb->batch;
+
+	if (*batch) {
+		__tlb_flush_mm(tlb->mm);
+		call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
+		*batch = NULL;
+	}
+}
+
+void tlb_remove_table(struct mmu_gather *tlb, void *table)
+{
+	struct mmu_table_batch **batch = &tlb->batch;
+
+	if (*batch == NULL) {
+		*batch = (struct mmu_table_batch *)
+			__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
+		if (*batch == NULL) {
+			__tlb_flush_mm(tlb->mm);
+			tlb_remove_table_one(table);
+			return;
+		}
+		(*batch)->nr = 0;
+	}
+	(*batch)->tables[(*batch)->nr++] = table;
+	if ((*batch)->nr == MAX_TABLE_BATCH)
+		tlb_table_flush(tlb);
+}
 
 /*
  * switch on pgstes for its userspace process (for kvm)
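For context, the added tlb_remove_table()/tlb_table_flush() pair queues page tables in a per-mmu_gather batch page and frees them only after an RCU grace period (via call_rcu_sched()), falling back to an IPI broadcast in tlb_remove_table_one() when no batch page can be allocated. The batch structure and MAX_TABLE_BATCH are not part of this diff; the sketch below shows roughly what the code above assumes, modelled on the generic CONFIG_HAVE_RCU_TABLE_FREE definitions (see include/asm-generic/tlb.h). Treat the exact header location and layout in this particular tree as an assumption.

```c
/*
 * Rough sketch of the batch structure tlb_remove_table() relies on.
 * Not part of this diff; layout follows the generic RCU table-free code.
 */
struct mmu_table_batch {
	struct rcu_head	rcu;		/* head passed to call_rcu_sched() */
	unsigned int	nr;		/* number of table pointers queued */
	void		*tables[0];	/* table pointers filling the rest of the page */
};

/* One page holds the header plus as many table pointers as fit after it. */
#define MAX_TABLE_BATCH \
	((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
```

With this layout, tlb_remove_table() appends pointers until the batch page is full, at which point tlb_table_flush() flushes the TLB and hands the whole batch to RCU, so lockless software page-table walkers never see a table that has already been freed.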