diff options
Diffstat (limited to 'arch/ia64/kernel')
 -rw-r--r--   arch/ia64/kernel/acpi.c        |  2 +-
 -rw-r--r--   arch/ia64/kernel/pci-swiotlb.c | 14 ++++++++++----
 -rw-r--r--   arch/ia64/kernel/perfmon.c     | 18 ++++-----------
3 files changed, 16 insertions(+), 18 deletions(-)
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c index ac795d311f4..6f38b6120d9 100644 --- a/arch/ia64/kernel/acpi.c +++ b/arch/ia64/kernel/acpi.c @@ -839,7 +839,7 @@ static __init int setup_additional_cpus(char *s)  early_param("additional_cpus", setup_additional_cpus);  /* - * cpu_possible_map should be static, it cannot change as CPUs + * cpu_possible_mask should be static, it cannot change as CPUs   * are onlined, or offlined. The reason is per-cpu data-structures   * are allocated by some modules at init time, and dont expect to   * do this dynamically on cpu arrival/departure. diff --git a/arch/ia64/kernel/pci-swiotlb.c b/arch/ia64/kernel/pci-swiotlb.c index d9485d952ed..939260aeac9 100644 --- a/arch/ia64/kernel/pci-swiotlb.c +++ b/arch/ia64/kernel/pci-swiotlb.c @@ -15,16 +15,24 @@ int swiotlb __read_mostly;  EXPORT_SYMBOL(swiotlb);  static void *ia64_swiotlb_alloc_coherent(struct device *dev, size_t size, -					 dma_addr_t *dma_handle, gfp_t gfp) +					 dma_addr_t *dma_handle, gfp_t gfp, +					 struct dma_attrs *attrs)  {  	if (dev->coherent_dma_mask != DMA_BIT_MASK(64))  		gfp |= GFP_DMA;  	return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);  } +static void ia64_swiotlb_free_coherent(struct device *dev, size_t size, +				       void *vaddr, dma_addr_t dma_addr, +				       struct dma_attrs *attrs) +{ +	swiotlb_free_coherent(dev, size, vaddr, dma_addr); +} +  struct dma_map_ops swiotlb_dma_ops = { -	.alloc_coherent = ia64_swiotlb_alloc_coherent, -	.free_coherent = swiotlb_free_coherent, +	.alloc = ia64_swiotlb_alloc_coherent, +	.free = ia64_swiotlb_free_coherent,  	.map_page = swiotlb_map_page,  	.unmap_page = swiotlb_unmap_page,  	.map_sg = swiotlb_map_sg_attrs, diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c index 9d0fd7d5bb8..f00ba025375 100644 --- a/arch/ia64/kernel/perfmon.c +++ b/arch/ia64/kernel/perfmon.c @@ -604,12 +604,6 @@ pfm_unprotect_ctx_ctxsw(pfm_context_t *x, unsigned long f)  	
spin_unlock(&(x)->ctx_lock);  } -static inline unsigned int -pfm_do_munmap(struct mm_struct *mm, unsigned long addr, size_t len, int acct) -{ -	return do_munmap(mm, addr, len); -} -  static inline unsigned long   pfm_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags, unsigned long exec)  { @@ -1458,8 +1452,9 @@ pfm_unreserve_session(pfm_context_t *ctx, int is_syswide, unsigned int cpu)   * a PROTECT_CTX() section.   */  static int -pfm_remove_smpl_mapping(struct task_struct *task, void *vaddr, unsigned long size) +pfm_remove_smpl_mapping(void *vaddr, unsigned long size)  { +	struct task_struct *task = current;  	int r;  	/* sanity checks */ @@ -1473,13 +1468,8 @@ pfm_remove_smpl_mapping(struct task_struct *task, void *vaddr, unsigned long siz  	/*  	 * does the actual unmapping  	 */ -	down_write(&task->mm->mmap_sem); +	r = vm_munmap((unsigned long)vaddr, size); -	DPRINT(("down_write done smpl_vaddr=%p size=%lu\n", vaddr, size)); - -	r = pfm_do_munmap(task->mm, (unsigned long)vaddr, size, 0); - -	up_write(&task->mm->mmap_sem);  	if (r !=0) {  		printk(KERN_ERR "perfmon: [%d] unable to unmap sampling buffer @%p size=%lu\n", task_pid_nr(task), vaddr, size);  	} @@ -1945,7 +1935,7 @@ pfm_flush(struct file *filp, fl_owner_t id)  	 * because some VM function reenables interrupts.  	 *  	 */ -	if (smpl_buf_vaddr) pfm_remove_smpl_mapping(current, smpl_buf_vaddr, smpl_buf_size); +	if (smpl_buf_vaddr) pfm_remove_smpl_mapping(smpl_buf_vaddr, smpl_buf_size);  	return 0;  }  |