Diffstat (limited to 'arch/x86/kernel')
28 files changed, 212 insertions, 114 deletions
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index 103b6ab368d..146a49c763a 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -24,6 +24,10 @@ unsigned long acpi_realmode_flags;
 static char temp_stack[4096];
 #endif
 
+asmlinkage void acpi_enter_s3(void)
+{
+	acpi_enter_sleep_state(3, wake_sleep_flags);
+}
 /**
  * acpi_suspend_lowlevel - save kernel state
  *
diff --git a/arch/x86/kernel/acpi/sleep.h b/arch/x86/kernel/acpi/sleep.h
index 416d4be13fe..d68677a2a01 100644
--- a/arch/x86/kernel/acpi/sleep.h
+++ b/arch/x86/kernel/acpi/sleep.h
@@ -3,12 +3,16 @@
  */
 
 #include <asm/trampoline.h>
+#include <linux/linkage.h>
 
 extern unsigned long saved_video_mode;
 extern long saved_magic;
 
 extern int wakeup_pmode_return;
 
+extern u8 wake_sleep_flags;
+extern asmlinkage void acpi_enter_s3(void);
+
 extern unsigned long acpi_copy_wakeup_routine(unsigned long);
 extern void wakeup_long64(void);
diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
index 13ab720573e..72610839f03 100644
--- a/arch/x86/kernel/acpi/wakeup_32.S
+++ b/arch/x86/kernel/acpi/wakeup_32.S
@@ -74,9 +74,7 @@ restore_registers:
 ENTRY(do_suspend_lowlevel)
 	call	save_processor_state
 	call	save_registers
-	pushl	$3
-	call	acpi_enter_sleep_state
-	addl	$4, %esp
+	call	acpi_enter_s3
 
 #	In case of S3 failure, we'll emerge here.  Jump
 # 	to ret_point to recover
diff --git a/arch/x86/kernel/acpi/wakeup_64.S b/arch/x86/kernel/acpi/wakeup_64.S
index 8ea5164cbd0..014d1d28c39 100644
--- a/arch/x86/kernel/acpi/wakeup_64.S
+++ b/arch/x86/kernel/acpi/wakeup_64.S
@@ -71,9 +71,7 @@ ENTRY(do_suspend_lowlevel)
 	movq	%rsi, saved_rsi
 
 	addq	$8, %rsp
-	movl	$3, %edi
-	xorl	%eax, %eax
-	call	acpi_enter_sleep_state
+	call	acpi_enter_s3
 
 	/* in case something went wrong, restore the machine status and go on */
 	jmp	resume_point
diff --git a/arch/x86/kernel/amd_gart_64.c b/arch/x86/kernel/amd_gart_64.c
index b1e7c7f7a0a..e66311200cb 100644
--- a/arch/x86/kernel/amd_gart_64.c
+++ b/arch/x86/kernel/amd_gart_64.c
@@ -477,7 +477,7 @@ error:
 /* allocate and map a coherent mapping */
 static void *
 gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
-		    gfp_t flag)
+		    gfp_t flag, struct dma_attrs *attrs)
 {
 	dma_addr_t paddr;
 	unsigned long align_mask;
@@ -500,7 +500,8 @@ gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
 		}
 		__free_pages(page, get_order(size));
 	} else
-		return dma_generic_alloc_coherent(dev, size, dma_addr, flag);
+		return dma_generic_alloc_coherent(dev, size, dma_addr, flag,
+						  attrs);
 
 	return NULL;
 }
@@ -508,7 +509,7 @@ gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr,
 /* free a coherent mapping */
 static void
 gart_free_coherent(struct device *dev, size_t size, void *vaddr,
-		   dma_addr_t dma_addr)
+		   dma_addr_t dma_addr, struct dma_attrs *attrs)
 {
 	gart_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL, NULL);
 	free_pages((unsigned long)vaddr, get_order(size));
@@ -700,8 +701,8 @@ static struct dma_map_ops gart_dma_ops = {
 	.unmap_sg			= gart_unmap_sg,
 	.map_page			= gart_map_page,
 	.unmap_page			= gart_unmap_page,
-	.alloc_coherent			= gart_alloc_coherent,
-	.free_coherent			= gart_free_coherent,
+	.alloc				= gart_alloc_coherent,
+	.free				= gart_free_coherent,
 	.mapping_error			= gart_mapping_error,
 };
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 11544d8f1e9..edc24480469 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -1637,9 +1637,11 @@ static int __init apic_verify(void)
 	mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
 
 	/* The BIOS may have set up the APIC at some other address */
-	rdmsr(MSR_IA32_APICBASE, l, h);
-	if (l & MSR_IA32_APICBASE_ENABLE)
-		mp_lapic_addr = l & MSR_IA32_APICBASE_BASE;
+	if (boot_cpu_data.x86 >= 6) {
+		rdmsr(MSR_IA32_APICBASE, l, h);
+		if (l & MSR_IA32_APICBASE_ENABLE)
+			mp_lapic_addr = l & MSR_IA32_APICBASE_BASE;
+	}
 
 	pr_info("Found and enabled local APIC!\n");
 	return 0;
@@ -1657,13 +1659,15 @@ int __init apic_force_enable(unsigned long addr)
 	 * MSR. This can only be done in software for Intel P6 or later
 	 * and AMD K7 (Model > 1) or later.
 	 */
-	rdmsr(MSR_IA32_APICBASE, l, h);
-	if (!(l & MSR_IA32_APICBASE_ENABLE)) {
-		pr_info("Local APIC disabled by BIOS -- reenabling.\n");
-		l &= ~MSR_IA32_APICBASE_BASE;
-		l |= MSR_IA32_APICBASE_ENABLE | addr;
-		wrmsr(MSR_IA32_APICBASE, l, h);
-		enabled_via_apicbase = 1;
+	if (boot_cpu_data.x86 >= 6) {
+		rdmsr(MSR_IA32_APICBASE, l, h);
+		if (!(l & MSR_IA32_APICBASE_ENABLE)) {
+			pr_info("Local APIC disabled by BIOS -- reenabling.\n");
+			l &= ~MSR_IA32_APICBASE_BASE;
+			l |= MSR_IA32_APICBASE_ENABLE | addr;
+			wrmsr(MSR_IA32_APICBASE, l, h);
+			enabled_via_apicbase = 1;
+		}
 	}
 	return apic_verify();
 }
@@ -2209,10 +2213,12 @@ static void lapic_resume(void)
 		 * FIXME! This will be wrong if we ever support suspend on
 		 * SMP! We'll need to do this as part of the CPU restore!
 		 */
-		rdmsr(MSR_IA32_APICBASE, l, h);
-		l &= ~MSR_IA32_APICBASE_BASE;
-		l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr;
-		wrmsr(MSR_IA32_APICBASE, l, h);
+		if (boot_cpu_data.x86 >= 6) {
+			rdmsr(MSR_IA32_APICBASE, l, h);
+			l &= ~MSR_IA32_APICBASE_BASE;
+			l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr;
+			wrmsr(MSR_IA32_APICBASE, l, h);
+		}
 	}
 
 	maxlvt = lapic_get_maxlvt();
diff --git a/arch/x86/kernel/apic/apic_numachip.c b/arch/x86/kernel/apic/apic_numachip.c
index 899803e0321..23e75422e01 100644
--- a/arch/x86/kernel/apic/apic_numachip.c
+++ b/arch/x86/kernel/apic/apic_numachip.c
@@ -207,8 +207,11 @@ static void __init map_csrs(void)
 
 static void fixup_cpu_id(struct cpuinfo_x86 *c, int node)
 {
-	c->phys_proc_id = node;
-	per_cpu(cpu_llc_id, smp_processor_id()) = node;
+
+	if (c->phys_proc_id != node) {
+		c->phys_proc_id = node;
+		per_cpu(cpu_llc_id, smp_processor_id()) = node;
+	}
 }
 
 static int __init numachip_system_init(void)
diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
index 8a778db45e3..991e315f422 100644
--- a/arch/x86/kernel/apic/x2apic_phys.c
+++ b/arch/x86/kernel/apic/x2apic_phys.c
@@ -24,6 +24,12 @@ static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 {
 	if (x2apic_phys)
 		return x2apic_enabled();
+	else if ((acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID) &&
+		(acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL) &&
+		x2apic_enabled()) {
+		printk(KERN_DEBUG "System requires x2apic physical mode\n");
+		return 1;
+	}
 	else
 		return 0;
 }
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 0a44b90602b..146bb6218ee 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -26,7 +26,8 @@
  *	contact AMD for precise details and a CPU swap.
  *
  *	See	http://www.multimania.com/poulot/k6bug.html
- *		http://www.amd.com/K6/k6docs/revgd.html
+ *	and	section 2.6.2 of "AMD-K6 Processor Revision Guide - Model 6"
+ *		(Publication # 21266  Issue Date: August 1998)
  *
  *	The following test is erm.. interesting. AMD neglected to up
  *	the chip setting when fixing the bug but they also tweaked some
@@ -94,7 +95,6 @@ static void __cpuinit init_amd_k6(struct cpuinfo_x86 *c)
 				"system stability may be impaired when more than 32 MB are used.\n");
 		else
 			printk(KERN_CONT "probably OK (after B9730xxxx).\n");
-		printk(KERN_INFO "Please see http://membres.lycos.fr/poulot/k6bug.html\n");
 	}
 
 	/* K6 with old style WHCR */
@@ -353,10 +353,11 @@ static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
 		node = per_cpu(cpu_llc_id, cpu);
 
 	/*
-	 * If core numbers are inconsistent, it's likely a multi-fabric platform,
-	 * so invoke platform-specific handler
+	 * On multi-fabric platform (e.g. Numascale NumaChip) a
+	 * platform-specific handler needs to be called to fixup some
+	 * IDs of the CPU.
 	 */
-	if (c->phys_proc_id != node)
+	if (x86_cpuinit.fixup_cpu_id)
 		x86_cpuinit.fixup_cpu_id(c, node);
 
 	if (!node_online(node)) {
@@ -579,6 +580,24 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 		}
 	}
 
+	/* re-enable TopologyExtensions if switched off by BIOS */
+	if ((c->x86 == 0x15) &&
+	    (c->x86_model >= 0x10) && (c->x86_model <= 0x1f) &&
+	    !cpu_has(c, X86_FEATURE_TOPOEXT)) {
+		u64 val;
+
+		if (!rdmsrl_amd_safe(0xc0011005, &val)) {
+			val |= 1ULL << 54;
+			wrmsrl_amd_safe(0xc0011005, val);
+			rdmsrl(0xc0011005, val);
+			if (val & (1ULL << 54)) {
+				set_cpu_cap(c, X86_FEATURE_TOPOEXT);
+				printk(KERN_INFO FW_INFO "CPU: Re-enabling "
+				  "disabled Topology Extensions Support\n");
+			}
+		}
+	}
+
 	cpu_detect_cache_sizes(c);
 
 	/* Multi core CPU? */
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 67e258362a3..cf79302198a 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1163,15 +1163,6 @@ static void dbg_restore_debug_regs(void)
 #endif /* ! CONFIG_KGDB */
 
 /*
- * Prints an error where the NUMA and configured core-number mismatch and the
- * platform didn't override this to fix it up
- */
-void __cpuinit x86_default_fixup_cpu_id(struct cpuinfo_x86 *c, int node)
-{
-	pr_err("NUMA core number %d differs from configured core number %d\n", node, c->phys_proc_id);
-}
-
-/*
  * cpu_init() initializes state that is per-CPU. Some data is already
  * initialized (naturally) in the bootstrap process, such as the GDT
  * and IDT. We reload them nevertheless, this function acts as a
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 73d08ed98a6..b8f3653dddb 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -433,14 +433,14 @@ int amd_set_l3_disable_slot(struct amd_northbridge *nb, int cpu, unsigned slot,
 
 	/*  check if @slot is already used or the index is already disabled */
 	ret = amd_get_l3_disable_slot(nb, slot);
 	if (ret >= 0)
-		return -EINVAL;
+		return -EEXIST;
 
 	if (index > nb->l3_cache.indices)
 		return -EINVAL;
 
 	/* check whether the other slot has disabled the same index already */
 	if (index == amd_get_l3_disable_slot(nb, !slot))
-		return -EINVAL;
+		return -EEXIST;
 
 	amd_l3_disable_index(nb, cpu, slot, index);
@@ -468,8 +468,8 @@ static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
 	err = amd_set_l3_disable_slot(this_leaf->base.nb, cpu, slot, val);
 	if (err) {
 		if (err == -EEXIST)
-			printk(KERN_WARNING "L3 disable slot %d in use!\n",
-					    slot);
+			pr_warning("L3 slot %d in use/index already disabled!\n",
+				   slot);
 		return err;
 	}
 	return count;
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c
index ef484d9d0a2..a2dfacfd710 100644
--- a/arch/x86/kernel/cpu/perf_event_p4.c
+++ b/arch/x86/kernel/cpu/perf_event_p4.c
@@ -1271,6 +1271,17 @@ done:
 	return num ? -EINVAL : 0;
 }
 
+PMU_FORMAT_ATTR(cccr, "config:0-31" );
+PMU_FORMAT_ATTR(escr, "config:32-62");
+PMU_FORMAT_ATTR(ht,   "config:63"   );
+
+static struct attribute *intel_p4_formats_attr[] = {
+	&format_attr_cccr.attr,
+	&format_attr_escr.attr,
+	&format_attr_ht.attr,
+	NULL,
+};
+
 static __initconst const struct x86_pmu p4_pmu = {
 	.name			= "Netburst P4/Xeon",
 	.handle_irq		= p4_pmu_handle_irq,
@@ -1305,6 +1316,8 @@ static __initconst const struct x86_pmu p4_pmu = {
 	 * the former idea is taken from OProfile code
 	 */
 	.perfctr_second_write	= 1,
+
+	.format_attrs		= intel_p4_formats_attr,
 };
 
 __init int p4_pmu_init(void)
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index 7734bcbb5a3..2d6e6498c17 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -235,6 +235,7 @@ int init_fpu(struct task_struct *tsk)
 	if (tsk_used_math(tsk)) {
 		if (HAVE_HWFP && tsk == current)
 			unlazy_fpu(tsk);
+		tsk->thread.fpu.last_cpu = ~0;
 		return 0;
 	}
 
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 7943e0c21bd..3dafc6003b7 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -282,8 +282,13 @@ void fixup_irqs(void)
 		else if (!(warned++))
 			set_affinity = 0;
 
+		/*
+		 * We unmask if the irq was not marked masked by the
+		 * core code. That respects the lazy irq disable
+		 * behaviour.
+		 */
 		if (!irqd_can_move_in_process_context(data) &&
-		    !irqd_irq_disabled(data) && chip->irq_unmask)
+		    !irqd_irq_masked(data) && chip->irq_unmask)
 			chip->irq_unmask(data);
 
 		raw_spin_unlock(&desc->lock);
diff --git a/arch/x86/kernel/kdebugfs.c b/arch/x86/kernel/kdebugfs.c
index 90fcf62854b..1d5d31ea686 100644
--- a/arch/x86/kernel/kdebugfs.c
+++ b/arch/x86/kernel/kdebugfs.c
@@ -68,16 +68,9 @@ static ssize_t setup_data_read(struct file *file, char __user *user_buf,
 	return count;
 }
 
-static int setup_data_open(struct inode *inode, struct file *file)
-{
-	file->private_data = inode->i_private;
-
-	return 0;
-}
-
 static const struct file_operations fops_setup_data = {
 	.read		= setup_data_read,
-	.open		= setup_data_open,
+	.open		= simple_open,
 	.llseek		= default_llseek,
 };
 
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
index db6720edfdd..8bfb6146f75 100644
--- a/arch/x86/kernel/kgdb.c
+++ b/arch/x86/kernel/kgdb.c
@@ -43,6 +43,8 @@
 #include <linux/smp.h>
 #include <linux/nmi.h>
 #include <linux/hw_breakpoint.h>
+#include <linux/uaccess.h>
+#include <linux/memory.h>
 
 #include <asm/debugreg.h>
 #include <asm/apicdef.h>
@@ -741,6 +743,64 @@ void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
 	regs->ip = ip;
 }
 
+int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
+{
+	int err;
+	char opc[BREAK_INSTR_SIZE];
+
+	bpt->type = BP_BREAKPOINT;
+	err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
+				BREAK_INSTR_SIZE);
+	if (err)
+		return err;
+	err = probe_kernel_write((char *)bpt->bpt_addr,
+				 arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
+#ifdef CONFIG_DEBUG_RODATA
+	if (!err)
+		return err;
+	/*
+	 * It is safe to call text_poke() because normal kernel execution
+	 * is stopped on all cores, so long as the text_mutex is not locked.
+	 */
+	if (mutex_is_locked(&text_mutex))
+		return -EBUSY;
+	text_poke((void *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr,
+		  BREAK_INSTR_SIZE);
+	err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
+	if (err)
+		return err;
+	if (memcmp(opc, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE))
+		return -EINVAL;
+	bpt->type = BP_POKE_BREAKPOINT;
+#endif /* CONFIG_DEBUG_RODATA */
+	return err;
+}
+
+int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
+{
+#ifdef CONFIG_DEBUG_RODATA
+	int err;
+	char opc[BREAK_INSTR_SIZE];
+
+	if (bpt->type != BP_POKE_BREAKPOINT)
+		goto knl_write;
+	/*
+	 * It is safe to call text_poke() because normal kernel execution
+	 * is stopped on all cores, so long as the text_mutex is not locked.
+	 */
+	if (mutex_is_locked(&text_mutex))
+		goto knl_write;
+	text_poke((void *)bpt->bpt_addr, bpt->saved_instr, BREAK_INSTR_SIZE);
+	err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
+	if (err || memcmp(opc, bpt->saved_instr, BREAK_INSTR_SIZE))
+		goto knl_write;
+	return err;
+knl_write:
+#endif /* CONFIG_DEBUG_RODATA */
+	return probe_kernel_write((char *)bpt->bpt_addr,
+				  (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
+}
+
 struct kgdb_arch arch_kgdb_ops = {
 	/* Breakpoint instruction: */
 	.gdb_bpt_instr		= { 0xcc },
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 694d801bf60..e554e5ad2fe 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -38,6 +38,7 @@
 #include <asm/traps.h>
 #include <asm/desc.h>
 #include <asm/tlbflush.h>
+#include <asm/idle.h>
 
 static int kvmapf = 1;
 
@@ -78,7 +79,6 @@ struct kvm_task_sleep_node {
 	u32 token;
 	int cpu;
 	bool halted;
-	struct mm_struct *mm;
 };
 
 static struct kvm_task_sleep_head {
@@ -125,9 +125,7 @@ void kvm_async_pf_task_wait(u32 token)
 
 	n.token = token;
 	n.cpu = smp_processor_id();
-	n.mm = current->active_mm;
 	n.halted = idle || preempt_count() > 1;
-	atomic_inc(&n.mm->mm_count);
 	init_waitqueue_head(&n.wq);
 	hlist_add_head(&n.link, &b->list);
 	spin_unlock(&b->lock);
@@ -160,9 +158,6 @@ EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait);
 static void apf_task_wake_one(struct kvm_task_sleep_node *n)
 {
 	hlist_del_init(&n->link);
-	if (!n->mm)
-		return;
-	mmdrop(n->mm);
 	if (n->halted)
 		smp_send_reschedule(n->cpu);
 	else if (waitqueue_active(&n->wq))
@@ -206,7 +201,7 @@ again:
 		 * async PF was not yet handled.
 		 * Add dummy entry for the token.
 		 */
-		n = kmalloc(sizeof(*n), GFP_ATOMIC);
+		n = kzalloc(sizeof(*n), GFP_ATOMIC);
 		if (!n) {
 			/*
 			 * Allocation failed! Busy wait while other cpu
@@ -218,7 +213,6 @@ again:
 		}
 		n->token = token;
 		n->cpu = smp_processor_id();
-		n->mm = NULL;
 		init_waitqueue_head(&n->wq);
 		hlist_add_head(&n->link, &b->list);
 	} else
@@ -253,7 +247,10 @@ do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
 		kvm_async_pf_task_wait((u32)read_cr2());
 		break;
 	case KVM_PV_REASON_PAGE_READY:
+		rcu_irq_enter();
+		exit_idle();
 		kvm_async_pf_task_wake((u32)read_cr2());
+		rcu_irq_exit();
 		break;
 	}
 }
diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
index 73465aab28f..8a2ce8fd41c 100644
--- a/arch/x86/kernel/microcode_amd.c
+++ b/arch/x86/kernel/microcode_amd.c
@@ -82,11 +82,6 @@ static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig)
 {
 	struct cpuinfo_x86 *c = &cpu_data(cpu);
 
-	if (c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10) {
-		pr_warning("CPU%d: family %d not supported\n", cpu, c->x86);
-		return -1;
-	}
-
 	csig->rev = c->microcode;
 	pr_info("CPU%d: patch_level=0x%08x\n", cpu, csig->rev);
 
@@ -380,6 +375,13 @@ static struct microcode_ops microcode_amd_ops = {
 
 struct microcode_ops * __init init_amd_microcode(void)
 {
+	struct cpuinfo_x86 *c = &cpu_data(0);
+
+	if (c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10) {
+		pr_warning("AMD CPU family 0x%x not supported\n", c->x86);
+		return NULL;
+	}
+
 	patch = (void *)get_zeroed_page(GFP_KERNEL);
 	if (!patch)
 		return NULL;
diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
index 87a0f868830..c9bda6d6035 100644
--- a/arch/x86/kernel/microcode_core.c
+++ b/arch/x86/kernel/microcode_core.c
@@ -419,10 +419,8 @@ static int mc_device_add(struct device *dev, struct subsys_interface *sif)
 	if (err)
 		return err;
 
-	if (microcode_init_cpu(cpu) == UCODE_ERROR) {
-		sysfs_remove_group(&dev->kobj, &mc_attr_group);
+	if (microcode_init_cpu(cpu) == UCODE_ERROR)
 		return -EINVAL;
-	}
 
 	return err;
 }
@@ -528,11 +526,11 @@ static int __init microcode_init(void)
 		microcode_ops = init_intel_microcode();
 	else if (c->x86_vendor == X86_VENDOR_AMD)
 		microcode_ops = init_amd_microcode();
-
-	if (!microcode_ops) {
+	else
 		pr_err("no support for this CPU vendor\n");
+
+	if (!microcode_ops)
 		return -ENODEV;
-	}
 
 	microcode_pdev = platform_device_register_simple("microcode", -1,
 							 NULL, 0);
diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
index 6ac5782f4d6..d0b2fb9ccbb 100644
--- a/arch/x86/kernel/pci-calgary_64.c
+++ b/arch/x86/kernel/pci-calgary_64.c
@@ -430,7 +430,7 @@ static void calgary_unmap_page(struct device *dev, dma_addr_t dma_addr,
 }
 
 static void* calgary_alloc_coherent(struct device *dev, size_t size,
-	dma_addr_t *dma_handle, gfp_t flag)
+	dma_addr_t *dma_handle, gfp_t flag, struct dma_attrs *attrs)
 {
 	void *ret = NULL;
 	dma_addr_t mapping;
@@ -463,7 +463,8 @@ error:
 }
 
 static void calgary_free_coherent(struct device *dev, size_t size,
-				  void *vaddr, dma_addr_t dma_handle)
+				  void *vaddr, dma_addr_t dma_handle,
+				  struct dma_attrs *attrs)
 {
 	unsigned int npages;
 	struct iommu_table *tbl = find_iommu_table(dev);
@@ -476,8 +477,8 @@ static void calgary_free_coherent(struct device *dev, size_t size,
 }
 
 static struct dma_map_ops calgary_dma_ops = {
-	.alloc_coherent = calgary_alloc_coherent,
-	.free_coherent = calgary_free_coherent,
+	.alloc = calgary_alloc_coherent,
+	.free = calgary_free_coherent,
 	.map_sg = calgary_map_sg,
 	.unmap_sg = calgary_unmap_sg,
 	.map_page = calgary_map_page,
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 28e5e06fcba..3003250ac51 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -96,7 +96,8 @@ void __init pci_iommu_alloc(void)
 	}
 }
 void *dma_generic_alloc_coherent(struct device *dev, size_t size,
-				 dma_addr_t *dma_addr, gfp_t flag)
+				 dma_addr_t *dma_addr, gfp_t flag,
+				 struct dma_attrs *attrs)
 {
 	unsigned long dma_mask;
 	struct page *page;
diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c
index 3af4af810c0..f96050685b4 100644
--- a/arch/x86/kernel/pci-nommu.c
+++ b/arch/x86/kernel/pci-nommu.c
@@ -75,7 +75,7 @@ static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg,
 }
 
 static void nommu_free_coherent(struct device *dev, size_t size, void *vaddr,
-				dma_addr_t dma_addr)
+				dma_addr_t dma_addr, struct dma_attrs *attrs)
 {
 	free_pages((unsigned long)vaddr, get_order(size));
 }
@@ -96,8 +96,8 @@ static void nommu_sync_sg_for_device(struct device *dev,
 }
 
 struct dma_map_ops nommu_dma_ops = {
-	.alloc_coherent		= dma_generic_alloc_coherent,
-	.free_coherent		= nommu_free_coherent,
+	.alloc			= dma_generic_alloc_coherent,
+	.free			= nommu_free_coherent,
 	.map_sg			= nommu_map_sg,
 	.map_page		= nommu_map_page,
 	.sync_single_for_device = nommu_sync_single_for_device,
diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
index 8f972cbddef..6c483ba98b9 100644
--- a/arch/x86/kernel/pci-swiotlb.c
+++ b/arch/x86/kernel/pci-swiotlb.c
@@ -15,21 +15,30 @@
 int swiotlb __read_mostly;
 
 static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
-					dma_addr_t *dma_handle, gfp_t flags)
+					dma_addr_t *dma_handle, gfp_t flags,
+					struct dma_attrs *attrs)
 {
 	void *vaddr;
 
-	vaddr = dma_generic_alloc_coherent(hwdev, size, dma_handle, flags);
+	vaddr = dma_generic_alloc_coherent(hwdev, size, dma_handle, flags,
+					   attrs);
 	if (vaddr)
 		return vaddr;
 
 	return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
 }
 
+static void x86_swiotlb_free_coherent(struct device *dev, size_t size,
+				      void *vaddr, dma_addr_t dma_addr,
+				      struct dma_attrs *attrs)
+{
+	swiotlb_free_coherent(dev, size, vaddr, dma_addr);
+}
+
 static struct dma_map_ops swiotlb_dma_ops = {
 	.mapping_error = swiotlb_dma_mapping_error,
-	.alloc_coherent = x86_swiotlb_alloc_coherent,
-	.free_coherent = swiotlb_free_coherent,
+	.alloc = x86_swiotlb_alloc_coherent,
+	.free = x86_swiotlb_free_coherent,
 	.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
 	.sync_single_for_device = swiotlb_sync_single_for_device,
 	.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index a33afaa5ddb..1d92a5ab6e8 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -362,34 +362,10 @@ void (*pm_idle)(void);
 EXPORT_SYMBOL(pm_idle);
 #endif
 
-#ifdef CONFIG_X86_32
-/*
- * This halt magic was a workaround for ancient floppy DMA
- * wreckage. It should be safe to remove.
- */
-static int hlt_counter;
-void disable_hlt(void)
-{
-	hlt_counter++;
-}
-EXPORT_SYMBOL(disable_hlt);
-
-void enable_hlt(void)
-{
-	hlt_counter--;
-}
-EXPORT_SYMBOL(enable_hlt);
-
-static inline int hlt_use_halt(void)
-{
-	return (!hlt_counter && boot_cpu_data.hlt_works_ok);
-}
-#else
 static inline int hlt_use_halt(void)
 {
 	return 1;
 }
-#endif
 
 #ifndef CONFIG_SMP
 static inline void play_dead(void)
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 733ca39f367..43d8b48b23e 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -423,6 +423,7 @@ void set_personality_ia32(bool x32)
 		current_thread_info()->status |= TS_COMPAT;
 	}
 }
+EXPORT_SYMBOL_GPL(set_personality_ia32);
 
 unsigned long get_wchan(struct task_struct *p)
 {
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index 71f4727da37..5a98aa27218 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -185,10 +185,22 @@ void __init setup_per_cpu_areas(void)
 #endif
 	rc = -EINVAL;
 	if (pcpu_chosen_fc != PCPU_FC_PAGE) {
-		const size_t atom_size = cpu_has_pse ? PMD_SIZE : PAGE_SIZE;
 		const size_t dyn_size = PERCPU_MODULE_RESERVE +
 			PERCPU_DYNAMIC_RESERVE - PERCPU_FIRST_CHUNK_RESERVE;
+		size_t atom_size;
 
+		/*
+		 * On 64bit, use PMD_SIZE for atom_size so that embedded
+		 * percpu areas are aligned to PMD.  This, in the future,
+		 * can also allow using PMD mappings in vmalloc area.  Use
+		 * PAGE_SIZE on 32bit as vmalloc space is highly contended
+		 * and large vmalloc area allocs can easily fail.
+		 */
+#ifdef CONFIG_X86_64
+		atom_size = PMD_SIZE;
+#else
+		atom_size = PAGE_SIZE;
+#endif
 		rc = pcpu_embed_first_chunk(PERCPU_FIRST_CHUNK_RESERVE,
 					    dyn_size, atom_size,
 					    pcpu_cpu_distance,
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
index f386dc49f98..7515cf0e180 100644
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -216,9 +216,9 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
 	current_thread_info()->sig_on_uaccess_error = 1;
 
 	/*
-	 * 0 is a valid user pointer (in the access_ok sense) on 32-bit and
+	 * NULL is a valid user pointer (in the access_ok sense) on 32-bit and
 	 * 64-bit, so we don't need to special-case it here.  For all the
-	 * vsyscalls, 0 means "don't write anything" not "write it at
+	 * vsyscalls, NULL means "don't write anything" not "write it at
 	 * address 0".
 	 */
 	ret = -EFAULT;
@@ -247,7 +247,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
 
 		ret = sys_getcpu((unsigned __user *)regs->di,
 				 (unsigned __user *)regs->si,
-				 0);
+				 NULL);
 		break;
 	}
 
diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
index e9f265fd79a..9cf71d0b2d3 100644
--- a/arch/x86/kernel/x86_init.c
+++ b/arch/x86/kernel/x86_init.c
@@ -93,7 +93,6 @@ struct x86_init_ops x86_init __initdata = {
 struct x86_cpuinit_ops x86_cpuinit __cpuinitdata = {
 	.early_percpu_clock_init	= x86_init_noop,
 	.setup_percpu_clockev		= setup_secondary_APIC_clock,
-	.fixup_cpu_id			= x86_default_fixup_cpu_id,
 };
 
 static void default_nmi_init(void) { };