Diffstat (limited to 'arch/arm')
144 files changed, 7679 insertions, 823 deletions
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index b934d90a61e..09238c83e6d 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -36,7 +36,6 @@ config ARM  	select HAVE_GENERIC_HARDIRQS  	select HAVE_HW_BREAKPOINT if (PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7))  	select HAVE_IDE if PCI || ISA || PCMCIA -	select HAVE_IRQ_WORK  	select HAVE_KERNEL_GZIP  	select HAVE_KERNEL_LZMA  	select HAVE_KERNEL_LZO @@ -1536,7 +1535,6 @@ config SMP  config SMP_ON_UP  	bool "Allow booting SMP kernel on uniprocessor systems (EXPERIMENTAL)" -	depends on EXPERIMENTAL  	depends on SMP && !XIP_KERNEL  	default y  	help @@ -1625,6 +1623,16 @@ config HOTPLUG_CPU  	  Say Y here to experiment with turning CPUs off and on.  CPUs  	  can be controlled through /sys/devices/system/cpu. +config ARM_PSCI +	bool "Support for the ARM Power State Coordination Interface (PSCI)" +	depends on CPU_V7 +	help +	  Say Y here if you want Linux to communicate with system firmware +	  implementing the PSCI specification for CPU-centric power +	  management operations described in ARM document number ARM DEN +	  0022A ("Power State Coordination Interface System Software on +	  ARM processors"). +  config LOCAL_TIMERS  	bool "Use local timer interrupts"  	depends on SMP @@ -1642,7 +1650,7 @@ config ARCH_NR_GPIO  	default 355 if ARCH_U8500  	default 264 if MACH_H4700  	default 512 if SOC_OMAP5 -	default 288 if ARCH_VT8500 +	default 288 if ARCH_VT8500 || ARCH_SUNXI  	default 0  	help  	  Maximum number of GPIOs in the system. @@ -1660,6 +1668,9 @@ config HZ  	default SHMOBILE_TIMER_HZ if ARCH_SHMOBILE  	default 100 +config SCHED_HRTICK +	def_bool HIGH_RES_TIMERS +  config THUMB2_KERNEL  	bool "Compile the kernel in Thumb-2 mode"  	depends on CPU_V7 && !CPU_V6 && !CPU_V6K @@ -1724,7 +1735,7 @@ config AEABI  config OABI_COMPAT  	bool "Allow old ABI binaries to run with this kernel (EXPERIMENTAL)" -	depends on AEABI && EXPERIMENTAL && !THUMB2_KERNEL +	depends on AEABI && !THUMB2_KERNEL  	default y  	help  	  This option preserves the old syscall interface along with the @@ -1848,7 +1859,6 @@ config SECCOMP  config CC_STACKPROTECTOR  	bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)" -	depends on EXPERIMENTAL  	help  	  This option turns on the -fstack-protector GCC feature. This  	  feature puts, at the beginning of functions, a canary value on @@ -1865,7 +1875,7 @@ config XEN_DOM0  config XEN  	bool "Xen guest support on ARM (EXPERIMENTAL)" -	depends on EXPERIMENTAL && ARM && OF +	depends on ARM && OF  	depends on CPU_V7 && !CPU_V6  	help  	  Say Y if you want to run Linux in a Virtual Machine on Xen on ARM. @@ -1934,7 +1944,7 @@ config ZBOOT_ROM  choice  	prompt "Include SD/MMC loader in zImage (EXPERIMENTAL)" -	depends on ZBOOT_ROM && ARCH_SH7372 && EXPERIMENTAL +	depends on ZBOOT_ROM && ARCH_SH7372  	default ZBOOT_ROM_NONE  	help  	  Include experimental SD/MMC loading code in the ROM-able zImage. @@ -1963,7 +1973,7 @@ endchoice  config ARM_APPENDED_DTB  	bool "Use appended device tree blob to zImage (EXPERIMENTAL)" -	depends on OF && !ZBOOT_ROM && EXPERIMENTAL +	depends on OF && !ZBOOT_ROM  	help  	  With this option, the boot code will look for a device tree binary  	  (DTB) appended to zImage @@ -2081,7 +2091,7 @@ config XIP_PHYS_ADDR  config KEXEC  	bool "Kexec system call (EXPERIMENTAL)" -	depends on EXPERIMENTAL && (!SMP || HOTPLUG_CPU) +	depends on (!SMP || HOTPLUG_CPU)  	help  	  kexec is a system call that implements the ability to shutdown your  	  current kernel, and to start another kernel.  
It is like a reboot @@ -2103,7 +2113,6 @@ config ATAGS_PROC  config CRASH_DUMP  	bool "Build kdump crash kernel (EXPERIMENTAL)" -	depends on EXPERIMENTAL  	help  	  Generate crash dump after being started by kexec. This should  	  be normally only set in special crash dump kernels which are @@ -2170,7 +2179,7 @@ config CPU_FREQ_S3C  config CPU_FREQ_S3C24XX  	bool "CPUfreq driver for Samsung S3C24XX series CPUs (EXPERIMENTAL)" -	depends on ARCH_S3C24XX && CPU_FREQ && EXPERIMENTAL +	depends on ARCH_S3C24XX && CPU_FREQ  	select CPU_FREQ_S3C  	help  	  This enables the CPUfreq driver for the Samsung S3C24XX family @@ -2182,7 +2191,7 @@ config CPU_FREQ_S3C24XX  config CPU_FREQ_S3C24XX_PLL  	bool "Support CPUfreq changing of PLL frequency (EXPERIMENTAL)" -	depends on CPU_FREQ_S3C24XX && EXPERIMENTAL +	depends on CPU_FREQ_S3C24XX  	help  	  Compile in support for changing the PLL frequency from the  	  S3C24XX series CPUfreq driver. The PLL takes time to settle @@ -2245,7 +2254,7 @@ config FPE_NWFPE_XP  config FPE_FASTFPE  	bool "FastFPE math emulation (EXPERIMENTAL)" -	depends on (!AEABI || OABI_COMPAT) && !CPU_32v3 && EXPERIMENTAL +	depends on (!AEABI || OABI_COMPAT) && !CPU_32v3  	---help---  	  Say Y here to include the FAST floating point emulator in the kernel.  	  This is an experimental much faster emulator which now also has full @@ -2327,3 +2336,5 @@ source "security/Kconfig"  source "crypto/Kconfig"  source "lib/Kconfig" + +source "arch/arm/kvm/Kconfig" diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug index 661030d6bc6..fc2a591e167 100644 --- a/arch/arm/Kconfig.debug +++ b/arch/arm/Kconfig.debug @@ -32,7 +32,7 @@ config FRAME_POINTER  config ARM_UNWIND  	bool "Enable stack unwinding support (EXPERIMENTAL)" -	depends on AEABI && EXPERIMENTAL +	depends on AEABI  	default y  	help  	  This option enables stack unwinding support in the kernel diff --git a/arch/arm/Makefile b/arch/arm/Makefile index 30c443c406f..4bcd2d6b053 100644 --- a/arch/arm/Makefile +++ b/arch/arm/Makefile @@ -252,6 +252,7 @@ core-$(CONFIG_FPE_NWFPE)	+= arch/arm/nwfpe/  core-$(CONFIG_FPE_FASTFPE)	+= $(FASTFPE_OBJ)  core-$(CONFIG_VFP)		+= arch/arm/vfp/  core-$(CONFIG_XEN)		+= arch/arm/xen/ +core-$(CONFIG_KVM_ARM_HOST) 	+= arch/arm/kvm/  # If we have a machine-specific directory, then include it in the build.  
core-y				+= arch/arm/kernel/ arch/arm/mm/ arch/arm/common/ diff --git a/arch/arm/boot/dts/dbx5x0.dtsi b/arch/arm/boot/dts/dbx5x0.dtsi index 63f2fbcfe81..69140ba99f4 100644 --- a/arch/arm/boot/dts/dbx5x0.dtsi +++ b/arch/arm/boot/dts/dbx5x0.dtsi @@ -170,10 +170,9 @@  			gpio-bank = <8>;  		}; -		pinctrl@80157000 { -			// This is actually the PRCMU base address -			reg = <0x80157000 0x2000>; -			compatible = "stericsson,nmk_pinctrl"; +		pinctrl { +			compatible = "stericsson,nmk-pinctrl"; +			prcm = <&prcmu>;  		};  		usb@a03e0000 { @@ -190,9 +189,10 @@  			interrupts = <0 25 0x4>;  		}; -		prcmu@80157000 { +		prcmu: prcmu@80157000 {  			compatible = "stericsson,db8500-prcmu";  			reg = <0x80157000 0x1000>; +			reg-names = "prcmu";  			interrupts = <0 47 0x4>;  			#address-cells = <1>;  			#size-cells = <1>; diff --git a/arch/arm/boot/dts/exynos5250-smdk5250.dts b/arch/arm/boot/dts/exynos5250-smdk5250.dts index e05b18f3c33..4db9db0a844 100644 --- a/arch/arm/boot/dts/exynos5250-smdk5250.dts +++ b/arch/arm/boot/dts/exynos5250-smdk5250.dts @@ -49,6 +49,11 @@  			compatible = "samsung,s524ad0xd1";  			reg = <0x51>;  		}; + +		wm8994: wm8994@1a { +			 compatible = "wlf,wm8994"; +			 reg = <0x1a>; +		};  	};  	i2c@121D0000 { @@ -204,4 +209,25 @@  		samsung,mfc-r = <0x43000000 0x800000>;  		samsung,mfc-l = <0x51000000 0x800000>;  	}; + +	i2s0: i2s@03830000 { +		gpios = <&gpz 0 2 0 0>, <&gpz 1 2 0 0>, <&gpz 2 2 0 0>, +			<&gpz 3 2 0 0>, <&gpz 4 2 0 0>, <&gpz 5 2 0 0>, +			<&gpz 6 2 0 0>; +	}; + +	i2s1: i2s@12D60000 { +		status = "disabled"; +	}; + +	i2s2: i2s@12D70000 { +		status = "disabled"; +	}; + +	sound { +		compatible = "samsung,smdk-wm8994"; + +		samsung,i2s-controller = <&i2s0>; +		samsung,audio-codec = <&wm8994>; +	};  }; diff --git a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi index 3acf594ea60..f50b4e85435 100644 --- a/arch/arm/boot/dts/exynos5250.dtsi +++ b/arch/arm/boot/dts/exynos5250.dtsi @@ -211,8 +211,9 @@  		compatible = "samsung,exynos4210-spi";  		reg = <0x12d20000 0x100>;  		interrupts = <0 66 0>; -		tx-dma-channel = <&pdma0 5>; /* preliminary */ -		rx-dma-channel = <&pdma0 4>; /* preliminary */ +		dmas = <&pdma0 5 +			&pdma0 4>; +		dma-names = "tx", "rx";  		#address-cells = <1>;  		#size-cells = <0>;  	}; @@ -221,8 +222,9 @@  		compatible = "samsung,exynos4210-spi";  		reg = <0x12d30000 0x100>;  		interrupts = <0 67 0>; -		tx-dma-channel = <&pdma1 5>; /* preliminary */ -		rx-dma-channel = <&pdma1 4>; /* preliminary */ +		dmas = <&pdma1 5 +			&pdma1 4>; +		dma-names = "tx", "rx";  		#address-cells = <1>;  		#size-cells = <0>;  	}; @@ -231,8 +233,9 @@  		compatible = "samsung,exynos4210-spi";  		reg = <0x12d40000 0x100>;  		interrupts = <0 68 0>; -		tx-dma-channel = <&pdma0 7>; /* preliminary */ -		rx-dma-channel = <&pdma0 6>; /* preliminary */ +		dmas = <&pdma0 7 +			&pdma0 6>; +		dma-names = "tx", "rx";  		#address-cells = <1>;  		#size-cells = <0>;  	}; @@ -269,6 +272,35 @@  		#size-cells = <0>;  	}; +	i2s0: i2s@03830000 { +		compatible = "samsung,i2s-v5"; +		reg = <0x03830000 0x100>; +		dmas = <&pdma0 10 +			&pdma0 9 +			&pdma0 8>; +		dma-names = "tx", "rx", "tx-sec"; +		samsung,supports-6ch; +		samsung,supports-rstclr; +		samsung,supports-secdai; +		samsung,idma-addr = <0x03000000>; +	}; + +	i2s1: i2s@12D60000 { +		compatible = "samsung,i2s-v5"; +		reg = <0x12D60000 0x100>; +		dmas = <&pdma1 12 +			&pdma1 11>; +		dma-names = "tx", "rx"; +	}; + +	i2s2: i2s@12D70000 { +		compatible = "samsung,i2s-v5"; +		reg = <0x12D70000 0x100>; +		dmas = <&pdma0 12 
+			&pdma0 11>; +		dma-names = "tx", "rx"; +	}; +  	amba {  		#address-cells = <1>;  		#size-cells = <1>; diff --git a/arch/arm/boot/dts/highbank.dts b/arch/arm/boot/dts/highbank.dts index 5927a8df562..6aad34ad951 100644 --- a/arch/arm/boot/dts/highbank.dts +++ b/arch/arm/boot/dts/highbank.dts @@ -37,6 +37,16 @@  			next-level-cache = <&L2>;  			clocks = <&a9pll>;  			clock-names = "cpu"; +			operating-points = < +				/* kHz    ignored */ +				 1300000  1000000 +				 1200000  1000000 +				 1100000  1000000 +				  800000  1000000 +				  400000  1000000 +				  200000  1000000 +			>; +			clock-latency = <100000>;  		};  		cpu@901 { diff --git a/arch/arm/boot/dts/imx23.dtsi b/arch/arm/boot/dts/imx23.dtsi index 65415c598a5..56afcf41aae 100644 --- a/arch/arm/boot/dts/imx23.dtsi +++ b/arch/arm/boot/dts/imx23.dtsi @@ -391,7 +391,9 @@  			};  			lradc@80050000 { +				compatible = "fsl,imx23-lradc";  				reg = <0x80050000 0x2000>; +				interrupts = <36 37 38 39 40 41 42 43 44>;  				status = "disabled";  			}; diff --git a/arch/arm/boot/dts/prima2.dtsi b/arch/arm/boot/dts/prima2.dtsi index 055fca54212..3329719a941 100644 --- a/arch/arm/boot/dts/prima2.dtsi +++ b/arch/arm/boot/dts/prima2.dtsi @@ -58,10 +58,11 @@  			#size-cells = <1>;  			ranges = <0x88000000 0x88000000 0x40000>; -			clock-controller@88000000 { +			clks: clock-controller@88000000 {  				compatible = "sirf,prima2-clkc";  				reg = <0x88000000 0x1000>;  				interrupts = <3>; +				#clock-cells = <1>;  			};  			reset-controller@88010000 { @@ -85,6 +86,7 @@  				compatible = "sirf,prima2-memc";  				reg = <0x90000000 0x10000>;  				interrupts = <27>; +				clocks = <&clks 5>;  			};  		}; @@ -104,6 +106,7 @@  				compatible = "sirf,prima2-vpp";  				reg = <0x90020000 0x10000>;  				interrupts = <31>; +				clocks = <&clks 35>;  			};  		}; @@ -117,6 +120,7 @@  				compatible = "powervr,sgx531";  				reg = <0x98000000 0x8000000>;  				interrupts = <6>; +				clocks = <&clks 32>;  			};  		}; @@ -130,6 +134,7 @@  				compatible = "sirf,prima2-video-codec";  				reg = <0xa0000000 0x8000000>;  				interrupts = <5>; +				clocks = <&clks 33>;  			};  		}; @@ -149,12 +154,14 @@  				compatible = "sirf,prima2-gps";  				reg = <0xa8010000 0x10000>;  				interrupts = <7>; +				clocks = <&clks 9>;  			};  			dsp@a9000000 {  				compatible = "sirf,prima2-dsp";  				reg = <0xa9000000 0x1000000>;  				interrupts = <8>; +				clocks = <&clks 8>;  			};  		}; @@ -174,12 +181,14 @@  				compatible = "sirf,prima2-nand";  				reg = <0xb0030000 0x10000>;  				interrupts = <41>; +				clocks = <&clks 26>;  			};  			audio@b0040000 {  				compatible = "sirf,prima2-audio";  				reg = <0xb0040000 0x10000>;  				interrupts = <35>; +				clocks = <&clks 27>;  			};  			uart0: uart@b0050000 { @@ -187,6 +196,7 @@  				compatible = "sirf,prima2-uart";  				reg = <0xb0050000 0x10000>;  				interrupts = <17>; +				clocks = <&clks 13>;  			};  			uart1: uart@b0060000 { @@ -194,6 +204,7 @@  				compatible = "sirf,prima2-uart";  				reg = <0xb0060000 0x10000>;  				interrupts = <18>; +				clocks = <&clks 14>;  			};  			uart2: uart@b0070000 { @@ -201,6 +212,7 @@  				compatible = "sirf,prima2-uart";  				reg = <0xb0070000 0x10000>;  				interrupts = <19>; +				clocks = <&clks 15>;  			};  			usp0: usp@b0080000 { @@ -208,6 +220,7 @@  				compatible = "sirf,prima2-usp";  				reg = <0xb0080000 0x10000>;  				interrupts = <20>; +				clocks = <&clks 28>;  			};  			usp1: usp@b0090000 { @@ -215,6 +228,7 @@  				compatible = "sirf,prima2-usp";  				reg = <0xb0090000 0x10000>;  				
interrupts = <21>; +				clocks = <&clks 29>;  			};  			usp2: usp@b00a0000 { @@ -222,6 +236,7 @@  				compatible = "sirf,prima2-usp";  				reg = <0xb00a0000 0x10000>;  				interrupts = <22>; +				clocks = <&clks 30>;  			};  			dmac0: dma-controller@b00b0000 { @@ -229,6 +244,7 @@  				compatible = "sirf,prima2-dmac";  				reg = <0xb00b0000 0x10000>;  				interrupts = <12>; +				clocks = <&clks 24>;  			};  			dmac1: dma-controller@b0160000 { @@ -236,11 +252,13 @@  				compatible = "sirf,prima2-dmac";  				reg = <0xb0160000 0x10000>;  				interrupts = <13>; +				clocks = <&clks 25>;  			};  			vip@b00C0000 {  				compatible = "sirf,prima2-vip";  				reg = <0xb00C0000 0x10000>; +				clocks = <&clks 31>;  			};  			spi0: spi@b00d0000 { @@ -248,6 +266,7 @@  				compatible = "sirf,prima2-spi";  				reg = <0xb00d0000 0x10000>;  				interrupts = <15>; +				clocks = <&clks 19>;  			};  			spi1: spi@b0170000 { @@ -255,6 +274,7 @@  				compatible = "sirf,prima2-spi";  				reg = <0xb0170000 0x10000>;  				interrupts = <16>; +				clocks = <&clks 20>;  			};  			i2c0: i2c@b00e0000 { @@ -262,6 +282,7 @@  				compatible = "sirf,prima2-i2c";  				reg = <0xb00e0000 0x10000>;  				interrupts = <24>; +				clocks = <&clks 17>;  			};  			i2c1: i2c@b00f0000 { @@ -269,12 +290,14 @@  				compatible = "sirf,prima2-i2c";  				reg = <0xb00f0000 0x10000>;  				interrupts = <25>; +				clocks = <&clks 18>;  			};  			tsc@b0110000 {  				compatible = "sirf,prima2-tsc";  				reg = <0xb0110000 0x10000>;  				interrupts = <33>; +				clocks = <&clks 16>;  			};  			gpio: pinctrl@b0120000 { @@ -507,17 +530,20 @@  			pwm@b0130000 {  				compatible = "sirf,prima2-pwm";  				reg = <0xb0130000 0x10000>; +				clocks = <&clks 21>;  			};  			efusesys@b0140000 {  				compatible = "sirf,prima2-efuse";  				reg = <0xb0140000 0x10000>; +				clocks = <&clks 22>;  			};  			pulsec@b0150000 {  				compatible = "sirf,prima2-pulsec";  				reg = <0xb0150000 0x10000>;  				interrupts = <48>; +				clocks = <&clks 23>;  			};  			pci-iobg { @@ -616,12 +642,14 @@  				compatible = "chipidea,ci13611a-prima2";  				reg = <0xb8000000 0x10000>;  				interrupts = <10>; +				clocks = <&clks 40>;  			};  			usb1: usb@b00f0000 {  				compatible = "chipidea,ci13611a-prima2";  				reg = <0xb8010000 0x10000>;  				interrupts = <11>; +				clocks = <&clks 41>;  			};  			sata@b00f0000 { @@ -634,6 +662,7 @@  				compatible = "sirf,prima2-security";  				reg = <0xb8030000 0x10000>;  				interrupts = <42>; +				clocks = <&clks 7>;  			};  		};  	}; diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi index e61fdd47bd0..f99f60dadf5 100644 --- a/arch/arm/boot/dts/sun4i-a10.dtsi +++ b/arch/arm/boot/dts/sun4i-a10.dtsi @@ -16,4 +16,34 @@  	memory {  		reg = <0x40000000 0x80000000>;  	}; + +	soc { +		pinctrl@01c20800 { +			compatible = "allwinner,sun4i-a10-pinctrl"; +			reg = <0x01c20800 0x400>; +			#address-cells = <1>; +			#size-cells = <0>; + +			uart0_pins_a: uart0@0 { +				allwinner,pins = "PB22", "PB23"; +				allwinner,function = "uart0"; +				allwinner,drive = <0>; +				allwinner,pull = <0>; +			}; + +			uart0_pins_b: uart0@1 { +				allwinner,pins = "PF2", "PF4"; +				allwinner,function = "uart0"; +				allwinner,drive = <0>; +				allwinner,pull = <0>; +			}; + +			uart1_pins_a: uart1@0 { +				allwinner,pins = "PA10", "PA11"; +				allwinner,function = "uart1"; +				allwinner,drive = <0>; +				allwinner,pull = <0>; +			}; +		}; +	};  }; diff --git a/arch/arm/boot/dts/sun5i-a13-olinuxino.dts 
b/arch/arm/boot/dts/sun5i-a13-olinuxino.dts index 498a091a4ea..4a1e45d4aac 100644 --- a/arch/arm/boot/dts/sun5i-a13-olinuxino.dts +++ b/arch/arm/boot/dts/sun5i-a13-olinuxino.dts @@ -24,6 +24,8 @@  	soc {  		uart1: uart@01c28400 { +			pinctrl-names = "default"; +			pinctrl-0 = <&uart1_pins_b>;  			status = "okay";  		};  	}; diff --git a/arch/arm/boot/dts/sun5i-a13.dtsi b/arch/arm/boot/dts/sun5i-a13.dtsi index 59a2d265a98..e1121890fb2 100644 --- a/arch/arm/boot/dts/sun5i-a13.dtsi +++ b/arch/arm/boot/dts/sun5i-a13.dtsi @@ -17,4 +17,27 @@  	memory {  		reg = <0x40000000 0x20000000>;  	}; + +	soc { +		pinctrl@01c20800 { +			compatible = "allwinner,sun5i-a13-pinctrl"; +			reg = <0x01c20800 0x400>; +			#address-cells = <1>; +			#size-cells = <0>; + +			uart1_pins_a: uart1@0 { +				allwinner,pins = "PE10", "PE11"; +				allwinner,function = "uart1"; +				allwinner,drive = <0>; +				allwinner,pull = <0>; +			}; + +			uart1_pins_b: uart1@1 { +				allwinner,pins = "PG3", "PG4"; +				allwinner,function = "uart1"; +				allwinner,drive = <0>; +				allwinner,pull = <0>; +			}; +		}; +	};  }; diff --git a/arch/arm/boot/dts/vt8500.dtsi b/arch/arm/boot/dts/vt8500.dtsi index d8645e990b2..cf31ced4660 100644 --- a/arch/arm/boot/dts/vt8500.dtsi +++ b/arch/arm/boot/dts/vt8500.dtsi @@ -45,6 +45,38 @@  					compatible = "fixed-clock";  					clock-frequency = <24000000>;  				}; + +				clkuart0: uart0 { +					#clock-cells = <0>; +					compatible = "via,vt8500-device-clock"; +					clocks = <&ref24>; +					enable-reg = <0x250>; +					enable-bit = <1>; +				}; + +				clkuart1: uart1 { +					#clock-cells = <0>; +					compatible = "via,vt8500-device-clock"; +					clocks = <&ref24>; +					enable-reg = <0x250>; +					enable-bit = <2>; +				}; + +				clkuart2: uart2 { +					#clock-cells = <0>; +					compatible = "via,vt8500-device-clock"; +					clocks = <&ref24>; +					enable-reg = <0x250>; +					enable-bit = <3>; +				}; + +				clkuart3: uart3 { +					#clock-cells = <0>; +					compatible = "via,vt8500-device-clock"; +					clocks = <&ref24>; +					enable-reg = <0x250>; +					enable-bit = <4>; +				};  			};  		}; @@ -83,28 +115,28 @@  			compatible = "via,vt8500-uart";  			reg = <0xd8200000 0x1040>;  			interrupts = <32>; -			clocks = <&ref24>; +			clocks = <&clkuart0>;  		};  		uart@d82b0000 {  			compatible = "via,vt8500-uart";  			reg = <0xd82b0000 0x1040>;  			interrupts = <33>; -			clocks = <&ref24>; +			clocks = <&clkuart1>;  		};  		uart@d8210000 {  			compatible = "via,vt8500-uart";  			reg = <0xd8210000 0x1040>;  			interrupts = <47>; -			clocks = <&ref24>; +			clocks = <&clkuart2>;  		};  		uart@d82c0000 {  			compatible = "via,vt8500-uart";  			reg = <0xd82c0000 0x1040>;  			interrupts = <50>; -			clocks = <&ref24>; +			clocks = <&clkuart3>;  		};  		rtc@d8100000 { diff --git a/arch/arm/boot/dts/wm8505.dtsi b/arch/arm/boot/dts/wm8505.dtsi index 330f833ac3b..e74a1c0fb9a 100644 --- a/arch/arm/boot/dts/wm8505.dtsi +++ b/arch/arm/boot/dts/wm8505.dtsi @@ -59,6 +59,54 @@  					compatible = "fixed-clock";  					clock-frequency = <24000000>;  				}; + +				clkuart0: uart0 { +					#clock-cells = <0>; +					compatible = "via,vt8500-device-clock"; +					clocks = <&ref24>; +					enable-reg = <0x250>; +					enable-bit = <1>; +				}; + +				clkuart1: uart1 { +					#clock-cells = <0>; +					compatible = "via,vt8500-device-clock"; +					clocks = <&ref24>; +					enable-reg = <0x250>; +					enable-bit = <2>; +				}; + +				clkuart2: uart2 { +					#clock-cells = <0>; +					compatible = "via,vt8500-device-clock"; +					clocks = 
<&ref24>; +					enable-reg = <0x250>; +					enable-bit = <3>; +				}; + +				clkuart3: uart3 { +					#clock-cells = <0>; +					compatible = "via,vt8500-device-clock"; +					clocks = <&ref24>; +					enable-reg = <0x250>; +					enable-bit = <4>; +				}; + +				clkuart4: uart4 { +					#clock-cells = <0>; +					compatible = "via,vt8500-device-clock"; +					clocks = <&ref24>; +					enable-reg = <0x250>; +					enable-bit = <22>; +				}; + +				clkuart5: uart5 { +					#clock-cells = <0>; +					compatible = "via,vt8500-device-clock"; +					clocks = <&ref24>; +					enable-reg = <0x250>; +					enable-bit = <23>; +				};  			};  		}; @@ -96,42 +144,42 @@  			compatible = "via,vt8500-uart";  			reg = <0xd8200000 0x1040>;  			interrupts = <32>; -			clocks = <&ref24>; +			clocks = <&clkuart0>;  		};  		uart@d82b0000 {  			compatible = "via,vt8500-uart";  			reg = <0xd82b0000 0x1040>;  			interrupts = <33>; -			clocks = <&ref24>; +			clocks = <&clkuart1>;  		};  		uart@d8210000 {  			compatible = "via,vt8500-uart";  			reg = <0xd8210000 0x1040>;  			interrupts = <47>; -			clocks = <&ref24>; +			clocks = <&clkuart2>;  		};  		uart@d82c0000 {  			compatible = "via,vt8500-uart";  			reg = <0xd82c0000 0x1040>;  			interrupts = <50>; -			clocks = <&ref24>; +			clocks = <&clkuart3>;  		};  		uart@d8370000 {  			compatible = "via,vt8500-uart";  			reg = <0xd8370000 0x1040>;  			interrupts = <31>; -			clocks = <&ref24>; +			clocks = <&clkuart4>;  		};  		uart@d8380000 {  			compatible = "via,vt8500-uart";  			reg = <0xd8380000 0x1040>;  			interrupts = <30>; -			clocks = <&ref24>; +			clocks = <&clkuart5>;  		};  		rtc@d8100000 { diff --git a/arch/arm/boot/dts/wm8650.dtsi b/arch/arm/boot/dts/wm8650.dtsi index 83b9467559b..db3c0a12e05 100644 --- a/arch/arm/boot/dts/wm8650.dtsi +++ b/arch/arm/boot/dts/wm8650.dtsi @@ -75,6 +75,22 @@  					reg = <0x204>;  				}; +				clkuart0: uart0 { + 					#clock-cells = <0>; + 					compatible = "via,vt8500-device-clock"; +					clocks = <&ref24>; +					enable-reg = <0x250>; +					enable-bit = <1>; + 				}; + +				clkuart1: uart1 { +					#clock-cells = <0>; +					compatible = "via,vt8500-device-clock"; +					clocks = <&ref24>; +					enable-reg = <0x250>; +					enable-bit = <2>; +				}; +  				arm: arm {  					#clock-cells = <0>;  					compatible = "via,vt8500-device-clock"; @@ -128,14 +144,14 @@  			compatible = "via,vt8500-uart";  			reg = <0xd8200000 0x1040>;  			interrupts = <32>; -			clocks = <&ref24>; +			clocks = <&clkuart0>;  		};  		uart@d82b0000 {  			compatible = "via,vt8500-uart";  			reg = <0xd82b0000 0x1040>;  			interrupts = <33>; -			clocks = <&ref24>; +			clocks = <&clkuart1>;  		};  		rtc@d8100000 { diff --git a/arch/arm/boot/dts/zynq-7000.dtsi b/arch/arm/boot/dts/zynq-7000.dtsi index 401c1262d4e..5914b565459 100644 --- a/arch/arm/boot/dts/zynq-7000.dtsi +++ b/arch/arm/boot/dts/zynq-7000.dtsi @@ -44,14 +44,14 @@  			compatible = "xlnx,xuartps";  			reg = <0xE0000000 0x1000>;  			interrupts = <0 27 4>; -			clock = <50000000>; +			clocks = <&uart_clk 0>;  		};  		uart1: uart@e0001000 {  			compatible = "xlnx,xuartps";  			reg = <0xE0001000 0x1000>;  			interrupts = <0 50 4>; -			clock = <50000000>; +			clocks = <&uart_clk 1>;  		};  		slcr: slcr@f8000000 { diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c index 36ae03a3f5d..87dfa9026c5 100644 --- a/arch/arm/common/gic.c +++ b/arch/arm/common/gic.c @@ -351,6 +351,25 @@ void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)  	irq_set_chained_handler(irq, gic_handle_cascade_irq);  } +static u8 
gic_get_cpumask(struct gic_chip_data *gic) +{ +	void __iomem *base = gic_data_dist_base(gic); +	u32 mask, i; + +	for (i = mask = 0; i < 32; i += 4) { +		mask = readl_relaxed(base + GIC_DIST_TARGET + i); +		mask |= mask >> 16; +		mask |= mask >> 8; +		if (mask) +			break; +	} + +	if (!mask) +		pr_crit("GIC CPU mask not found - kernel will fail to boot.\n"); + +	return mask; +} +  static void __init gic_dist_init(struct gic_chip_data *gic)  {  	unsigned int i; @@ -369,7 +388,9 @@ static void __init gic_dist_init(struct gic_chip_data *gic)  	/*  	 * Set all global interrupts to this CPU only.  	 */ -	cpumask = readl_relaxed(base + GIC_DIST_TARGET + 0); +	cpumask = gic_get_cpumask(gic); +	cpumask |= cpumask << 8; +	cpumask |= cpumask << 16;  	for (i = 32; i < gic_irqs; i += 4)  		writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4); @@ -400,7 +421,7 @@ static void __cpuinit gic_cpu_init(struct gic_chip_data *gic)  	 * Get what the GIC says our CPU mask is.  	 */  	BUG_ON(cpu >= NR_GIC_CPU_IF); -	cpu_mask = readl_relaxed(dist_base + GIC_DIST_TARGET + 0); +	cpu_mask = gic_get_cpumask(gic);  	gic_cpu_map[cpu] = cpu_mask;  	/* diff --git a/arch/arm/crypto/aes-armv4.S b/arch/arm/crypto/aes-armv4.S index e59b1d505d6..19d6cd6f29f 100644 --- a/arch/arm/crypto/aes-armv4.S +++ b/arch/arm/crypto/aes-armv4.S @@ -34,8 +34,9 @@  @ A little glue here to select the correct code below for the ARM CPU  @ that is being targetted. +#include <linux/linkage.h> +  .text -.code	32  .type	AES_Te,%object  .align	5 @@ -145,10 +146,8 @@ AES_Te:  @ void AES_encrypt(const unsigned char *in, unsigned char *out,  @ 		 const AES_KEY *key) { -.global AES_encrypt -.type   AES_encrypt,%function  .align	5 -AES_encrypt: +ENTRY(AES_encrypt)  	sub	r3,pc,#8		@ AES_encrypt  	stmdb   sp!,{r1,r4-r12,lr}  	mov	r12,r0		@ inp @@ -239,15 +238,8 @@ AES_encrypt:  	strb	r6,[r12,#14]  	strb	r3,[r12,#15]  #endif -#if __ARM_ARCH__>=5  	ldmia	sp!,{r4-r12,pc} -#else -	ldmia   sp!,{r4-r12,lr} -	tst	lr,#1 -	moveq	pc,lr			@ be binary compatible with V4, yet -	.word	0xe12fff1e			@ interoperable with Thumb ISA:-) -#endif -.size	AES_encrypt,.-AES_encrypt +ENDPROC(AES_encrypt)  .type   _armv4_AES_encrypt,%function  .align	2 @@ -386,10 +378,8 @@ _armv4_AES_encrypt:  	ldr	pc,[sp],#4		@ pop and return  .size	_armv4_AES_encrypt,.-_armv4_AES_encrypt -.global private_AES_set_encrypt_key -.type   private_AES_set_encrypt_key,%function  .align	5 -private_AES_set_encrypt_key: +ENTRY(private_AES_set_encrypt_key)  _armv4_AES_set_encrypt_key:  	sub	r3,pc,#8		@ AES_set_encrypt_key  	teq	r0,#0 @@ -658,15 +648,11 @@ _armv4_AES_set_encrypt_key:  .Ldone:	mov	r0,#0  	ldmia   sp!,{r4-r12,lr} -.Labrt:	tst	lr,#1 -	moveq	pc,lr			@ be binary compatible with V4, yet -	.word	0xe12fff1e			@ interoperable with Thumb ISA:-) -.size	private_AES_set_encrypt_key,.-private_AES_set_encrypt_key +.Labrt:	mov	pc,lr +ENDPROC(private_AES_set_encrypt_key) -.global private_AES_set_decrypt_key -.type   private_AES_set_decrypt_key,%function  .align	5 -private_AES_set_decrypt_key: +ENTRY(private_AES_set_decrypt_key)  	str	lr,[sp,#-4]!            
@ push lr  #if 0  	@ kernel does both of these in setkey so optimise this bit out by @@ -748,15 +734,8 @@ private_AES_set_decrypt_key:  	bne	.Lmix  	mov	r0,#0 -#if __ARM_ARCH__>=5  	ldmia	sp!,{r4-r12,pc} -#else -	ldmia   sp!,{r4-r12,lr} -	tst	lr,#1 -	moveq	pc,lr			@ be binary compatible with V4, yet -	.word	0xe12fff1e			@ interoperable with Thumb ISA:-) -#endif -.size	private_AES_set_decrypt_key,.-private_AES_set_decrypt_key +ENDPROC(private_AES_set_decrypt_key)  .type	AES_Td,%object  .align	5 @@ -862,10 +841,8 @@ AES_Td:  @ void AES_decrypt(const unsigned char *in, unsigned char *out,  @ 		 const AES_KEY *key) { -.global AES_decrypt -.type   AES_decrypt,%function  .align	5 -AES_decrypt: +ENTRY(AES_decrypt)  	sub	r3,pc,#8		@ AES_decrypt  	stmdb   sp!,{r1,r4-r12,lr}  	mov	r12,r0		@ inp @@ -956,15 +933,8 @@ AES_decrypt:  	strb	r6,[r12,#14]  	strb	r3,[r12,#15]  #endif -#if __ARM_ARCH__>=5  	ldmia	sp!,{r4-r12,pc} -#else -	ldmia   sp!,{r4-r12,lr} -	tst	lr,#1 -	moveq	pc,lr			@ be binary compatible with V4, yet -	.word	0xe12fff1e			@ interoperable with Thumb ISA:-) -#endif -.size	AES_decrypt,.-AES_decrypt +ENDPROC(AES_decrypt)  .type   _armv4_AES_decrypt,%function  .align	2 @@ -1064,7 +1034,9 @@ _armv4_AES_decrypt:  	and	r9,lr,r1,lsr#8  	ldrb	r7,[r10,r7]		@ Td4[s1>>0] -	ldrb	r1,[r10,r1,lsr#24]	@ Td4[s1>>24] + ARM(	ldrb	r1,[r10,r1,lsr#24]  )	@ Td4[s1>>24] + THUMB(	add	r1,r10,r1,lsr#24    ) 	@ Td4[s1>>24] + THUMB(	ldrb	r1,[r1]		    )  	ldrb	r8,[r10,r8]		@ Td4[s1>>16]  	eor	r0,r7,r0,lsl#24  	ldrb	r9,[r10,r9]		@ Td4[s1>>8] @@ -1077,7 +1049,9 @@ _armv4_AES_decrypt:  	ldrb	r8,[r10,r8]		@ Td4[s2>>0]  	and	r9,lr,r2,lsr#16 -	ldrb	r2,[r10,r2,lsr#24]	@ Td4[s2>>24] + ARM(	ldrb	r2,[r10,r2,lsr#24]  )	@ Td4[s2>>24] + THUMB(	add	r2,r10,r2,lsr#24    )	@ Td4[s2>>24] + THUMB(	ldrb	r2,[r2]		    )  	eor	r0,r0,r7,lsl#8  	ldrb	r9,[r10,r9]		@ Td4[s2>>16]  	eor	r1,r8,r1,lsl#16 @@ -1090,7 +1064,9 @@ _armv4_AES_decrypt:  	and	r9,lr,r3		@ i2  	ldrb	r9,[r10,r9]		@ Td4[s3>>0] -	ldrb	r3,[r10,r3,lsr#24]	@ Td4[s3>>24] + ARM(	ldrb	r3,[r10,r3,lsr#24]  )	@ Td4[s3>>24] + THUMB(	add	r3,r10,r3,lsr#24    )	@ Td4[s3>>24] + THUMB(	ldrb	r3,[r3]		    )  	eor	r0,r0,r7,lsl#16  	ldr	r7,[r11,#0]  	eor	r1,r1,r8,lsl#8 diff --git a/arch/arm/crypto/sha1-armv4-large.S b/arch/arm/crypto/sha1-armv4-large.S index 7050ab133b9..92c6eed7aac 100644 --- a/arch/arm/crypto/sha1-armv4-large.S +++ b/arch/arm/crypto/sha1-armv4-large.S @@ -51,13 +51,12 @@  @ Profiler-assisted and platform-specific optimization resulted in 10%  @ improvement on Cortex A8 core and 12.2 cycles per byte. -.text +#include <linux/linkage.h> -.global	sha1_block_data_order -.type	sha1_block_data_order,%function +.text  .align	2 -sha1_block_data_order: +ENTRY(sha1_block_data_order)  	stmdb	sp!,{r4-r12,lr}  	add	r2,r1,r2,lsl#6	@ r2 to point at the end of r1  	ldmia	r0,{r3,r4,r5,r6,r7} @@ -194,7 +193,7 @@ sha1_block_data_order:  	eor	r10,r10,r7,ror#2		@ F_00_19(B,C,D)  	str	r9,[r14,#-4]!  	
add	r3,r3,r10			@ E+=F_00_19(B,C,D) -	teq	r14,sp +	cmp	r14,sp  	bne	.L_00_15		@ [((11+4)*5+2)*3]  #if __ARM_ARCH__<7  	ldrb	r10,[r1,#2] @@ -374,7 +373,9 @@ sha1_block_data_order:  						@ F_xx_xx  	add	r3,r3,r9			@ E+=X[i]  	add	r3,r3,r10			@ E+=F_20_39(B,C,D) -	teq	r14,sp			@ preserve carry + ARM(	teq	r14,sp		)	@ preserve carry + THUMB(	mov	r11,sp		) + THUMB(	teq	r14,r11		)	@ preserve carry  	bne	.L_20_39_or_60_79	@ [+((12+3)*5+2)*4]  	bcs	.L_done			@ [+((12+3)*5+2)*4], spare 300 bytes @@ -466,7 +467,7 @@ sha1_block_data_order:  	add	r3,r3,r9			@ E+=X[i]  	add	r3,r3,r10			@ E+=F_40_59(B,C,D)  	add	r3,r3,r11,ror#2 -	teq	r14,sp +	cmp	r14,sp  	bne	.L_40_59		@ [+((12+5)*5+2)*4]  	ldr	r8,.LK_60_79 @@ -485,19 +486,12 @@ sha1_block_data_order:  	teq	r1,r2  	bne	.Lloop			@ [+18], total 1307 -#if __ARM_ARCH__>=5  	ldmia	sp!,{r4-r12,pc} -#else -	ldmia	sp!,{r4-r12,lr} -	tst	lr,#1 -	moveq	pc,lr			@ be binary compatible with V4, yet -	.word	0xe12fff1e			@ interoperable with Thumb ISA:-) -#endif  .align	2  .LK_00_19:	.word	0x5a827999  .LK_20_39:	.word	0x6ed9eba1  .LK_40_59:	.word	0x8f1bbcdc  .LK_60_79:	.word	0xca62c1d6 -.size	sha1_block_data_order,.-sha1_block_data_order +ENDPROC(sha1_block_data_order)  .asciz	"SHA1 block transform for ARMv4, CRYPTOGAMS by <appro@openssl.org>"  .align	2 diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h index eb87200aa4b..05ee9eebad6 100644 --- a/arch/arm/include/asm/assembler.h +++ b/arch/arm/include/asm/assembler.h @@ -246,18 +246,14 @@   *   * This macro is intended for forcing the CPU into SVC mode at boot time.   * you cannot return to the original mode. - * - * Beware, it also clobers LR.   */  .macro safe_svcmode_maskall reg:req  #if __LINUX_ARM_ARCH__ >= 6  	mrs	\reg , cpsr -	mov	lr , \reg -	and	lr , lr , #MODE_MASK -	cmp	lr , #HYP_MODE -	orr	\reg , \reg , #PSR_I_BIT | PSR_F_BIT +	eor	\reg, \reg, #HYP_MODE +	tst	\reg, #MODE_MASK  	bic	\reg , \reg , #MODE_MASK -	orr	\reg , \reg , #SVC_MODE +	orr	\reg , \reg , #PSR_I_BIT | PSR_F_BIT | SVC_MODE  THUMB(	orr	\reg , \reg , #PSR_T_BIT	)  	bne	1f  	orr	\reg, \reg, #PSR_A_BIT diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h index a59dcb5ab5f..ad41ec2471e 100644 --- a/arch/arm/include/asm/cputype.h +++ b/arch/arm/include/asm/cputype.h @@ -64,6 +64,24 @@ extern unsigned int processor_id;  #define read_cpuid_ext(reg) 0  #endif +#define ARM_CPU_IMP_ARM			0x41 +#define ARM_CPU_IMP_INTEL		0x69 + +#define ARM_CPU_PART_ARM1136		0xB360 +#define ARM_CPU_PART_ARM1156		0xB560 +#define ARM_CPU_PART_ARM1176		0xB760 +#define ARM_CPU_PART_ARM11MPCORE	0xB020 +#define ARM_CPU_PART_CORTEX_A8		0xC080 +#define ARM_CPU_PART_CORTEX_A9		0xC090 +#define ARM_CPU_PART_CORTEX_A5		0xC050 +#define ARM_CPU_PART_CORTEX_A15		0xC0F0 +#define ARM_CPU_PART_CORTEX_A7		0xC070 + +#define ARM_CPU_XSCALE_ARCH_MASK	0xe000 +#define ARM_CPU_XSCALE_ARCH_V1		0x2000 +#define ARM_CPU_XSCALE_ARCH_V2		0x4000 +#define ARM_CPU_XSCALE_ARCH_V3		0x6000 +  /*   * The CPU ID never changes at run time, so we might as well tell the   * compiler that it's constant.  
Use this function to read the CPU ID @@ -74,6 +92,21 @@ static inline unsigned int __attribute_const__ read_cpuid_id(void)  	return read_cpuid(CPUID_ID);  } +static inline unsigned int __attribute_const__ read_cpuid_implementor(void) +{ +	return (read_cpuid_id() & 0xFF000000) >> 24; +} + +static inline unsigned int __attribute_const__ read_cpuid_part_number(void) +{ +	return read_cpuid_id() & 0xFFF0; +} + +static inline unsigned int __attribute_const__ xscale_cpu_arch_version(void) +{ +	return read_cpuid_part_number() & ARM_CPU_XSCALE_ARCH_MASK; +} +  static inline unsigned int __attribute_const__ read_cpuid_cachetype(void)  {  	return read_cpuid(CPUID_CACHETYPE); diff --git a/arch/arm/include/asm/cti.h b/arch/arm/include/asm/cti.h index f2e5cad3f30..2381199acb7 100644 --- a/arch/arm/include/asm/cti.h +++ b/arch/arm/include/asm/cti.h @@ -2,6 +2,7 @@  #define __ASMARM_CTI_H  #include	<asm/io.h> +#include	<asm/hardware/coresight.h>  /* The registers' definition is from section 3.2 of   * Embedded Cross Trigger Revision: r0p0 @@ -35,11 +36,6 @@  #define		LOCKACCESS		0xFB0  #define		LOCKSTATUS		0xFB4 -/* write this value to LOCKACCESS will unlock the module, and - * other value will lock the module - */ -#define		LOCKCODE		0xC5ACCE55 -  /**   * struct cti - cross trigger interface struct   * @base: mapped virtual address for the cti base @@ -146,7 +142,7 @@ static inline void cti_irq_ack(struct cti *cti)   */  static inline void cti_unlock(struct cti *cti)  { -	__raw_writel(LOCKCODE, cti->base + LOCKACCESS); +	__raw_writel(CS_LAR_KEY, cti->base + LOCKACCESS);  }  /** @@ -158,6 +154,6 @@ static inline void cti_unlock(struct cti *cti)   */  static inline void cti_lock(struct cti *cti)  { -	__raw_writel(~LOCKCODE, cti->base + LOCKACCESS); +	__raw_writel(~CS_LAR_KEY, cti->base + LOCKACCESS);  }  #endif diff --git a/arch/arm/include/asm/hardware/coresight.h b/arch/arm/include/asm/hardware/coresight.h index 7ecd793b8f5..0cf7a6b842f 100644 --- a/arch/arm/include/asm/hardware/coresight.h +++ b/arch/arm/include/asm/hardware/coresight.h @@ -36,7 +36,7 @@  /* CoreSight Component Registers */  #define CSCR_CLASS	0xff4 -#define UNLOCK_MAGIC	0xc5acce55 +#define CS_LAR_KEY	0xc5acce55  /* ETM control register, "ETM Architecture", 3.3.1 */  #define ETMR_CTRL		0 @@ -147,11 +147,11 @@  #define etm_lock(t) do { etm_writel((t), 0, CSMR_LOCKACCESS); } while (0)  #define etm_unlock(t) \ -	do { etm_writel((t), UNLOCK_MAGIC, CSMR_LOCKACCESS); } while (0) +	do { etm_writel((t), CS_LAR_KEY, CSMR_LOCKACCESS); } while (0)  #define etb_lock(t) do { etb_writel((t), 0, CSMR_LOCKACCESS); } while (0)  #define etb_unlock(t) \ -	do { etb_writel((t), UNLOCK_MAGIC, CSMR_LOCKACCESS); } while (0) +	do { etb_writel((t), CS_LAR_KEY, CSMR_LOCKACCESS); } while (0)  #endif /* __ASM_HARDWARE_CORESIGHT_H */ diff --git a/arch/arm/include/asm/hw_breakpoint.h b/arch/arm/include/asm/hw_breakpoint.h index 01169dd723f..eef55ea9ef0 100644 --- a/arch/arm/include/asm/hw_breakpoint.h +++ b/arch/arm/include/asm/hw_breakpoint.h @@ -85,6 +85,9 @@ static inline void decode_ctrl_reg(u32 reg,  #define ARM_DSCR_HDBGEN		(1 << 14)  #define ARM_DSCR_MDBGEN		(1 << 15) +/* OSLSR os lock model bits */ +#define ARM_OSLSR_OSLM0		(1 << 0) +  /* opcode2 numbers for the co-processor instructions. 
*/  #define ARM_OP2_BVR		4  #define ARM_OP2_BCR		5 diff --git a/arch/arm/include/asm/idmap.h b/arch/arm/include/asm/idmap.h index bf863edb517..1a66f907e5c 100644 --- a/arch/arm/include/asm/idmap.h +++ b/arch/arm/include/asm/idmap.h @@ -8,6 +8,7 @@  #define __idmap __section(.idmap.text) noinline notrace  extern pgd_t *idmap_pgd; +extern pgd_t *hyp_pgd;  void setup_mm_for_reboot(void); diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h new file mode 100644 index 00000000000..7c3d813e15d --- /dev/null +++ b/arch/arm/include/asm/kvm_arm.h @@ -0,0 +1,214 @@ +/* + * Copyright (C) 2012 - Virtual Open Systems and Columbia University + * Author: Christoffer Dall <c.dall@virtualopensystems.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA. + */ + +#ifndef __ARM_KVM_ARM_H__ +#define __ARM_KVM_ARM_H__ + +#include <linux/types.h> + +/* Hyp Configuration Register (HCR) bits */ +#define HCR_TGE		(1 << 27) +#define HCR_TVM		(1 << 26) +#define HCR_TTLB	(1 << 25) +#define HCR_TPU		(1 << 24) +#define HCR_TPC		(1 << 23) +#define HCR_TSW		(1 << 22) +#define HCR_TAC		(1 << 21) +#define HCR_TIDCP	(1 << 20) +#define HCR_TSC		(1 << 19) +#define HCR_TID3	(1 << 18) +#define HCR_TID2	(1 << 17) +#define HCR_TID1	(1 << 16) +#define HCR_TID0	(1 << 15) +#define HCR_TWE		(1 << 14) +#define HCR_TWI		(1 << 13) +#define HCR_DC		(1 << 12) +#define HCR_BSU		(3 << 10) +#define HCR_BSU_IS	(1 << 10) +#define HCR_FB		(1 << 9) +#define HCR_VA		(1 << 8) +#define HCR_VI		(1 << 7) +#define HCR_VF		(1 << 6) +#define HCR_AMO		(1 << 5) +#define HCR_IMO		(1 << 4) +#define HCR_FMO		(1 << 3) +#define HCR_PTW		(1 << 2) +#define HCR_SWIO	(1 << 1) +#define HCR_VM		1 + +/* + * The bits we set in HCR: + * TAC:		Trap ACTLR + * TSC:		Trap SMC + * TSW:		Trap cache operations by set/way + * TWI:		Trap WFI + * TIDCP:	Trap L2CTLR/L2ECTLR + * BSU_IS:	Upgrade barriers to the inner shareable domain + * FB:		Force broadcast of all maintainance operations + * AMO:		Override CPSR.A and enable signaling with VA + * IMO:		Override CPSR.I and enable signaling with VI + * FMO:		Override CPSR.F and enable signaling with VF + * SWIO:	Turn set/way invalidates into set/way clean+invalidate + */ +#define HCR_GUEST_MASK (HCR_TSC | HCR_TSW | HCR_TWI | HCR_VM | HCR_BSU_IS | \ +			HCR_FB | HCR_TAC | HCR_AMO | HCR_IMO | HCR_FMO | \ +			HCR_SWIO | HCR_TIDCP) +#define HCR_VIRT_EXCP_MASK (HCR_VA | HCR_VI | HCR_VF) + +/* System Control Register (SCTLR) bits */ +#define SCTLR_TE	(1 << 30) +#define SCTLR_EE	(1 << 25) +#define SCTLR_V		(1 << 13) + +/* Hyp System Control Register (HSCTLR) bits */ +#define HSCTLR_TE	(1 << 30) +#define HSCTLR_EE	(1 << 25) +#define HSCTLR_FI	(1 << 21) +#define HSCTLR_WXN	(1 << 19) +#define HSCTLR_I	(1 << 12) +#define HSCTLR_C	(1 << 2) +#define HSCTLR_A	(1 << 1) +#define HSCTLR_M	1 +#define HSCTLR_MASK	(HSCTLR_M | HSCTLR_A | HSCTLR_C | HSCTLR_I | \ +			 HSCTLR_WXN | HSCTLR_FI | HSCTLR_EE | HSCTLR_TE) + +/* TTBCR 
and HTCR Registers bits */ +#define TTBCR_EAE	(1 << 31) +#define TTBCR_IMP	(1 << 30) +#define TTBCR_SH1	(3 << 28) +#define TTBCR_ORGN1	(3 << 26) +#define TTBCR_IRGN1	(3 << 24) +#define TTBCR_EPD1	(1 << 23) +#define TTBCR_A1	(1 << 22) +#define TTBCR_T1SZ	(3 << 16) +#define TTBCR_SH0	(3 << 12) +#define TTBCR_ORGN0	(3 << 10) +#define TTBCR_IRGN0	(3 << 8) +#define TTBCR_EPD0	(1 << 7) +#define TTBCR_T0SZ	3 +#define HTCR_MASK	(TTBCR_T0SZ | TTBCR_IRGN0 | TTBCR_ORGN0 | TTBCR_SH0) + +/* Hyp System Trap Register */ +#define HSTR_T(x)	(1 << x) +#define HSTR_TTEE	(1 << 16) +#define HSTR_TJDBX	(1 << 17) + +/* Hyp Coprocessor Trap Register */ +#define HCPTR_TCP(x)	(1 << x) +#define HCPTR_TCP_MASK	(0x3fff) +#define HCPTR_TASE	(1 << 15) +#define HCPTR_TTA	(1 << 20) +#define HCPTR_TCPAC	(1 << 31) + +/* Hyp Debug Configuration Register bits */ +#define HDCR_TDRA	(1 << 11) +#define HDCR_TDOSA	(1 << 10) +#define HDCR_TDA	(1 << 9) +#define HDCR_TDE	(1 << 8) +#define HDCR_HPME	(1 << 7) +#define HDCR_TPM	(1 << 6) +#define HDCR_TPMCR	(1 << 5) +#define HDCR_HPMN_MASK	(0x1F) + +/* + * The architecture supports 40-bit IPA as input to the 2nd stage translations + * and PTRS_PER_S2_PGD becomes 1024, because each entry covers 1GB of address + * space. + */ +#define KVM_PHYS_SHIFT	(40) +#define KVM_PHYS_SIZE	(1ULL << KVM_PHYS_SHIFT) +#define KVM_PHYS_MASK	(KVM_PHYS_SIZE - 1ULL) +#define PTRS_PER_S2_PGD	(1ULL << (KVM_PHYS_SHIFT - 30)) +#define S2_PGD_ORDER	get_order(PTRS_PER_S2_PGD * sizeof(pgd_t)) +#define S2_PGD_SIZE	(1 << S2_PGD_ORDER) + +/* Virtualization Translation Control Register (VTCR) bits */ +#define VTCR_SH0	(3 << 12) +#define VTCR_ORGN0	(3 << 10) +#define VTCR_IRGN0	(3 << 8) +#define VTCR_SL0	(3 << 6) +#define VTCR_S		(1 << 4) +#define VTCR_T0SZ	(0xf) +#define VTCR_MASK	(VTCR_SH0 | VTCR_ORGN0 | VTCR_IRGN0 | VTCR_SL0 | \ +			 VTCR_S | VTCR_T0SZ) +#define VTCR_HTCR_SH	(VTCR_SH0 | VTCR_ORGN0 | VTCR_IRGN0) +#define VTCR_SL_L2	(0 << 6)	/* Starting-level: 2 */ +#define VTCR_SL_L1	(1 << 6)	/* Starting-level: 1 */ +#define KVM_VTCR_SL0	VTCR_SL_L1 +/* stage-2 input address range defined as 2^(32-T0SZ) */ +#define KVM_T0SZ	(32 - KVM_PHYS_SHIFT) +#define KVM_VTCR_T0SZ	(KVM_T0SZ & VTCR_T0SZ) +#define KVM_VTCR_S	((KVM_VTCR_T0SZ << 1) & VTCR_S) + +/* Virtualization Translation Table Base Register (VTTBR) bits */ +#if KVM_VTCR_SL0 == VTCR_SL_L2	/* see ARM DDI 0406C: B4-1720 */ +#define VTTBR_X		(14 - KVM_T0SZ) +#else +#define VTTBR_X		(5 - KVM_T0SZ) +#endif +#define VTTBR_BADDR_SHIFT (VTTBR_X - 1) +#define VTTBR_BADDR_MASK  (((1LLU << (40 - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT) +#define VTTBR_VMID_SHIFT  (48LLU) +#define VTTBR_VMID_MASK	  (0xffLLU << VTTBR_VMID_SHIFT) + +/* Hyp Syndrome Register (HSR) bits */ +#define HSR_EC_SHIFT	(26) +#define HSR_EC		(0x3fU << HSR_EC_SHIFT) +#define HSR_IL		(1U << 25) +#define HSR_ISS		(HSR_IL - 1) +#define HSR_ISV_SHIFT	(24) +#define HSR_ISV		(1U << HSR_ISV_SHIFT) +#define HSR_SRT_SHIFT	(16) +#define HSR_SRT_MASK	(0xf << HSR_SRT_SHIFT) +#define HSR_FSC		(0x3f) +#define HSR_FSC_TYPE	(0x3c) +#define HSR_SSE		(1 << 21) +#define HSR_WNR		(1 << 6) +#define HSR_CV_SHIFT	(24) +#define HSR_CV		(1U << HSR_CV_SHIFT) +#define HSR_COND_SHIFT	(20) +#define HSR_COND	(0xfU << HSR_COND_SHIFT) + +#define FSC_FAULT	(0x04) +#define FSC_PERM	(0x0c) + +/* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */ +#define HPFAR_MASK	(~0xf) + +#define HSR_EC_UNKNOWN	(0x00) +#define HSR_EC_WFI	(0x01) +#define HSR_EC_CP15_32	(0x03) +#define HSR_EC_CP15_64	(0x04) +#define HSR_EC_CP14_MR	(0x05) +#define HSR_EC_CP14_LS	
(0x06) +#define HSR_EC_CP_0_13	(0x07) +#define HSR_EC_CP10_ID	(0x08) +#define HSR_EC_JAZELLE	(0x09) +#define HSR_EC_BXJ	(0x0A) +#define HSR_EC_CP14_64	(0x0C) +#define HSR_EC_SVC_HYP	(0x11) +#define HSR_EC_HVC	(0x12) +#define HSR_EC_SMC	(0x13) +#define HSR_EC_IABT	(0x20) +#define HSR_EC_IABT_HYP	(0x21) +#define HSR_EC_DABT	(0x24) +#define HSR_EC_DABT_HYP	(0x25) + +#define HSR_HVC_IMM_MASK	((1UL << 16) - 1) + +#endif /* __ARM_KVM_ARM_H__ */ diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h new file mode 100644 index 00000000000..5e06e817778 --- /dev/null +++ b/arch/arm/include/asm/kvm_asm.h @@ -0,0 +1,82 @@ +/* + * Copyright (C) 2012 - Virtual Open Systems and Columbia University + * Author: Christoffer Dall <c.dall@virtualopensystems.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA. + */ + +#ifndef __ARM_KVM_ASM_H__ +#define __ARM_KVM_ASM_H__ + +/* 0 is reserved as an invalid value. */ +#define c0_MPIDR	1	/* MultiProcessor ID Register */ +#define c0_CSSELR	2	/* Cache Size Selection Register */ +#define c1_SCTLR	3	/* System Control Register */ +#define c1_ACTLR	4	/* Auxilliary Control Register */ +#define c1_CPACR	5	/* Coprocessor Access Control */ +#define c2_TTBR0	6	/* Translation Table Base Register 0 */ +#define c2_TTBR0_high	7	/* TTBR0 top 32 bits */ +#define c2_TTBR1	8	/* Translation Table Base Register 1 */ +#define c2_TTBR1_high	9	/* TTBR1 top 32 bits */ +#define c2_TTBCR	10	/* Translation Table Base Control R. */ +#define c3_DACR		11	/* Domain Access Control Register */ +#define c5_DFSR		12	/* Data Fault Status Register */ +#define c5_IFSR		13	/* Instruction Fault Status Register */ +#define c5_ADFSR	14	/* Auxilary Data Fault Status R */ +#define c5_AIFSR	15	/* Auxilary Instrunction Fault Status R */ +#define c6_DFAR		16	/* Data Fault Address Register */ +#define c6_IFAR		17	/* Instruction Fault Address Register */ +#define c9_L2CTLR	18	/* Cortex A15 L2 Control Register */ +#define c10_PRRR	19	/* Primary Region Remap Register */ +#define c10_NMRR	20	/* Normal Memory Remap Register */ +#define c12_VBAR	21	/* Vector Base Address Register */ +#define c13_CID		22	/* Context ID Register */ +#define c13_TID_URW	23	/* Thread ID, User R/W */ +#define c13_TID_URO	24	/* Thread ID, User R/O */ +#define c13_TID_PRIV	25	/* Thread ID, Privileged */ +#define NR_CP15_REGS	26	/* Number of regs (incl. 
invalid) */ + +#define ARM_EXCEPTION_RESET	  0 +#define ARM_EXCEPTION_UNDEFINED   1 +#define ARM_EXCEPTION_SOFTWARE    2 +#define ARM_EXCEPTION_PREF_ABORT  3 +#define ARM_EXCEPTION_DATA_ABORT  4 +#define ARM_EXCEPTION_IRQ	  5 +#define ARM_EXCEPTION_FIQ	  6 +#define ARM_EXCEPTION_HVC	  7 + +#ifndef __ASSEMBLY__ +struct kvm; +struct kvm_vcpu; + +extern char __kvm_hyp_init[]; +extern char __kvm_hyp_init_end[]; + +extern char __kvm_hyp_exit[]; +extern char __kvm_hyp_exit_end[]; + +extern char __kvm_hyp_vector[]; + +extern char __kvm_hyp_code_start[]; +extern char __kvm_hyp_code_end[]; + +extern void __kvm_tlb_flush_vmid(struct kvm *kvm); + +extern void __kvm_flush_vm_context(void); +extern void __kvm_tlb_flush_vmid(struct kvm *kvm); + +extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu); +#endif + +#endif /* __ARM_KVM_ASM_H__ */ diff --git a/arch/arm/include/asm/kvm_coproc.h b/arch/arm/include/asm/kvm_coproc.h new file mode 100644 index 00000000000..4917c2f7e45 --- /dev/null +++ b/arch/arm/include/asm/kvm_coproc.h @@ -0,0 +1,47 @@ +/* + * Copyright (C) 2012 Rusty Russell IBM Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA. + */ + +#ifndef __ARM_KVM_COPROC_H__ +#define __ARM_KVM_COPROC_H__ +#include <linux/kvm_host.h> + +void kvm_reset_coprocs(struct kvm_vcpu *vcpu); + +struct kvm_coproc_target_table { +	unsigned target; +	const struct coproc_reg *table; +	size_t num; +}; +void kvm_register_target_coproc_table(struct kvm_coproc_target_table *table); + +int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run); +int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run); +int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run); +int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run); +int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run); +int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run); + +unsigned long kvm_arm_num_guest_msrs(struct kvm_vcpu *vcpu); +int kvm_arm_copy_msrindices(struct kvm_vcpu *vcpu, u64 __user *uindices); +void kvm_coproc_table_init(void); + +struct kvm_one_reg; +int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices); +int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *); +int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *); +unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu); +#endif /* __ARM_KVM_COPROC_H__ */ diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h new file mode 100644 index 00000000000..fd611996bfb --- /dev/null +++ b/arch/arm/include/asm/kvm_emulate.h @@ -0,0 +1,72 @@ +/* + * Copyright (C) 2012 - Virtual Open Systems and Columbia University + * Author: Christoffer Dall <c.dall@virtualopensystems.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, 
as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA. + */ + +#ifndef __ARM_KVM_EMULATE_H__ +#define __ARM_KVM_EMULATE_H__ + +#include <linux/kvm_host.h> +#include <asm/kvm_asm.h> +#include <asm/kvm_mmio.h> + +u32 *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num); +u32 *vcpu_spsr(struct kvm_vcpu *vcpu); + +int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run); +void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr); +void kvm_inject_undefined(struct kvm_vcpu *vcpu); +void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr); +void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr); + +static inline bool vcpu_mode_is_32bit(struct kvm_vcpu *vcpu) +{ +	return 1; +} + +static inline u32 *vcpu_pc(struct kvm_vcpu *vcpu) +{ +	return (u32 *)&vcpu->arch.regs.usr_regs.ARM_pc; +} + +static inline u32 *vcpu_cpsr(struct kvm_vcpu *vcpu) +{ +	return (u32 *)&vcpu->arch.regs.usr_regs.ARM_cpsr; +} + +static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu) +{ +	*vcpu_cpsr(vcpu) |= PSR_T_BIT; +} + +static inline bool mode_has_spsr(struct kvm_vcpu *vcpu) +{ +	unsigned long cpsr_mode = vcpu->arch.regs.usr_regs.ARM_cpsr & MODE_MASK; +	return (cpsr_mode > USR_MODE && cpsr_mode < SYSTEM_MODE); +} + +static inline bool vcpu_mode_priv(struct kvm_vcpu *vcpu) +{ +	unsigned long cpsr_mode = vcpu->arch.regs.usr_regs.ARM_cpsr & MODE_MASK; +	return cpsr_mode > USR_MODE;; +} + +static inline bool kvm_vcpu_reg_is_pc(struct kvm_vcpu *vcpu, int reg) +{ +	return reg == 15; +} + +#endif /* __ARM_KVM_EMULATE_H__ */ diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h new file mode 100644 index 00000000000..98b4d1a7292 --- /dev/null +++ b/arch/arm/include/asm/kvm_host.h @@ -0,0 +1,161 @@ +/* + * Copyright (C) 2012 - Virtual Open Systems and Columbia University + * Author: Christoffer Dall <c.dall@virtualopensystems.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA. + */ + +#ifndef __ARM_KVM_HOST_H__ +#define __ARM_KVM_HOST_H__ + +#include <asm/kvm.h> +#include <asm/kvm_asm.h> +#include <asm/kvm_mmio.h> +#include <asm/fpstate.h> + +#define KVM_MAX_VCPUS CONFIG_KVM_ARM_MAX_VCPUS +#define KVM_MEMORY_SLOTS 32 +#define KVM_PRIVATE_MEM_SLOTS 4 +#define KVM_COALESCED_MMIO_PAGE_OFFSET 1 +#define KVM_HAVE_ONE_REG + +#define KVM_VCPU_MAX_FEATURES 1 + +/* We don't currently support large pages. 
*/ +#define KVM_HPAGE_GFN_SHIFT(x)	0 +#define KVM_NR_PAGE_SIZES	1 +#define KVM_PAGES_PER_HPAGE(x)	(1UL<<31) + +struct kvm_vcpu; +u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode); +int kvm_target_cpu(void); +int kvm_reset_vcpu(struct kvm_vcpu *vcpu); +void kvm_reset_coprocs(struct kvm_vcpu *vcpu); + +struct kvm_arch { +	/* VTTBR value associated with below pgd and vmid */ +	u64    vttbr; + +	/* +	 * Anything that is not used directly from assembly code goes +	 * here. +	 */ + +	/* The VMID generation used for the virt. memory system */ +	u64    vmid_gen; +	u32    vmid; + +	/* Stage-2 page table */ +	pgd_t *pgd; +}; + +#define KVM_NR_MEM_OBJS     40 + +/* + * We don't want allocation failures within the mmu code, so we preallocate + * enough memory for a single page fault in a cache. + */ +struct kvm_mmu_memory_cache { +	int nobjs; +	void *objects[KVM_NR_MEM_OBJS]; +}; + +struct kvm_vcpu_arch { +	struct kvm_regs regs; + +	int target; /* Processor target */ +	DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES); + +	/* System control coprocessor (cp15) */ +	u32 cp15[NR_CP15_REGS]; + +	/* The CPU type we expose to the VM */ +	u32 midr; + +	/* Exception Information */ +	u32 hsr;		/* Hyp Syndrome Register */ +	u32 hxfar;		/* Hyp Data/Inst Fault Address Register */ +	u32 hpfar;		/* Hyp IPA Fault Address Register */ + +	/* Floating point registers (VFP and Advanced SIMD/NEON) */ +	struct vfp_hard_struct vfp_guest; +	struct vfp_hard_struct *vfp_host; + +	/* +	 * Anything that is not used directly from assembly code goes +	 * here. +	 */ +	/* dcache set/way operation pending */ +	int last_pcpu; +	cpumask_t require_dcache_flush; + +	/* Don't run the guest on this vcpu */ +	bool pause; + +	/* IO related fields */ +	struct kvm_decode mmio_decode; + +	/* Interrupt related fields */ +	u32 irq_lines;		/* IRQ and FIQ levels */ + +	/* Hyp exception information */ +	u32 hyp_pc;		/* PC when exception was taken from Hyp mode */ + +	/* Cache some mmu pages needed inside spinlock regions */ +	struct kvm_mmu_memory_cache mmu_page_cache; + +	/* Detect first run of a vcpu */ +	bool has_run_once; +}; + +struct kvm_vm_stat { +	u32 remote_tlb_flush; +}; + +struct kvm_vcpu_stat { +	u32 halt_wakeup; +}; + +struct kvm_vcpu_init; +int kvm_vcpu_set_target(struct kvm_vcpu *vcpu, +			const struct kvm_vcpu_init *init); +unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu); +int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices); +struct kvm_one_reg; +int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg); +int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg); +u64 kvm_call_hyp(void *hypfn, ...); +void force_vm_exit(const cpumask_t *mask); + +#define KVM_ARCH_WANT_MMU_NOTIFIER +struct kvm; +int kvm_unmap_hva(struct kvm *kvm, unsigned long hva); +int kvm_unmap_hva_range(struct kvm *kvm, +			unsigned long start, unsigned long end); +void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); + +unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu); +int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices); + +/* We do not have shadow page tables, hence the empty hooks */ +static inline int kvm_age_hva(struct kvm *kvm, unsigned long hva) +{ +	return 0; +} + +static inline int kvm_test_age_hva(struct kvm *kvm, unsigned long hva) +{ +	return 0; +} +#endif /* __ARM_KVM_HOST_H__ */ diff --git a/arch/arm/include/asm/kvm_mmio.h b/arch/arm/include/asm/kvm_mmio.h new file mode 100644 index 00000000000..adcc0d7d317 --- /dev/null +++ 
b/arch/arm/include/asm/kvm_mmio.h @@ -0,0 +1,56 @@ +/* + * Copyright (C) 2012 - Virtual Open Systems and Columbia University + * Author: Christoffer Dall <c.dall@virtualopensystems.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA. + */ + +#ifndef __ARM_KVM_MMIO_H__ +#define __ARM_KVM_MMIO_H__ + +#include <linux/kvm_host.h> +#include <asm/kvm_asm.h> +#include <asm/kvm_arm.h> + +struct kvm_decode { +	unsigned long rt; +	bool sign_extend; +}; + +/* + * The in-kernel MMIO emulation code wants to use a copy of run->mmio, + * which is an anonymous type. Use our own type instead. + */ +struct kvm_exit_mmio { +	phys_addr_t	phys_addr; +	u8		data[8]; +	u32		len; +	bool		is_write; +}; + +static inline void kvm_prepare_mmio(struct kvm_run *run, +				    struct kvm_exit_mmio *mmio) +{ +	run->mmio.phys_addr	= mmio->phys_addr; +	run->mmio.len		= mmio->len; +	run->mmio.is_write	= mmio->is_write; +	memcpy(run->mmio.data, mmio->data, mmio->len); +	run->exit_reason	= KVM_EXIT_MMIO; +} + +int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run); +int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run, +		 phys_addr_t fault_ipa); + +#endif	/* __ARM_KVM_MMIO_H__ */ diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h new file mode 100644 index 00000000000..421a20b3487 --- /dev/null +++ b/arch/arm/include/asm/kvm_mmu.h @@ -0,0 +1,50 @@ +/* + * Copyright (C) 2012 - Virtual Open Systems and Columbia University + * Author: Christoffer Dall <c.dall@virtualopensystems.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA. 
+ */ + +#ifndef __ARM_KVM_MMU_H__ +#define __ARM_KVM_MMU_H__ + +int create_hyp_mappings(void *from, void *to); +int create_hyp_io_mappings(void *from, void *to, phys_addr_t); +void free_hyp_pmds(void); + +int kvm_alloc_stage2_pgd(struct kvm *kvm); +void kvm_free_stage2_pgd(struct kvm *kvm); +int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa, +			  phys_addr_t pa, unsigned long size); + +int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run); + +void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu); + +phys_addr_t kvm_mmu_get_httbr(void); +int kvm_mmu_init(void); +void kvm_clear_hyp_idmap(void); + +static inline bool kvm_is_write_fault(unsigned long hsr) +{ +	unsigned long hsr_ec = hsr >> HSR_EC_SHIFT; +	if (hsr_ec == HSR_EC_IABT) +		return false; +	else if ((hsr & HSR_ISV) && !(hsr & HSR_WNR)) +		return false; +	else +		return true; +} + +#endif /* __ARM_KVM_MMU_H__ */ diff --git a/arch/arm/include/asm/kvm_psci.h b/arch/arm/include/asm/kvm_psci.h new file mode 100644 index 00000000000..9a83d98bf17 --- /dev/null +++ b/arch/arm/include/asm/kvm_psci.h @@ -0,0 +1,23 @@ +/* + * Copyright (C) 2012 - ARM Ltd + * Author: Marc Zyngier <marc.zyngier@arm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program.  If not, see <http://www.gnu.org/licenses/>. + */ + +#ifndef __ARM_KVM_PSCI_H__ +#define __ARM_KVM_PSCI_H__ + +bool kvm_psci_call(struct kvm_vcpu *vcpu); + +#endif /* __ARM_KVM_PSCI_H__ */ diff --git a/arch/arm/include/asm/mach/pci.h b/arch/arm/include/asm/mach/pci.h index db9fedb57f2..5cf2e979b4b 100644 --- a/arch/arm/include/asm/mach/pci.h +++ b/arch/arm/include/asm/mach/pci.h @@ -23,6 +23,7 @@ struct hw_pci {  #endif  	struct pci_ops	*ops;  	int		nr_controllers; +	void		**private_data;  	int		(*setup)(int nr, struct pci_sys_data *);  	struct pci_bus *(*scan)(int nr, struct pci_sys_data *);  	void		(*preinit)(void); diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index b11105c8599..57870ab313c 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h @@ -36,23 +36,23 @@   * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area   */  #define PAGE_OFFSET		UL(CONFIG_PAGE_OFFSET) -#define TASK_SIZE		(UL(CONFIG_PAGE_OFFSET) - UL(0x01000000)) -#define TASK_UNMAPPED_BASE	(UL(CONFIG_PAGE_OFFSET) / 3) +#define TASK_SIZE		(UL(CONFIG_PAGE_OFFSET) - UL(SZ_16M)) +#define TASK_UNMAPPED_BASE	ALIGN(TASK_SIZE / 3, SZ_16M)  /*   * The maximum size of a 26-bit user space task.   */ -#define TASK_SIZE_26		UL(0x04000000) +#define TASK_SIZE_26		(UL(1) << 26)  /*   * The module space lives between the addresses given by TASK_SIZE   * and PAGE_OFFSET - it must be within 32MB of the kernel text.   
*/  #ifndef CONFIG_THUMB2_KERNEL -#define MODULES_VADDR		(PAGE_OFFSET - 16*1024*1024) +#define MODULES_VADDR		(PAGE_OFFSET - SZ_16M)  #else  /* smaller range for Thumb-2 symbols relocation (2^24)*/ -#define MODULES_VADDR		(PAGE_OFFSET - 8*1024*1024) +#define MODULES_VADDR		(PAGE_OFFSET - SZ_8M)  #endif  #if TASK_SIZE > MODULES_VADDR diff --git a/arch/arm/include/asm/opcodes-sec.h b/arch/arm/include/asm/opcodes-sec.h new file mode 100644 index 00000000000..bc3a9174417 --- /dev/null +++ b/arch/arm/include/asm/opcodes-sec.h @@ -0,0 +1,24 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + * Copyright (C) 2012 ARM Limited + */ + +#ifndef __ASM_ARM_OPCODES_SEC_H +#define __ASM_ARM_OPCODES_SEC_H + +#include <asm/opcodes.h> + +#define __SMC(imm4) __inst_arm_thumb32(					\ +	0xE1600070 | (((imm4) & 0xF) << 0),				\ +	0xF7F08000 | (((imm4) & 0xF) << 16)				\ +) + +#endif /* __ASM_ARM_OPCODES_SEC_H */ diff --git a/arch/arm/include/asm/opcodes.h b/arch/arm/include/asm/opcodes.h index 74e211a6fb2..e796c598513 100644 --- a/arch/arm/include/asm/opcodes.h +++ b/arch/arm/include/asm/opcodes.h @@ -10,6 +10,7 @@  #define __ASM_ARM_OPCODES_H  #ifndef __ASSEMBLY__ +#include <linux/linkage.h>  extern asmlinkage unsigned int arm_check_condition(u32 opcode, u32 psr);  #endif diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h index 53426c66352..12f71a19042 100644 --- a/arch/arm/include/asm/outercache.h +++ b/arch/arm/include/asm/outercache.h @@ -92,6 +92,7 @@ static inline void outer_flush_range(phys_addr_t start, phys_addr_t end)  static inline void outer_flush_all(void) { }  static inline void outer_inv_all(void) { }  static inline void outer_disable(void) { } +static inline void outer_resume(void) { }  #endif diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h index d7952824c5c..18f5cef82ad 100644 --- a/arch/arm/include/asm/pgtable-3level-hwdef.h +++ b/arch/arm/include/asm/pgtable-3level-hwdef.h @@ -32,6 +32,9 @@  #define PMD_TYPE_SECT		(_AT(pmdval_t, 1) << 0)  #define PMD_BIT4		(_AT(pmdval_t, 0))  #define PMD_DOMAIN(x)		(_AT(pmdval_t, 0)) +#define PMD_APTABLE_SHIFT	(61) +#define PMD_APTABLE		(_AT(pgdval_t, 3) << PMD_APTABLE_SHIFT) +#define PMD_PXNTABLE		(_AT(pgdval_t, 1) << 59)  /*   *   - section @@ -41,9 +44,11 @@  #define PMD_SECT_S		(_AT(pmdval_t, 3) << 8)  #define PMD_SECT_AF		(_AT(pmdval_t, 1) << 10)  #define PMD_SECT_nG		(_AT(pmdval_t, 1) << 11) +#define PMD_SECT_PXN		(_AT(pmdval_t, 1) << 53)  #define PMD_SECT_XN		(_AT(pmdval_t, 1) << 54)  #define PMD_SECT_AP_WRITE	(_AT(pmdval_t, 0))  #define PMD_SECT_AP_READ	(_AT(pmdval_t, 0)) +#define PMD_SECT_AP1		(_AT(pmdval_t, 1) << 6)  #define PMD_SECT_TEX(x)		(_AT(pmdval_t, 0))  /* diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h index a3f37929940..6ef8afd1b64 100--- --- a/arch/arm/include/asm/pgtable-3level.h +++ b/arch/arm/include/asm/pgtable-3level.h @@ -104,11 +104,29 @@   */  #define L_PGD_SWAPPER		(_AT(pgdval_t, 1) << 55)	/* swapper_pg_dir entry */ +/* + * 2nd stage PTE definitions for LPAE.
+ */ +#define L_PTE_S2_MT_UNCACHED	 (_AT(pteval_t, 0x5) << 2) /* MemAttr[3:0] */ +#define L_PTE_S2_MT_WRITETHROUGH (_AT(pteval_t, 0xa) << 2) /* MemAttr[3:0] */ +#define L_PTE_S2_MT_WRITEBACK	 (_AT(pteval_t, 0xf) << 2) /* MemAttr[3:0] */ +#define L_PTE_S2_RDONLY		 (_AT(pteval_t, 1) << 6)   /* HAP[1]   */ +#define L_PTE_S2_RDWR		 (_AT(pteval_t, 2) << 6)   /* HAP[2:1] */ + +/* + * Hyp-mode PL2 PTE definitions for LPAE. + */ +#define L_PTE_HYP		L_PTE_USER +  #ifndef __ASSEMBLY__  #define pud_none(pud)		(!pud_val(pud))  #define pud_bad(pud)		(!(pud_val(pud) & 2))  #define pud_present(pud)	(pud_val(pud)) +#define pmd_table(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \ +						 PMD_TYPE_TABLE) +#define pmd_sect(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \ +						 PMD_TYPE_SECT)  #define pud_clear(pudp)			\  	do {				\ diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index 9c82f988c0e..f30ac3b55ba 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h @@ -70,6 +70,9 @@ extern void __pgd_error(const char *file, int line, pgd_t);  extern pgprot_t		pgprot_user;  extern pgprot_t		pgprot_kernel; +extern pgprot_t		pgprot_hyp_device; +extern pgprot_t		pgprot_s2; +extern pgprot_t		pgprot_s2_device;  #define _MOD_PROT(p, b)	__pgprot(pgprot_val(p) | (b)) @@ -82,6 +85,10 @@ extern pgprot_t		pgprot_kernel;  #define PAGE_READONLY_EXEC	_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY)  #define PAGE_KERNEL		_MOD_PROT(pgprot_kernel, L_PTE_XN)  #define PAGE_KERNEL_EXEC	pgprot_kernel +#define PAGE_HYP		_MOD_PROT(pgprot_kernel, L_PTE_HYP) +#define PAGE_HYP_DEVICE		_MOD_PROT(pgprot_hyp_device, L_PTE_HYP) +#define PAGE_S2			_MOD_PROT(pgprot_s2, L_PTE_S2_RDONLY) +#define PAGE_S2_DEVICE		_MOD_PROT(pgprot_s2_device, L_PTE_USER | L_PTE_S2_RDONLY)  #define __PAGE_NONE		__pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN | L_PTE_NONE)  #define __PAGE_SHARED		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN) diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h new file mode 100644 index 00000000000..ce0dbe7c162 --- /dev/null +++ b/arch/arm/include/asm/psci.h @@ -0,0 +1,36 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. 
+ * + * Copyright (C) 2012 ARM Limited + */ + +#ifndef __ASM_ARM_PSCI_H +#define __ASM_ARM_PSCI_H + +#define PSCI_POWER_STATE_TYPE_STANDBY		0 +#define PSCI_POWER_STATE_TYPE_POWER_DOWN	1 + +struct psci_power_state { +	u16	id; +	u8	type; +	u8	affinity_level; +}; + +struct psci_operations { +	int (*cpu_suspend)(struct psci_power_state state, +			   unsigned long entry_point); +	int (*cpu_off)(struct psci_power_state state); +	int (*cpu_on)(unsigned long cpuid, unsigned long entry_point); +	int (*migrate)(unsigned long cpuid); +}; + +extern struct psci_operations psci_ops; + +#endif /* __ASM_ARM_PSCI_H */ diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h index b4ca707d0a6..6220e9fdf4c 100644 --- a/arch/arm/include/asm/spinlock.h +++ b/arch/arm/include/asm/spinlock.h @@ -119,22 +119,8 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)  static inline void arch_spin_unlock(arch_spinlock_t *lock)  { -	unsigned long tmp; -	u32 slock; -  	smp_mb(); - -	__asm__ __volatile__( -"	mov	%1, #1\n" -"1:	ldrex	%0, [%2]\n" -"	uadd16	%0, %0, %1\n" -"	strex	%1, %0, [%2]\n" -"	teq	%1, #0\n" -"	bne	1b" -	: "=&r" (slock), "=&r" (tmp) -	: "r" (&lock->slock) -	: "cc"); - +	lock->tickets.owner++;  	dsb_sev();  } diff --git a/arch/arm/include/asm/virt.h b/arch/arm/include/asm/virt.h index 86164df86cb..50af92bac73 100644 --- a/arch/arm/include/asm/virt.h +++ b/arch/arm/include/asm/virt.h @@ -24,9 +24,9 @@  /*   * Flag indicating that the kernel was not entered in the same mode on every   * CPU.  The zImage loader stashes this value in an SPSR, so we need an - * architecturally defined flag bit here (the N flag, as it happens) + * architecturally defined flag bit here.   */ -#define BOOT_CPU_MODE_MISMATCH (1<<31) +#define BOOT_CPU_MODE_MISMATCH	PSR_N_BIT  #ifndef __ASSEMBLY__ diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h new file mode 100644 index 00000000000..3303ff5adbf --- /dev/null +++ b/arch/arm/include/uapi/asm/kvm.h @@ -0,0 +1,164 @@ +/* + * Copyright (C) 2012 - Virtual Open Systems and Columbia University + * Author: Christoffer Dall <c.dall@virtualopensystems.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA. 
+ */ + +#ifndef __ARM_KVM_H__ +#define __ARM_KVM_H__ + +#include <linux/types.h> +#include <asm/ptrace.h> + +#define __KVM_HAVE_GUEST_DEBUG +#define __KVM_HAVE_IRQ_LINE + +#define KVM_REG_SIZE(id)						\ +	(1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT)) + +/* Valid for svc_regs, abt_regs, und_regs, irq_regs in struct kvm_regs */ +#define KVM_ARM_SVC_sp		svc_regs[0] +#define KVM_ARM_SVC_lr		svc_regs[1] +#define KVM_ARM_SVC_spsr	svc_regs[2] +#define KVM_ARM_ABT_sp		abt_regs[0] +#define KVM_ARM_ABT_lr		abt_regs[1] +#define KVM_ARM_ABT_spsr	abt_regs[2] +#define KVM_ARM_UND_sp		und_regs[0] +#define KVM_ARM_UND_lr		und_regs[1] +#define KVM_ARM_UND_spsr	und_regs[2] +#define KVM_ARM_IRQ_sp		irq_regs[0] +#define KVM_ARM_IRQ_lr		irq_regs[1] +#define KVM_ARM_IRQ_spsr	irq_regs[2] + +/* Valid only for fiq_regs in struct kvm_regs */ +#define KVM_ARM_FIQ_r8		fiq_regs[0] +#define KVM_ARM_FIQ_r9		fiq_regs[1] +#define KVM_ARM_FIQ_r10		fiq_regs[2] +#define KVM_ARM_FIQ_fp		fiq_regs[3] +#define KVM_ARM_FIQ_ip		fiq_regs[4] +#define KVM_ARM_FIQ_sp		fiq_regs[5] +#define KVM_ARM_FIQ_lr		fiq_regs[6] +#define KVM_ARM_FIQ_spsr	fiq_regs[7] + +struct kvm_regs { +	struct pt_regs usr_regs;/* R0_usr - R14_usr, PC, CPSR */ +	__u32 svc_regs[3];	/* SP_svc, LR_svc, SPSR_svc */ +	__u32 abt_regs[3];	/* SP_abt, LR_abt, SPSR_abt */ +	__u32 und_regs[3];	/* SP_und, LR_und, SPSR_und */ +	__u32 irq_regs[3];	/* SP_irq, LR_irq, SPSR_irq */ +	__u32 fiq_regs[8];	/* R8_fiq - R14_fiq, SPSR_fiq */ +}; + +/* Supported Processor Types */ +#define KVM_ARM_TARGET_CORTEX_A15	0 +#define KVM_ARM_NUM_TARGETS		1 + +#define KVM_ARM_VCPU_POWER_OFF		0 /* CPU is started in OFF state */ + +struct kvm_vcpu_init { +	__u32 target; +	__u32 features[7]; +}; + +struct kvm_sregs { +}; + +struct kvm_fpu { +}; + +struct kvm_guest_debug_arch { +}; + +struct kvm_debug_exit_arch { +}; + +struct kvm_sync_regs { +}; + +struct kvm_arch_memory_slot { +}; + +/* If you need to interpret the index values, here is the key: */ +#define KVM_REG_ARM_COPROC_MASK		0x000000000FFF0000 +#define KVM_REG_ARM_COPROC_SHIFT	16 +#define KVM_REG_ARM_32_OPC2_MASK	0x0000000000000007 +#define KVM_REG_ARM_32_OPC2_SHIFT	0 +#define KVM_REG_ARM_OPC1_MASK		0x0000000000000078 +#define KVM_REG_ARM_OPC1_SHIFT		3 +#define KVM_REG_ARM_CRM_MASK		0x0000000000000780 +#define KVM_REG_ARM_CRM_SHIFT		7 +#define KVM_REG_ARM_32_CRN_MASK		0x0000000000007800 +#define KVM_REG_ARM_32_CRN_SHIFT	11 + +/* Normal registers are mapped as coprocessor 16. */ +#define KVM_REG_ARM_CORE		(0x0010 << KVM_REG_ARM_COPROC_SHIFT) +#define KVM_REG_ARM_CORE_REG(name)	(offsetof(struct kvm_regs, name) / 4) + +/* Some registers need more space to represent values. */ +#define KVM_REG_ARM_DEMUX		(0x0011 << KVM_REG_ARM_COPROC_SHIFT) +#define KVM_REG_ARM_DEMUX_ID_MASK	0x000000000000FF00 +#define KVM_REG_ARM_DEMUX_ID_SHIFT	8 +#define KVM_REG_ARM_DEMUX_ID_CCSIDR	(0x00 << KVM_REG_ARM_DEMUX_ID_SHIFT) +#define KVM_REG_ARM_DEMUX_VAL_MASK	0x00000000000000FF +#define KVM_REG_ARM_DEMUX_VAL_SHIFT	0 + +/* VFP registers: we could overload CP10 like ARM does, but that's ugly. 
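(A note on the register-indexing scheme above: the COPROC/CORE macros compose a register index from a field offset in struct kvm_regs. Below is a minimal, self-contained userspace sketch; the structs are local stand-ins mirroring the layout shown above, and only the shift/space values are taken from this header:)

#include <stddef.h>
#include <stdio.h>

/* local stand-ins for the struct kvm_regs layout shown above */
struct pt_regs_model { unsigned int uregs[18]; };
struct kvm_regs_model {
	struct pt_regs_model usr_regs;
	unsigned int svc_regs[3];
	unsigned int abt_regs[3];
	unsigned int und_regs[3];
	unsigned int irq_regs[3];
	unsigned int fiq_regs[8];
};

#define COPROC_SHIFT	16				/* KVM_REG_ARM_COPROC_SHIFT */
#define CORE_SPACE	(0x0010 << COPROC_SHIFT)	/* KVM_REG_ARM_CORE */
#define CORE_REG(name)	(offsetof(struct kvm_regs_model, name) / 4)

int main(void)
{
	/* SP_svc sits right behind the 18-word usr_regs block: index 18 */
	printf("SP_svc index: %zu\n", CORE_REG(svc_regs[0]));
	printf("core register space: %#x\n", CORE_SPACE);
	return 0;
}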
*/ +#define KVM_REG_ARM_VFP			(0x0012 << KVM_REG_ARM_COPROC_SHIFT) +#define KVM_REG_ARM_VFP_MASK		0x000000000000FFFF +#define KVM_REG_ARM_VFP_BASE_REG	0x0 +#define KVM_REG_ARM_VFP_FPSID		0x1000 +#define KVM_REG_ARM_VFP_FPSCR		0x1001 +#define KVM_REG_ARM_VFP_MVFR1		0x1006 +#define KVM_REG_ARM_VFP_MVFR0		0x1007 +#define KVM_REG_ARM_VFP_FPEXC		0x1008 +#define KVM_REG_ARM_VFP_FPINST		0x1009 +#define KVM_REG_ARM_VFP_FPINST2		0x100A + + +/* KVM_IRQ_LINE irq field index values */ +#define KVM_ARM_IRQ_TYPE_SHIFT		24 +#define KVM_ARM_IRQ_TYPE_MASK		0xff +#define KVM_ARM_IRQ_VCPU_SHIFT		16 +#define KVM_ARM_IRQ_VCPU_MASK		0xff +#define KVM_ARM_IRQ_NUM_SHIFT		0 +#define KVM_ARM_IRQ_NUM_MASK		0xffff + +/* irq_type field */ +#define KVM_ARM_IRQ_TYPE_CPU		0 +#define KVM_ARM_IRQ_TYPE_SPI		1 +#define KVM_ARM_IRQ_TYPE_PPI		2 + +/* out-of-kernel GIC cpu interrupt injection irq_number field */ +#define KVM_ARM_IRQ_CPU_IRQ		0 +#define KVM_ARM_IRQ_CPU_FIQ		1 + +/* Highest supported SPI, from VGIC_NR_IRQS */ +#define KVM_ARM_IRQ_GIC_MAX		127 + +/* PSCI interface */ +#define KVM_PSCI_FN_BASE		0x95c1ba5e +#define KVM_PSCI_FN(n)			(KVM_PSCI_FN_BASE + (n)) + +#define KVM_PSCI_FN_CPU_SUSPEND		KVM_PSCI_FN(0) +#define KVM_PSCI_FN_CPU_OFF		KVM_PSCI_FN(1) +#define KVM_PSCI_FN_CPU_ON		KVM_PSCI_FN(2) +#define KVM_PSCI_FN_MIGRATE		KVM_PSCI_FN(3) + +#define KVM_PSCI_RET_SUCCESS		0 +#define KVM_PSCI_RET_NI			((unsigned long)-1) +#define KVM_PSCI_RET_INVAL		((unsigned long)-2) +#define KVM_PSCI_RET_DENIED		((unsigned long)-3) + +#endif /* __ARM_KVM_H__ */ diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile index 5bbec7b8183..5f3338eacad 100644 --- a/arch/arm/kernel/Makefile +++ b/arch/arm/kernel/Makefile @@ -82,5 +82,6 @@ obj-$(CONFIG_DEBUG_LL)	+= debug.o  obj-$(CONFIG_EARLY_PRINTK)	+= early_printk.o  obj-$(CONFIG_ARM_VIRT_EXT)	+= hyp-stub.o +obj-$(CONFIG_ARM_PSCI)		+= psci.o  extra-y := $(head-y) vmlinux.lds diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c index c985b481192..c8b3272dfed 100644 --- a/arch/arm/kernel/asm-offsets.c +++ b/arch/arm/kernel/asm-offsets.c @@ -13,6 +13,9 @@  #include <linux/sched.h>  #include <linux/mm.h>  #include <linux/dma-mapping.h> +#ifdef CONFIG_KVM_ARM_HOST +#include <linux/kvm_host.h> +#endif  #include <asm/cacheflush.h>  #include <asm/glue-df.h>  #include <asm/glue-pf.h> @@ -146,5 +149,27 @@ int main(void)    DEFINE(DMA_BIDIRECTIONAL,	DMA_BIDIRECTIONAL);    DEFINE(DMA_TO_DEVICE,		DMA_TO_DEVICE);    DEFINE(DMA_FROM_DEVICE,	DMA_FROM_DEVICE); +#ifdef CONFIG_KVM_ARM_HOST +  DEFINE(VCPU_KVM,		offsetof(struct kvm_vcpu, kvm)); +  DEFINE(VCPU_MIDR,		offsetof(struct kvm_vcpu, arch.midr)); +  DEFINE(VCPU_CP15,		offsetof(struct kvm_vcpu, arch.cp15)); +  DEFINE(VCPU_VFP_GUEST,	offsetof(struct kvm_vcpu, arch.vfp_guest)); +  DEFINE(VCPU_VFP_HOST,		offsetof(struct kvm_vcpu, arch.vfp_host)); +  DEFINE(VCPU_REGS,		offsetof(struct kvm_vcpu, arch.regs)); +  DEFINE(VCPU_USR_REGS,		offsetof(struct kvm_vcpu, arch.regs.usr_regs)); +  DEFINE(VCPU_SVC_REGS,		offsetof(struct kvm_vcpu, arch.regs.svc_regs)); +  DEFINE(VCPU_ABT_REGS,		offsetof(struct kvm_vcpu, arch.regs.abt_regs)); +  DEFINE(VCPU_UND_REGS,		offsetof(struct kvm_vcpu, arch.regs.und_regs)); +  DEFINE(VCPU_IRQ_REGS,		offsetof(struct kvm_vcpu, arch.regs.irq_regs)); +  DEFINE(VCPU_FIQ_REGS,		offsetof(struct kvm_vcpu, arch.regs.fiq_regs)); +  DEFINE(VCPU_PC,		offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_pc)); +  DEFINE(VCPU_CPSR,		offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_cpsr)); +  DEFINE(VCPU_IRQ_LINES,	
offsetof(struct kvm_vcpu, arch.irq_lines)); +  DEFINE(VCPU_HSR,		offsetof(struct kvm_vcpu, arch.hsr)); +  DEFINE(VCPU_HxFAR,		offsetof(struct kvm_vcpu, arch.hxfar)); +  DEFINE(VCPU_HPFAR,		offsetof(struct kvm_vcpu, arch.hpfar)); +  DEFINE(VCPU_HYP_PC,		offsetof(struct kvm_vcpu, arch.hyp_pc)); +  DEFINE(KVM_VTTBR,		offsetof(struct kvm, arch.vttbr)); +#endif    return 0;   } diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c index 379cf329239..a1f73b502ef 100644 --- a/arch/arm/kernel/bios32.c +++ b/arch/arm/kernel/bios32.c @@ -413,7 +413,7 @@ static int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)  	return irq;  } -static int __init pcibios_init_resources(int busnr, struct pci_sys_data *sys) +static int pcibios_init_resources(int busnr, struct pci_sys_data *sys)  {  	int ret;  	struct pci_host_bridge_window *window; @@ -445,7 +445,7 @@ static int __init pcibios_init_resources(int busnr, struct pci_sys_data *sys)  	return 0;  } -static void __init pcibios_init_hw(struct hw_pci *hw, struct list_head *head) +static void pcibios_init_hw(struct hw_pci *hw, struct list_head *head)  {  	struct pci_sys_data *sys = NULL;  	int ret; @@ -464,6 +464,9 @@ static void __init pcibios_init_hw(struct hw_pci *hw, struct list_head *head)  		sys->map_irq = hw->map_irq;  		INIT_LIST_HEAD(&sys->resources); +		if (hw->private_data) +			sys->private_data = hw->private_data[nr]; +  		ret = hw->setup(nr, sys);  		if (ret > 0) { @@ -493,7 +496,7 @@ static void __init pcibios_init_hw(struct hw_pci *hw, struct list_head *head)  	}  } -void __init pci_common_init(struct hw_pci *hw) +void pci_common_init(struct hw_pci *hw)  {  	struct pci_sys_data *sys;  	LIST_HEAD(head); diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c index 5ff2e77782b..5eae53e7a2e 100644 --- a/arch/arm/kernel/hw_breakpoint.c +++ b/arch/arm/kernel/hw_breakpoint.c @@ -28,6 +28,7 @@  #include <linux/perf_event.h>  #include <linux/hw_breakpoint.h>  #include <linux/smp.h> +#include <linux/cpu_pm.h>  #include <asm/cacheflush.h>  #include <asm/cputype.h> @@ -35,6 +36,7 @@  #include <asm/hw_breakpoint.h>  #include <asm/kdebug.h>  #include <asm/traps.h> +#include <asm/hardware/coresight.h>  /* Breakpoint currently in use for each BRP. */  static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]); @@ -49,6 +51,9 @@ static int core_num_wrps;  /* Debug architecture version. */  static u8 debug_arch; +/* Does debug architecture support OS Save and Restore? */ +static bool has_ossr; +  /* Maximum supported watchpoint length. */  static u8 max_watchpoint_len; @@ -903,6 +908,23 @@ static struct undef_hook debug_reg_hook = {  	.fn		= debug_reg_trap,  }; +/* Does this core support OS Save and Restore? */ +static bool core_has_os_save_restore(void) +{ +	u32 oslsr; + +	switch (get_debug_arch()) { +	case ARM_DEBUG_ARCH_V7_1: +		return true; +	case ARM_DEBUG_ARCH_V7_ECP14: +		ARM_DBG_READ(c1, c1, 4, oslsr); +		if (oslsr & ARM_OSLSR_OSLM0) +			return true; +	default: +		return false; +	} +} +  static void reset_ctrl_regs(void *unused)  {  	int i, raw_num_brps, err = 0, cpu = smp_processor_id(); @@ -930,11 +952,7 @@ static void reset_ctrl_regs(void *unused)  		if ((val & 0x1) == 0)  			err = -EPERM; -		/* -		 * Check whether we implement OS save and restore. 
-		 */ -	ARM_DBG_READ(c1, c1, 4, val); -	if ((val & 0x9) == 0) +		if (!has_ossr)  			goto clear_vcr;  		break;  	case ARM_DEBUG_ARCH_V7_1: @@ -955,9 +973,9 @@ static void reset_ctrl_regs(void *unused)  	/*  	 * Unconditionally clear the OS lock by writing a value -	 * other than 0xC5ACCE55 to the access register. +	 * other than CS_LAR_KEY to the access register.  	 */ -	ARM_DBG_WRITE(c1, c0, 4, 0); +	ARM_DBG_WRITE(c1, c0, 4, ~CS_LAR_KEY);  	isb();  	/* @@ -1015,6 +1033,30 @@ static struct notifier_block __cpuinitdata dbg_reset_nb = {  	.notifier_call = dbg_reset_notify,  }; +#ifdef CONFIG_CPU_PM +static int dbg_cpu_pm_notify(struct notifier_block *self, unsigned long action, +			     void *v) +{ +	if (action == CPU_PM_EXIT) +		reset_ctrl_regs(NULL); + +	return NOTIFY_OK; +} + +static struct notifier_block __cpuinitdata dbg_cpu_pm_nb = { +	.notifier_call = dbg_cpu_pm_notify, +}; + +static void __init pm_init(void) +{ +	cpu_pm_register_notifier(&dbg_cpu_pm_nb); +} +#else +static inline void pm_init(void) +{ +} +#endif +  static int __init arch_hw_breakpoint_init(void)  {  	debug_arch = get_debug_arch(); @@ -1024,6 +1066,8 @@ static int __init arch_hw_breakpoint_init(void)  		return 0;  	} +	has_ossr = core_has_os_save_restore(); +  	/* Determine how many BRPs/WRPs are available. */  	core_num_brps = get_num_brps();  	core_num_wrps = get_num_wrps(); @@ -1062,8 +1106,9 @@ static int __init arch_hw_breakpoint_init(void)  	hook_ifault_code(FAULT_CODE_DEBUG, hw_breakpoint_pending, SIGTRAP,  			TRAP_HWBKPT, "breakpoint debug exception"); -	/* Register hotplug notifier. */ +	/* Register hotplug and PM notifiers. */  	register_cpu_notifier(&dbg_reset_nb); +	pm_init();  	return 0;  }  arch_initcall(arch_hw_breakpoint_init); diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c index f9e8657dd24..31e0eb353cd 100644 --- a/arch/arm/kernel/perf_event.c +++ b/arch/arm/kernel/perf_event.c @@ -149,12 +149,6 @@ again:  static void  armpmu_read(struct perf_event *event)  { -	struct hw_perf_event *hwc = &event->hw; - -	/* Don't read disabled counters! */ -	if (hwc->idx < 0) -		return; -  	armpmu_event_update(event);  } @@ -207,8 +201,6 @@ armpmu_del(struct perf_event *event, int flags)  	struct hw_perf_event *hwc = &event->hw;  	int idx = hwc->idx; -	WARN_ON(idx < 0); -  	armpmu_stop(event, PERF_EF_UPDATE);  	hw_events->events[idx] = NULL;  	clear_bit(idx, hw_events->used_mask); @@ -358,7 +350,7 @@ __hw_perf_event_init(struct perf_event *event)  {  	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);  	struct hw_perf_event *hwc = &event->hw; -	int mapping, err; +	int mapping;  	mapping = armpmu->map_event(event); @@ -407,14 +399,12 @@ __hw_perf_event_init(struct perf_event *event)  		local64_set(&hwc->period_left, hwc->sample_period);  	} -	err = 0;  	if (event->group_leader != event) { -		err = validate_group(event); -		if (err) +		if (validate_group(event) != 0)  			return -EINVAL;  	} -	return err; +	return 0;  }  static int armpmu_event_init(struct perf_event *event) diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c index 5f6620684e2..1f2740e3dbc 100644 --- a/arch/arm/kernel/perf_event_cpu.c +++ b/arch/arm/kernel/perf_event_cpu.c @@ -147,7 +147,7 @@ static void cpu_pmu_init(struct arm_pmu *cpu_pmu)  	cpu_pmu->free_irq	= cpu_pmu_free_irq;  	/* Ensure the PMU has sane values out of reset.
*/ -	if (cpu_pmu && cpu_pmu->reset) +	if (cpu_pmu->reset)  		on_each_cpu(cpu_pmu->reset, cpu_pmu, 1);  } @@ -201,48 +201,46 @@ static struct platform_device_id cpu_pmu_plat_device_ids[] = {  static int probe_current_pmu(struct arm_pmu *pmu)  {  	int cpu = get_cpu(); -	unsigned long cpuid = read_cpuid_id(); -	unsigned long implementor = (cpuid & 0xFF000000) >> 24; -	unsigned long part_number = (cpuid & 0xFFF0); +	unsigned long implementor = read_cpuid_implementor(); +	unsigned long part_number = read_cpuid_part_number();  	int ret = -ENODEV;  	pr_info("probing PMU on CPU %d\n", cpu);  	/* ARM Ltd CPUs. */ -	if (0x41 == implementor) { +	if (implementor == ARM_CPU_IMP_ARM) {  		switch (part_number) { -		case 0xB360:	/* ARM1136 */ -		case 0xB560:	/* ARM1156 */ -		case 0xB760:	/* ARM1176 */ +		case ARM_CPU_PART_ARM1136: +		case ARM_CPU_PART_ARM1156: +		case ARM_CPU_PART_ARM1176:  			ret = armv6pmu_init(pmu);  			break; -		case 0xB020:	/* ARM11mpcore */ +		case ARM_CPU_PART_ARM11MPCORE:  			ret = armv6mpcore_pmu_init(pmu);  			break; -		case 0xC080:	/* Cortex-A8 */ +		case ARM_CPU_PART_CORTEX_A8:  			ret = armv7_a8_pmu_init(pmu);  			break; -		case 0xC090:	/* Cortex-A9 */ +		case ARM_CPU_PART_CORTEX_A9:  			ret = armv7_a9_pmu_init(pmu);  			break; -		case 0xC050:	/* Cortex-A5 */ +		case ARM_CPU_PART_CORTEX_A5:  			ret = armv7_a5_pmu_init(pmu);  			break; -		case 0xC0F0:	/* Cortex-A15 */ +		case ARM_CPU_PART_CORTEX_A15:  			ret = armv7_a15_pmu_init(pmu);  			break; -		case 0xC070:	/* Cortex-A7 */ +		case ARM_CPU_PART_CORTEX_A7:  			ret = armv7_a7_pmu_init(pmu);  			break;  		}  	/* Intel CPUs [xscale]. */ -	} else if (0x69 == implementor) { -		part_number = (cpuid >> 13) & 0x7; -		switch (part_number) { -		case 1: +	} else if (implementor == ARM_CPU_IMP_INTEL) { +		switch (xscale_cpu_arch_version()) { +		case ARM_CPU_XSCALE_ARCH_V1:  			ret = xscale1pmu_init(pmu);  			break; -		case 2: +		case ARM_CPU_XSCALE_ARCH_V2:  			ret = xscale2pmu_init(pmu);  			break;  		} @@ -279,17 +277,22 @@ static int cpu_pmu_device_probe(struct platform_device *pdev)  	}  	if (ret) { -		pr_info("failed to register PMU devices!"); -		kfree(pmu); -		return ret; +		pr_info("failed to probe PMU!"); +		goto out_free;  	}  	cpu_pmu = pmu;  	cpu_pmu->plat_device = pdev;  	cpu_pmu_init(cpu_pmu); -	armpmu_register(cpu_pmu, PERF_TYPE_RAW); +	ret = armpmu_register(cpu_pmu, PERF_TYPE_RAW); -	return 0; +	if (!ret) +		return 0; + +out_free: +	pr_info("failed to register PMU devices!"); +	kfree(pmu); +	return ret;  }  static struct platform_driver cpu_pmu_driver = { diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c index 041d0526a28..03664b0e8fa 100644 --- a/arch/arm/kernel/perf_event_v6.c +++ b/arch/arm/kernel/perf_event_v6.c @@ -106,7 +106,7 @@ static const unsigned armv6_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]  		},  		[C(OP_WRITE)] = {  			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED, -			[C(RESULT_MISS)]	= ARMV6_PERFCTR_ICACHE_MISS, +			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,  		},  		[C(OP_PREFETCH)] = {  			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED, @@ -259,7 +259,7 @@ static const unsigned armv6mpcore_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]  		},  		[C(OP_WRITE)] = {  			[C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED, -			[C(RESULT_MISS)]    = ARMV6MPCORE_PERFCTR_ICACHE_MISS, +			[C(RESULT_MISS)]    = CACHE_OP_UNSUPPORTED,  		},  		[C(OP_PREFETCH)] = {  			[C(RESULT_ACCESS)]  = CACHE_OP_UNSUPPORTED, diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c index 
4fbc757d9cf..8c79a9e70b8 100644 --- a/arch/arm/kernel/perf_event_v7.c +++ b/arch/arm/kernel/perf_event_v7.c @@ -157,8 +157,8 @@ static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]  			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,  		},  		[C(OP_WRITE)] = { -			[C(RESULT_ACCESS)]	= ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS, -			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL, +			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED, +			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,  		},  		[C(OP_PREFETCH)] = {  			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED, @@ -282,7 +282,7 @@ static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]  		},  		[C(OP_WRITE)] = {  			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED, -			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL, +			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,  		},  		[C(OP_PREFETCH)] = {  			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED, @@ -399,8 +399,8 @@ static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]  			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,  		},  		[C(OP_WRITE)] = { -			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS, -			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL, +			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED, +			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,  		},  		/*  		 * The prefetch counters don't differentiate between the I @@ -527,8 +527,8 @@ static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]  			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,  		},  		[C(OP_WRITE)] = { -			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS, -			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL, +			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED, +			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,  		},  		[C(OP_PREFETCH)] = {  			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED, @@ -651,8 +651,8 @@ static const unsigned armv7_a7_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]  			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL,  		},  		[C(OP_WRITE)] = { -			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_ICACHE_ACCESS, -			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_ICACHE_REFILL, +			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED, +			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,  		},  		[C(OP_PREFETCH)] = {  			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED, diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c index 2b0fe30ec12..63990c42fac 100644 --- a/arch/arm/kernel/perf_event_xscale.c +++ b/arch/arm/kernel/perf_event_xscale.c @@ -83,7 +83,7 @@ static const unsigned xscale_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]  		},  		[C(OP_WRITE)] = {  			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED, -			[C(RESULT_MISS)]	= XSCALE_PERFCTR_ICACHE_MISS, +			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,  		},  		[C(OP_PREFETCH)] = {  			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED, diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index c6dec5fc20a..047d3e40e47 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -172,14 +172,9 @@ static void default_idle(void)  	local_irq_enable();  } -void (*pm_idle)(void) = default_idle; -EXPORT_SYMBOL(pm_idle); -  /* - * The idle thread, has rather strange semantics for calling pm_idle, - * but this is what x86 does and we need to do the same, so that - * things like cpuidle get called in the same way.  The only difference - * is that we always respect 'hlt_counter' to prevent low power idle. + * The idle thread. + * We always respect 'hlt_counter' to prevent low power idle.   
*/  void cpu_idle(void)  { @@ -210,10 +205,10 @@ void cpu_idle(void)  			} else if (!need_resched()) {  				stop_critical_timings();  				if (cpuidle_idle_call()) -					pm_idle(); +					default_idle();  				start_critical_timings();  				/* -				 * pm_idle functions must always +				 * default_idle functions must always  				 * return with IRQs enabled.  				 */  				WARN_ON(irqs_disabled()); diff --git a/arch/arm/kernel/psci.c b/arch/arm/kernel/psci.c new file mode 100644 index 00000000000..36531643cc2 --- /dev/null +++ b/arch/arm/kernel/psci.c @@ -0,0 +1,211 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + * Copyright (C) 2012 ARM Limited + * + * Author: Will Deacon <will.deacon@arm.com> + */ + +#define pr_fmt(fmt) "psci: " fmt + +#include <linux/init.h> +#include <linux/of.h> + +#include <asm/compiler.h> +#include <asm/errno.h> +#include <asm/opcodes-sec.h> +#include <asm/opcodes-virt.h> +#include <asm/psci.h> + +struct psci_operations psci_ops; + +static int (*invoke_psci_fn)(u32, u32, u32, u32); + +enum psci_function { +	PSCI_FN_CPU_SUSPEND, +	PSCI_FN_CPU_ON, +	PSCI_FN_CPU_OFF, +	PSCI_FN_MIGRATE, +	PSCI_FN_MAX, +}; + +static u32 psci_function_id[PSCI_FN_MAX]; + +#define PSCI_RET_SUCCESS		0 +#define PSCI_RET_EOPNOTSUPP		-1 +#define PSCI_RET_EINVAL			-2 +#define PSCI_RET_EPERM			-3 + +static int psci_to_linux_errno(int errno) +{ +	switch (errno) { +	case PSCI_RET_SUCCESS: +		return 0; +	case PSCI_RET_EOPNOTSUPP: +		return -EOPNOTSUPP; +	case PSCI_RET_EINVAL: +		return -EINVAL; +	case PSCI_RET_EPERM: +		return -EPERM; +	}; + +	return -EINVAL; +} + +#define PSCI_POWER_STATE_ID_MASK	0xffff +#define PSCI_POWER_STATE_ID_SHIFT	0 +#define PSCI_POWER_STATE_TYPE_MASK	0x1 +#define PSCI_POWER_STATE_TYPE_SHIFT	16 +#define PSCI_POWER_STATE_AFFL_MASK	0x3 +#define PSCI_POWER_STATE_AFFL_SHIFT	24 + +static u32 psci_power_state_pack(struct psci_power_state state) +{ +	return	((state.id & PSCI_POWER_STATE_ID_MASK) +			<< PSCI_POWER_STATE_ID_SHIFT)	| +		((state.type & PSCI_POWER_STATE_TYPE_MASK) +			<< PSCI_POWER_STATE_TYPE_SHIFT)	| +		((state.affinity_level & PSCI_POWER_STATE_AFFL_MASK) +			<< PSCI_POWER_STATE_AFFL_SHIFT); +} + +/* + * The following two functions are invoked via the invoke_psci_fn pointer + * and will not be inlined, allowing us to piggyback on the AAPCS. 
+ */ +static noinline int __invoke_psci_fn_hvc(u32 function_id, u32 arg0, u32 arg1, +					 u32 arg2) +{ +	asm volatile( +			__asmeq("%0", "r0") +			__asmeq("%1", "r1") +			__asmeq("%2", "r2") +			__asmeq("%3", "r3") +			__HVC(0) +		: "+r" (function_id) +		: "r" (arg0), "r" (arg1), "r" (arg2)); + +	return function_id; +} + +static noinline int __invoke_psci_fn_smc(u32 function_id, u32 arg0, u32 arg1, +					 u32 arg2) +{ +	asm volatile( +			__asmeq("%0", "r0") +			__asmeq("%1", "r1") +			__asmeq("%2", "r2") +			__asmeq("%3", "r3") +			__SMC(0) +		: "+r" (function_id) +		: "r" (arg0), "r" (arg1), "r" (arg2)); + +	return function_id; +} + +static int psci_cpu_suspend(struct psci_power_state state, +			    unsigned long entry_point) +{ +	int err; +	u32 fn, power_state; + +	fn = psci_function_id[PSCI_FN_CPU_SUSPEND]; +	power_state = psci_power_state_pack(state); +	err = invoke_psci_fn(fn, power_state, entry_point, 0); +	return psci_to_linux_errno(err); +} + +static int psci_cpu_off(struct psci_power_state state) +{ +	int err; +	u32 fn, power_state; + +	fn = psci_function_id[PSCI_FN_CPU_OFF]; +	power_state = psci_power_state_pack(state); +	err = invoke_psci_fn(fn, power_state, 0, 0); +	return psci_to_linux_errno(err); +} + +static int psci_cpu_on(unsigned long cpuid, unsigned long entry_point) +{ +	int err; +	u32 fn; + +	fn = psci_function_id[PSCI_FN_CPU_ON]; +	err = invoke_psci_fn(fn, cpuid, entry_point, 0); +	return psci_to_linux_errno(err); +} + +static int psci_migrate(unsigned long cpuid) +{ +	int err; +	u32 fn; + +	fn = psci_function_id[PSCI_FN_MIGRATE]; +	err = invoke_psci_fn(fn, cpuid, 0, 0); +	return psci_to_linux_errno(err); +} + +static const struct of_device_id psci_of_match[] __initconst = { +	{ .compatible = "arm,psci",	}, +	{}, +}; + +static int __init psci_init(void) +{ +	struct device_node *np; +	const char *method; +	u32 id; + +	np = of_find_matching_node(NULL, psci_of_match); +	if (!np) +		return 0; + +	pr_info("probing function IDs from device-tree\n"); + +	if (of_property_read_string(np, "method", &method)) { +		pr_warning("missing \"method\" property\n"); +		goto out_put_node; +	} + +	if (!strcmp("hvc", method)) { +		invoke_psci_fn = __invoke_psci_fn_hvc; +	} else if (!strcmp("smc", method)) { +		invoke_psci_fn = __invoke_psci_fn_smc; +	} else { +		pr_warning("invalid \"method\" property: %s\n", method); +		goto out_put_node; +	} + +	if (!of_property_read_u32(np, "cpu_suspend", &id)) { +		psci_function_id[PSCI_FN_CPU_SUSPEND] = id; +		psci_ops.cpu_suspend = psci_cpu_suspend; +	} + +	if (!of_property_read_u32(np, "cpu_off", &id)) { +		psci_function_id[PSCI_FN_CPU_OFF] = id; +		psci_ops.cpu_off = psci_cpu_off; +	} + +	if (!of_property_read_u32(np, "cpu_on", &id)) { +		psci_function_id[PSCI_FN_CPU_ON] = id; +		psci_ops.cpu_on = psci_cpu_on; +	} + +	if (!of_property_read_u32(np, "migrate", &id)) { +		psci_function_id[PSCI_FN_MIGRATE] = id; +		psci_ops.migrate = psci_migrate; +	} + +out_put_node: +	of_node_put(np); +	return 0; +} +early_initcall(psci_init); diff --git a/arch/arm/kernel/sched_clock.c b/arch/arm/kernel/sched_clock.c index fc6692e2b60..bd6f56b9ec2 100644 --- a/arch/arm/kernel/sched_clock.c +++ b/arch/arm/kernel/sched_clock.c @@ -93,11 +93,11 @@ static void notrace update_sched_clock(void)  	 * detectable in cyc_to_fixed_sched_clock().  	 
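(On the PSCI plumbing added above: the power_state argument is simple bit folding. The following is a self-contained userspace model of psci_power_state_pack() with arbitrary example values; the actual SMC/HVC conduit obviously cannot be modelled outside the kernel:)

#include <stdint.h>
#include <stdio.h>

#define ID_MASK		0xffff	/* PSCI_POWER_STATE_ID_MASK */
#define ID_SHIFT	0
#define TYPE_MASK	0x1	/* PSCI_POWER_STATE_TYPE_MASK */
#define TYPE_SHIFT	16
#define AFFL_MASK	0x3	/* PSCI_POWER_STATE_AFFL_MASK */
#define AFFL_SHIFT	24

struct psci_power_state { uint16_t id; uint8_t type; uint8_t affinity_level; };

static uint32_t psci_power_state_pack(struct psci_power_state state)
{
	return ((state.id & ID_MASK) << ID_SHIFT) |
	       ((state.type & TYPE_MASK) << TYPE_SHIFT) |
	       ((state.affinity_level & AFFL_MASK) << AFFL_SHIFT);
}

int main(void)
{
	struct psci_power_state s = { .id = 1, .type = 1, .affinity_level = 1 };

	printf("power_state = %#x\n", psci_power_state_pack(s)); /* 0x1010001 */
	return 0;
}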
*/  	raw_local_irq_save(flags); -	cd.epoch_cyc = cyc; +	cd.epoch_cyc_copy = cyc;  	smp_wmb();  	cd.epoch_ns = ns;  	smp_wmb(); -	cd.epoch_cyc_copy = cyc; +	cd.epoch_cyc = cyc;  	raw_local_irq_restore(flags);  } diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index 84f4cbf652e..365c8d92e2e 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -125,18 +125,6 @@ void __init smp_init_cpus(void)  		smp_ops.smp_init_cpus();  } -static void __init platform_smp_prepare_cpus(unsigned int max_cpus) -{ -	if (smp_ops.smp_prepare_cpus) -		smp_ops.smp_prepare_cpus(max_cpus); -} - -static void __cpuinit platform_secondary_init(unsigned int cpu) -{ -	if (smp_ops.smp_secondary_init) -		smp_ops.smp_secondary_init(cpu); -} -  int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)  {  	if (smp_ops.smp_boot_secondary) @@ -154,12 +142,6 @@ static int platform_cpu_kill(unsigned int cpu)  	return 1;  } -static void platform_cpu_die(unsigned int cpu) -{ -	if (smp_ops.cpu_die) -		smp_ops.cpu_die(cpu); -} -  static int platform_cpu_disable(unsigned int cpu)  {  	if (smp_ops.cpu_disable) @@ -257,7 +239,8 @@ void __ref cpu_die(void)  	 * actual CPU shutdown procedure is at least platform (if not  	 * CPU) specific.  	 */ -	platform_cpu_die(cpu); +	if (smp_ops.cpu_die) +		smp_ops.cpu_die(cpu);  	/*  	 * Do not return to the idle loop - jump back to the secondary @@ -324,7 +307,8 @@ asmlinkage void __cpuinit secondary_start_kernel(void)  	/*  	 * Give the platform a chance to do its own initialisation.  	 */ -	platform_secondary_init(cpu); +	if (smp_ops.smp_secondary_init) +		smp_ops.smp_secondary_init(cpu);  	notify_cpu_starting(cpu); @@ -399,8 +383,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus)  		/*  		 * Initialise the present map, which describes the set of CPUs  		 * actually populated at the present time. A platform should -		 * re-initialize the map in platform_smp_prepare_cpus() if -		 * present != possible (e.g. physical hotplug). +		 * re-initialize the map in the platforms smp_prepare_cpus() +		 * if present != possible (e.g. physical hotplug).  		 */  		init_cpu_present(cpu_possible_mask); @@ -408,7 +392,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus)  		 * Initialise the SCU if there are more than one CPU  		 * and let them know where to start.  		 
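(The reordering in the sched_clock hunk above is subtle: readers pair epoch_cyc with epoch_ns only when epoch_cyc equals epoch_cyc_copy, so the writer must update the copy first and the primary last. A rough userspace model of the protocol, using C11 atomics where the kernel uses smp_wmb()/smp_rmb(), with the retry loop that in the kernel lives in cyc_to_sched_clock():)

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t epoch_cyc, epoch_cyc_copy, epoch_ns;

static void update_epoch(uint64_t cyc, uint64_t ns)
{
	/* copy first, primary last: a reader racing with us sees
	 * cyc != copy and retries instead of pairing old ns with new cyc */
	atomic_store_explicit(&epoch_cyc_copy, cyc, memory_order_release);
	atomic_store_explicit(&epoch_ns, ns, memory_order_release);
	atomic_store_explicit(&epoch_cyc, cyc, memory_order_release);
}

static int read_epoch(uint64_t *cyc, uint64_t *ns)
{
	uint64_t c = atomic_load_explicit(&epoch_cyc, memory_order_acquire);
	uint64_t n = atomic_load_explicit(&epoch_ns, memory_order_acquire);
	uint64_t copy = atomic_load_explicit(&epoch_cyc_copy, memory_order_acquire);

	if (c != copy)
		return 0;	/* torn update in flight: caller retries */
	*cyc = c;
	*ns = n;
	return 1;
}

int main(void)
{
	uint64_t c, n;

	update_epoch(1000, 500);
	while (!read_epoch(&c, &n))
		;
	printf("cyc=%llu ns=%llu\n",
	       (unsigned long long)c, (unsigned long long)n);
	return 0;
}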
*/ -		platform_smp_prepare_cpus(max_cpus); +		if (smp_ops.smp_prepare_cpus) +			smp_ops.smp_prepare_cpus(max_cpus);  	}  } diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c index 49f335d301b..ae0c7bb39ae 100644 --- a/arch/arm/kernel/smp_twd.c +++ b/arch/arm/kernel/smp_twd.c @@ -31,7 +31,6 @@ static void __iomem *twd_base;  static struct clk *twd_clk;  static unsigned long twd_timer_rate; -static bool common_setup_called;  static DEFINE_PER_CPU(bool, percpu_setup_called);  static struct clock_event_device __percpu **twd_evt; @@ -239,25 +238,28 @@ static irqreturn_t twd_handler(int irq, void *dev_id)  	return IRQ_NONE;  } -static struct clk *twd_get_clock(void) +static void twd_get_clock(struct device_node *np)  { -	struct clk *clk;  	int err; -	clk = clk_get_sys("smp_twd", NULL); -	if (IS_ERR(clk)) { -		pr_err("smp_twd: clock not found: %d\n", (int)PTR_ERR(clk)); -		return clk; +	if (np) +		twd_clk = of_clk_get(np, 0); +	else +		twd_clk = clk_get_sys("smp_twd", NULL); + +	if (IS_ERR(twd_clk)) { +		pr_err("smp_twd: clock not found %d\n", (int) PTR_ERR(twd_clk)); +		return;  	} -	err = clk_prepare_enable(clk); +	err = clk_prepare_enable(twd_clk);  	if (err) {  		pr_err("smp_twd: clock failed to prepare+enable: %d\n", err); -		clk_put(clk); -		return ERR_PTR(err); +		clk_put(twd_clk); +		return;  	} -	return clk; +	twd_timer_rate = clk_get_rate(twd_clk);  }  /* @@ -280,26 +282,7 @@ static int __cpuinit twd_timer_setup(struct clock_event_device *clk)  	}  	per_cpu(percpu_setup_called, cpu) = true; -	/* -	 * This stuff only need to be done once for the entire TWD cluster -	 * during the runtime of the system. -	 */ -	if (!common_setup_called) { -		twd_clk = twd_get_clock(); - -		/* -		 * We use IS_ERR_OR_NULL() here, because if the clock stubs -		 * are active we will get a valid clk reference which is -		 * however NULL and will return the rate 0. In that case we -		 * need to calibrate the rate instead. 
-		 */ -		if (!IS_ERR_OR_NULL(twd_clk)) -			twd_timer_rate = clk_get_rate(twd_clk); -		else -			twd_calibrate_rate(); - -		common_setup_called = true; -	} +	twd_calibrate_rate();  	/*  	 * The following is done once per CPU the first time .setup() is @@ -330,7 +313,7 @@ static struct local_timer_ops twd_lt_ops __cpuinitdata = {  	.stop	= twd_timer_stop,  }; -static int __init twd_local_timer_common_register(void) +static int __init twd_local_timer_common_register(struct device_node *np)  {  	int err; @@ -350,6 +333,8 @@ static int __init twd_local_timer_common_register(void)  	if (err)  		goto out_irq; +	twd_get_clock(np); +  	return 0;  out_irq: @@ -373,7 +358,7 @@ int __init twd_local_timer_register(struct twd_local_timer *tlt)  	if (!twd_base)  		return -ENOMEM; -	return twd_local_timer_common_register(); +	return twd_local_timer_common_register(NULL);  }  #ifdef CONFIG_OF @@ -405,7 +390,7 @@ void __init twd_local_timer_of_register(void)  		goto out;  	} -	err = twd_local_timer_common_register(); +	err = twd_local_timer_common_register(np);  out:  	WARN(err, "twd_local_timer_of_register failed (%d)\n", err); diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S index 11c1785bf63..b571484e9f0 100644 --- a/arch/arm/kernel/vmlinux.lds.S +++ b/arch/arm/kernel/vmlinux.lds.S @@ -19,7 +19,11 @@  	ALIGN_FUNCTION();						\  	VMLINUX_SYMBOL(__idmap_text_start) = .;				\  	*(.idmap.text)							\ -	VMLINUX_SYMBOL(__idmap_text_end) = .; +	VMLINUX_SYMBOL(__idmap_text_end) = .;				\ +	ALIGN_FUNCTION();						\ +	VMLINUX_SYMBOL(__hyp_idmap_text_start) = .;			\ +	*(.hyp.idmap.text)						\ +	VMLINUX_SYMBOL(__hyp_idmap_text_end) = .;  #ifdef CONFIG_HOTPLUG_CPU  #define ARM_CPU_DISCARD(x) diff --git a/arch/arm/kvm/Kconfig b/arch/arm/kvm/Kconfig new file mode 100644 index 00000000000..05227cb57a7 --- /dev/null +++ b/arch/arm/kvm/Kconfig @@ -0,0 +1,56 @@ +# +# KVM configuration +# + +source "virt/kvm/Kconfig" + +menuconfig VIRTUALIZATION +	bool "Virtualization" +	---help--- +	  Say Y here to get to see options for using your Linux host to run +	  other operating systems inside virtual machines (guests). +	  This option alone does not add any kernel code. + +	  If you say N, all options in this submenu will be skipped and +	  disabled. + +if VIRTUALIZATION + +config KVM +	bool "Kernel-based Virtual Machine (KVM) support" +	select PREEMPT_NOTIFIERS +	select ANON_INODES +	select KVM_MMIO +	select KVM_ARM_HOST +	depends on ARM_VIRT_EXT && ARM_LPAE +	---help--- +	  Support hosting virtualized guest machines. You will also +	  need to select one or more of the processor modules below. + +	  This module provides access to the hardware capabilities through +	  a character device node named /dev/kvm. + +	  If unsure, say N. + +config KVM_ARM_HOST +	bool "KVM host support for ARM CPUs." +	depends on KVM +	depends on MMU +	select	MMU_NOTIFIER +	---help--- +	  Provides host support for ARM processors. + +config KVM_ARM_MAX_VCPUS +	int "Maximum number of supported virtual CPUs per VM" +	depends on KVM_ARM_HOST +	default 4 +	help +	  Static maximum number of virtual CPUs supported per VM. + +	  If you choose a high number, the vcpu structures will be quite +	  large, so only choose a reasonable number that you expect to +	  actually use.
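(For reference, a configuration fragment exercising the new options might look like the following; this is illustrative only, with ARM_VIRT_EXT and ARM_LPAE taken from the depends lines above:)

CONFIG_ARM_VIRT_EXT=y
CONFIG_ARM_LPAE=y
CONFIG_VIRTUALIZATION=y
CONFIG_KVM=y
CONFIG_KVM_ARM_HOST=y
CONFIG_KVM_ARM_MAX_VCPUS=4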
+ +source drivers/virtio/Kconfig + +endif # VIRTUALIZATION diff --git a/arch/arm/kvm/Makefile b/arch/arm/kvm/Makefile new file mode 100644 index 00000000000..ea27987bd07 --- /dev/null +++ b/arch/arm/kvm/Makefile @@ -0,0 +1,21 @@ +# +# Makefile for Kernel-based Virtual Machine module +# + +plus_virt := $(call as-instr,.arch_extension virt,+virt) +ifeq ($(plus_virt),+virt) +	plus_virt_def := -DREQUIRES_VIRT=1 +endif + +ccflags-y += -Ivirt/kvm -Iarch/arm/kvm +CFLAGS_arm.o := -I. $(plus_virt_def) +CFLAGS_mmu.o := -I. + +AFLAGS_init.o := -Wa,-march=armv7-a$(plus_virt) +AFLAGS_interrupts.o := -Wa,-march=armv7-a$(plus_virt) + +kvm-arm-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o) + +obj-y += kvm-arm.o init.o interrupts.o +obj-y += arm.o guest.o mmu.o emulate.o reset.o +obj-y += coproc.o coproc_a15.o mmio.o psci.o diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c new file mode 100644 index 00000000000..2d30e3afdaf --- /dev/null +++ b/arch/arm/kvm/arm.c @@ -0,0 +1,1015 @@ +/* + * Copyright (C) 2012 - Virtual Open Systems and Columbia University + * Author: Christoffer Dall <c.dall@virtualopensystems.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA. 
+ */ + +#include <linux/errno.h> +#include <linux/err.h> +#include <linux/kvm_host.h> +#include <linux/module.h> +#include <linux/vmalloc.h> +#include <linux/fs.h> +#include <linux/mman.h> +#include <linux/sched.h> +#include <linux/kvm.h> +#include <trace/events/kvm.h> + +#define CREATE_TRACE_POINTS +#include "trace.h" + +#include <asm/unified.h> +#include <asm/uaccess.h> +#include <asm/ptrace.h> +#include <asm/mman.h> +#include <asm/cputype.h> +#include <asm/tlbflush.h> +#include <asm/cacheflush.h> +#include <asm/virt.h> +#include <asm/kvm_arm.h> +#include <asm/kvm_asm.h> +#include <asm/kvm_mmu.h> +#include <asm/kvm_emulate.h> +#include <asm/kvm_coproc.h> +#include <asm/kvm_psci.h> +#include <asm/opcodes.h> + +#ifdef REQUIRES_VIRT +__asm__(".arch_extension	virt"); +#endif + +static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page); +static struct vfp_hard_struct __percpu *kvm_host_vfp_state; +static unsigned long hyp_default_vectors; + +/* The VMID used in the VTTBR */ +static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1); +static u8 kvm_next_vmid; +static DEFINE_SPINLOCK(kvm_vmid_lock); + +int kvm_arch_hardware_enable(void *garbage) +{ +	return 0; +} + +int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) +{ +	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE; +} + +void kvm_arch_hardware_disable(void *garbage) +{ +} + +int kvm_arch_hardware_setup(void) +{ +	return 0; +} + +void kvm_arch_hardware_unsetup(void) +{ +} + +void kvm_arch_check_processor_compat(void *rtn) +{ +	*(int *)rtn = 0; +} + +void kvm_arch_sync_events(struct kvm *kvm) +{ +} + +/** + * kvm_arch_init_vm - initializes a VM data structure + * @kvm:	pointer to the KVM struct + */ +int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) +{ +	int ret = 0; + +	if (type) +		return -EINVAL; + +	ret = kvm_alloc_stage2_pgd(kvm); +	if (ret) +		goto out_fail_alloc; + +	ret = create_hyp_mappings(kvm, kvm + 1); +	if (ret) +		goto out_free_stage2_pgd; + +	/* Mark the initial VMID generation invalid */ +	kvm->arch.vmid_gen = 0; + +	return ret; +out_free_stage2_pgd: +	kvm_free_stage2_pgd(kvm); +out_fail_alloc: +	return ret; +} + +int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) +{ +	return VM_FAULT_SIGBUS; +} + +void kvm_arch_free_memslot(struct kvm_memory_slot *free, +			   struct kvm_memory_slot *dont) +{ +} + +int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages) +{ +	return 0; +} + +/** + * kvm_arch_destroy_vm - destroy the VM data structure + * @kvm:	pointer to the KVM struct + */ +void kvm_arch_destroy_vm(struct kvm *kvm) +{ +	int i; + +	kvm_free_stage2_pgd(kvm); + +	for (i = 0; i < KVM_MAX_VCPUS; ++i) { +		if (kvm->vcpus[i]) { +			kvm_arch_vcpu_free(kvm->vcpus[i]); +			kvm->vcpus[i] = NULL; +		} +	} +} + +int kvm_dev_ioctl_check_extension(long ext) +{ +	int r; +	switch (ext) { +	case KVM_CAP_USER_MEMORY: +	case KVM_CAP_SYNC_MMU: +	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS: +	case KVM_CAP_ONE_REG: +	case KVM_CAP_ARM_PSCI: +		r = 1; +		break; +	case KVM_CAP_COALESCED_MMIO: +		r = KVM_COALESCED_MMIO_PAGE_OFFSET; +		break; +	case KVM_CAP_NR_VCPUS: +		r = num_online_cpus(); +		break; +	case KVM_CAP_MAX_VCPUS: +		r = KVM_MAX_VCPUS; +		break; +	default: +		r = 0; +		break; +	} +	return r; +} + +long kvm_arch_dev_ioctl(struct file *filp, +			unsigned int ioctl, unsigned long arg) +{ +	return -EINVAL; +} + +int kvm_arch_set_memory_region(struct kvm *kvm, +			       struct kvm_userspace_memory_region *mem, +			       struct kvm_memory_slot old, +			       int user_alloc) +{ +	return 0; 
+} + +int kvm_arch_prepare_memory_region(struct kvm *kvm, +				   struct kvm_memory_slot *memslot, +				   struct kvm_memory_slot old, +				   struct kvm_userspace_memory_region *mem, +				   int user_alloc) +{ +	return 0; +} + +void kvm_arch_commit_memory_region(struct kvm *kvm, +				   struct kvm_userspace_memory_region *mem, +				   struct kvm_memory_slot old, +				   int user_alloc) +{ +} + +void kvm_arch_flush_shadow_all(struct kvm *kvm) +{ +} + +void kvm_arch_flush_shadow_memslot(struct kvm *kvm, +				   struct kvm_memory_slot *slot) +{ +} + +struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) +{ +	int err; +	struct kvm_vcpu *vcpu; + +	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); +	if (!vcpu) { +		err = -ENOMEM; +		goto out; +	} + +	err = kvm_vcpu_init(vcpu, kvm, id); +	if (err) +		goto free_vcpu; + +	err = create_hyp_mappings(vcpu, vcpu + 1); +	if (err) +		goto vcpu_uninit; + +	return vcpu; +vcpu_uninit: +	kvm_vcpu_uninit(vcpu); +free_vcpu: +	kmem_cache_free(kvm_vcpu_cache, vcpu); +out: +	return ERR_PTR(err); +} + +int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) +{ +	return 0; +} + +void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) +{ +	kvm_mmu_free_memory_caches(vcpu); +	kmem_cache_free(kvm_vcpu_cache, vcpu); +} + +void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) +{ +	kvm_arch_vcpu_free(vcpu); +} + +int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) +{ +	return 0; +} + +int __attribute_const__ kvm_target_cpu(void) +{ +	unsigned long implementor = read_cpuid_implementor(); +	unsigned long part_number = read_cpuid_part_number(); + +	if (implementor != ARM_CPU_IMP_ARM) +		return -EINVAL; + +	switch (part_number) { +	case ARM_CPU_PART_CORTEX_A15: +		return KVM_ARM_TARGET_CORTEX_A15; +	default: +		return -EINVAL; +	} +} + +int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) +{ +	/* Force users to call KVM_ARM_VCPU_INIT */ +	vcpu->arch.target = -1; +	return 0; +} + +void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) +{ +} + +void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) +{ +	vcpu->cpu = cpu; +	vcpu->arch.vfp_host = this_cpu_ptr(kvm_host_vfp_state); + +	/* +	 * Check whether this vcpu requires the cache to be flushed on +	 * this physical CPU. This is a consequence of doing dcache +	 * operations by set/way on this vcpu. We do it here to be in +	 * a non-preemptible section. +	 */ +	if (cpumask_test_and_clear_cpu(cpu, &vcpu->arch.require_dcache_flush)) +		flush_cache_all(); /* We'd really want v7_flush_dcache_all() */ +} + +void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) +{ +} + +int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, +					struct kvm_guest_debug *dbg) +{ +	return -EINVAL; +} + + +int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, +				    struct kvm_mp_state *mp_state) +{ +	return -EINVAL; +} + +int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, +				    struct kvm_mp_state *mp_state) +{ +	return -EINVAL; +} + +/** + * kvm_arch_vcpu_runnable - determine if the vcpu can be scheduled + * @v:		The VCPU pointer + * + * If the guest CPU is not waiting for interrupts or an interrupt line is + * asserted, the CPU is by definition runnable. 
+ */ +int kvm_arch_vcpu_runnable(struct kvm_vcpu *v) +{ +	return !!v->arch.irq_lines; +} + +/* Just ensure a guest exit from a particular CPU */ +static void exit_vm_noop(void *info) +{ +} + +void force_vm_exit(const cpumask_t *mask) +{ +	smp_call_function_many(mask, exit_vm_noop, NULL, true); +} + +/** + * need_new_vmid_gen - check that the VMID is still valid + * @kvm: The VM's VMID to check + * + * Returns true if there is a new generation of VMIDs being used. + * + * The hardware supports only 256 values with the value zero reserved for the + * host, so we check if an assigned value belongs to a previous generation, + * which requires us to assign a new value. If we're the first to use a + * VMID for the new generation, we must flush necessary caches and TLBs on all + * CPUs. + */ +static bool need_new_vmid_gen(struct kvm *kvm) +{ +	return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen)); +} + +/** + * update_vttbr - Update the VTTBR with a valid VMID before the guest runs + * @kvm:	The guest that we are about to run + * + * Called from kvm_arch_vcpu_ioctl_run before entering the guest to ensure the + * VM has a valid VMID, otherwise assigns a new one and flushes corresponding + * caches and TLBs. + */ +static void update_vttbr(struct kvm *kvm) +{ +	phys_addr_t pgd_phys; +	u64 vmid; + +	if (!need_new_vmid_gen(kvm)) +		return; + +	spin_lock(&kvm_vmid_lock); + +	/* +	 * We need to re-check the vmid_gen here to ensure that if another vcpu +	 * already allocated a valid vmid for this vm, then this vcpu should +	 * use the same vmid. +	 */ +	if (!need_new_vmid_gen(kvm)) { +		spin_unlock(&kvm_vmid_lock); +		return; +	} + +	/* First user of a new VMID generation? */ +	if (unlikely(kvm_next_vmid == 0)) { +		atomic64_inc(&kvm_vmid_gen); +		kvm_next_vmid = 1; + +		/* +		 * On SMP we know no other CPUs can use this CPU's or each +		 * other's VMID after force_vm_exit returns since the +		 * kvm_vmid_lock blocks them from reentry to the guest. +		 */ +		force_vm_exit(cpu_all_mask); +		/* +		 * Now broadcast TLB + ICACHE invalidation over the inner +		 * shareable domain to make sure all data structures are +		 * clean.
+		 */ +		kvm_call_hyp(__kvm_flush_vm_context); +	} + +	kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen); +	kvm->arch.vmid = kvm_next_vmid; +	kvm_next_vmid++; + +	/* update vttbr to be used with the new vmid */ +	pgd_phys = virt_to_phys(kvm->arch.pgd); +	vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK; +	kvm->arch.vttbr = pgd_phys & VTTBR_BADDR_MASK; +	kvm->arch.vttbr |= vmid; + +	spin_unlock(&kvm_vmid_lock); +} + +static int handle_svc_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run) +{ +	/* SVC called from Hyp mode should never get here */ +	kvm_debug("SVC called from Hyp mode shouldn't go here\n"); +	BUG(); +	return -EINVAL; /* Squash warning */ +} + +static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run) +{ +	trace_kvm_hvc(*vcpu_pc(vcpu), *vcpu_reg(vcpu, 0), +		      vcpu->arch.hsr & HSR_HVC_IMM_MASK); + +	if (kvm_psci_call(vcpu)) +		return 1; + +	kvm_inject_undefined(vcpu); +	return 1; +} + +static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run) +{ +	if (kvm_psci_call(vcpu)) +		return 1; + +	kvm_inject_undefined(vcpu); +	return 1; +} + +static int handle_pabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run) +{ +	/* The hypervisor should never cause aborts */ +	kvm_err("Prefetch Abort taken from Hyp mode at %#08x (HSR: %#08x)\n", +		vcpu->arch.hxfar, vcpu->arch.hsr); +	return -EFAULT; +} + +static int handle_dabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run) +{ +	/* This is either an error in the ws. code or an external abort */ +	kvm_err("Data Abort taken from Hyp mode at %#08x (HSR: %#08x)\n", +		vcpu->arch.hxfar, vcpu->arch.hsr); +	return -EFAULT; +} + +typedef int (*exit_handle_fn)(struct kvm_vcpu *, struct kvm_run *); +static exit_handle_fn arm_exit_handlers[] = { +	[HSR_EC_WFI]		= kvm_handle_wfi, +	[HSR_EC_CP15_32]	= kvm_handle_cp15_32, +	[HSR_EC_CP15_64]	= kvm_handle_cp15_64, +	[HSR_EC_CP14_MR]	= kvm_handle_cp14_access, +	[HSR_EC_CP14_LS]	= kvm_handle_cp14_load_store, +	[HSR_EC_CP14_64]	= kvm_handle_cp14_access, +	[HSR_EC_CP_0_13]	= kvm_handle_cp_0_13_access, +	[HSR_EC_CP10_ID]	= kvm_handle_cp10_id, +	[HSR_EC_SVC_HYP]	= handle_svc_hyp, +	[HSR_EC_HVC]		= handle_hvc, +	[HSR_EC_SMC]		= handle_smc, +	[HSR_EC_IABT]		= kvm_handle_guest_abort, +	[HSR_EC_IABT_HYP]	= handle_pabt_hyp, +	[HSR_EC_DABT]		= kvm_handle_guest_abort, +	[HSR_EC_DABT_HYP]	= handle_dabt_hyp, +}; + +/* + * A conditional instruction is allowed to trap, even though it + * wouldn't be executed.  So let's re-implement the hardware, in + * software! + */ +static bool kvm_condition_valid(struct kvm_vcpu *vcpu) +{ +	unsigned long cpsr, cond, insn; + +	/* +	 * Exception Code 0 can only happen if we set HCR.TGE to 1, to +	 * catch undefined instructions, and then we won't get past +	 * the arm_exit_handlers test anyway. +	 */ +	BUG_ON(((vcpu->arch.hsr & HSR_EC) >> HSR_EC_SHIFT) == 0); + +	/* Top two bits non-zero?  Unconditional. */ +	if (vcpu->arch.hsr >> 30) +		return true; + +	cpsr = *vcpu_cpsr(vcpu); + +	/* Is condition field valid? */ +	if ((vcpu->arch.hsr & HSR_CV) >> HSR_CV_SHIFT) +		cond = (vcpu->arch.hsr & HSR_COND) >> HSR_COND_SHIFT; +	else { +		/* This can happen in Thumb mode: examine IT state. */ +		unsigned long it; + +		it = ((cpsr >> 8) & 0xFC) | ((cpsr >> 25) & 0x3); + +		/* it == 0 => unconditional. */ +		if (it == 0) +			return true; + +		/* The cond for this insn works out as the top 4 bits. 
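(The VMID rollover in update_vttbr() above reduces to a few lines of arithmetic. Below is a self-contained userspace model; locking, force_vm_exit() and the TLB/icache flush are elided, and the 8-bit wrap of kvm_next_vmid is the point:)

#include <stdint.h>
#include <stdio.h>

static uint64_t vmid_gen = 1;	/* models kvm_vmid_gen */
static uint8_t next_vmid = 1;	/* models kvm_next_vmid; wraps at 256 */

struct vm { uint64_t vmid_gen; uint8_t vmid; };

static void update_vmid(struct vm *vm)
{
	if (vm->vmid_gen == vmid_gen)
		return;				/* still valid, fast path */

	if (next_vmid == 0) {			/* 256 VMIDs exhausted */
		vmid_gen++;			/* invalidates every VM */
		next_vmid = 1;			/* zero stays reserved */
		/* kernel: force_vm_exit() + __kvm_flush_vm_context here */
	}

	vm->vmid_gen = vmid_gen;
	vm->vmid = next_vmid++;
}

int main(void)
{
	struct vm a = { 0, 0 }, b = { 0, 0 };

	update_vmid(&a);
	update_vmid(&b);
	printf("a: gen=%llu vmid=%u  b: gen=%llu vmid=%u\n",
	       (unsigned long long)a.vmid_gen, a.vmid,
	       (unsigned long long)b.vmid_gen, b.vmid);
	return 0;
}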
+
+/*
+ * A conditional instruction is allowed to trap, even though it
+ * wouldn't be executed.  So let's re-implement the hardware, in
+ * software!
+ */
+static bool kvm_condition_valid(struct kvm_vcpu *vcpu)
+{
+	unsigned long cpsr, cond, insn;
+
+	/*
+	 * Exception Code 0 can only happen if we set HCR.TGE to 1, to
+	 * catch undefined instructions, and then we won't get past
+	 * the arm_exit_handlers test anyway.
+	 */
+	BUG_ON(((vcpu->arch.hsr & HSR_EC) >> HSR_EC_SHIFT) == 0);
+
+	/* Top two bits non-zero?  Unconditional. */
+	if (vcpu->arch.hsr >> 30)
+		return true;
+
+	cpsr = *vcpu_cpsr(vcpu);
+
+	/* Is condition field valid? */
+	if ((vcpu->arch.hsr & HSR_CV) >> HSR_CV_SHIFT)
+		cond = (vcpu->arch.hsr & HSR_COND) >> HSR_COND_SHIFT;
+	else {
+		/* This can happen in Thumb mode: examine IT state. */
+		unsigned long it;
+
+		it = ((cpsr >> 8) & 0xFC) | ((cpsr >> 25) & 0x3);
+
+		/* it == 0 => unconditional. */
+		if (it == 0)
+			return true;
+
+		/* The cond for this insn works out as the top 4 bits. */
+		cond = (it >> 4);
+	}
+
+	/* Shift makes it look like an ARM-mode instruction */
+	insn = cond << 28;
+	return arm_check_condition(insn, cpsr) != ARM_OPCODE_CONDTEST_FAIL;
+}
+
+/*
+ * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
+ * proper exit to QEMU.
+ */
+static int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
+		       int exception_index)
+{
+	unsigned long hsr_ec;
+
+	switch (exception_index) {
+	case ARM_EXCEPTION_IRQ:
+		return 1;
+	case ARM_EXCEPTION_UNDEFINED:
+		kvm_err("Undefined exception in Hyp mode at: %#08x\n",
+			vcpu->arch.hyp_pc);
+		BUG();
+		panic("KVM: Hypervisor undefined exception!\n");
+	case ARM_EXCEPTION_DATA_ABORT:
+	case ARM_EXCEPTION_PREF_ABORT:
+	case ARM_EXCEPTION_HVC:
+		hsr_ec = (vcpu->arch.hsr & HSR_EC) >> HSR_EC_SHIFT;
+
+		if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers)
+		    || !arm_exit_handlers[hsr_ec]) {
+			kvm_err("Unknown exception class: %#08lx, "
+				"hsr: %#08x\n", hsr_ec,
+				(unsigned int)vcpu->arch.hsr);
+			BUG();
+		}
+
+		/*
+		 * See ARM ARM B1.14.1: "Hyp traps on instructions
+		 * that fail their condition code check"
+		 */
+		if (!kvm_condition_valid(vcpu)) {
+			bool is_wide = vcpu->arch.hsr & HSR_IL;
+			kvm_skip_instr(vcpu, is_wide);
+			return 1;
+		}
+
+		return arm_exit_handlers[hsr_ec](vcpu, run);
+	default:
+		kvm_pr_unimpl("Unsupported exception type: %d",
+			      exception_index);
+		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+		return 0;
+	}
+}
+
+static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
+{
+	if (likely(vcpu->arch.has_run_once))
+		return 0;
+
+	vcpu->arch.has_run_once = true;
+
+	/*
+	 * Handle the "start in power-off" case by calling into the
+	 * PSCI code.
+	 */
+	if (test_and_clear_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features)) {
+		*vcpu_reg(vcpu, 0) = KVM_PSCI_FN_CPU_OFF;
+		kvm_psci_call(vcpu);
+	}
+
+	return 0;
+}
+
+static void vcpu_pause(struct kvm_vcpu *vcpu)
+{
+	wait_queue_head_t *wq = kvm_arch_vcpu_wq(vcpu);
+
+	wait_event_interruptible(*wq, !vcpu->arch.pause);
+}
+
+/**
+ * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
+ * @vcpu:	The VCPU pointer
+ * @run:	The kvm_run structure pointer used for userspace state exchange
+ *
+ * This function is called through the KVM_RUN ioctl called from user space. It
+ * will execute VM code in a loop until the time slice for the process is used
+ * up or some emulation is needed from user space, in which case the function
+ * will return with return value 0 and with the kvm_run structure filled in
+ * with the required data for the requested emulation.
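+ *
+ * A rough user-space sketch of the calling loop (illustrative only; error
+ * handling omitted, and mmap_size comes from KVM_GET_VCPU_MMAP_SIZE):
+ *
+ *	struct kvm_run *run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
+ *				   MAP_SHARED, vcpu_fd, 0);
+ *	for (;;) {
+ *		ioctl(vcpu_fd, KVM_RUN, 0);
+ *		if (run->exit_reason == KVM_EXIT_MMIO)
+ *			emulate_mmio(run);	(user-supplied helper)
+ *	}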
+ */ +int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) +{ +	int ret; +	sigset_t sigsaved; + +	/* Make sure they initialize the vcpu with KVM_ARM_VCPU_INIT */ +	if (unlikely(vcpu->arch.target < 0)) +		return -ENOEXEC; + +	ret = kvm_vcpu_first_run_init(vcpu); +	if (ret) +		return ret; + +	if (run->exit_reason == KVM_EXIT_MMIO) { +		ret = kvm_handle_mmio_return(vcpu, vcpu->run); +		if (ret) +			return ret; +	} + +	if (vcpu->sigset_active) +		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); + +	ret = 1; +	run->exit_reason = KVM_EXIT_UNKNOWN; +	while (ret > 0) { +		/* +		 * Check conditions before entering the guest +		 */ +		cond_resched(); + +		update_vttbr(vcpu->kvm); + +		if (vcpu->arch.pause) +			vcpu_pause(vcpu); + +		local_irq_disable(); + +		/* +		 * Re-check atomic conditions +		 */ +		if (signal_pending(current)) { +			ret = -EINTR; +			run->exit_reason = KVM_EXIT_INTR; +		} + +		if (ret <= 0 || need_new_vmid_gen(vcpu->kvm)) { +			local_irq_enable(); +			continue; +		} + +		/************************************************************** +		 * Enter the guest +		 */ +		trace_kvm_entry(*vcpu_pc(vcpu)); +		kvm_guest_enter(); +		vcpu->mode = IN_GUEST_MODE; + +		ret = kvm_call_hyp(__kvm_vcpu_run, vcpu); + +		vcpu->mode = OUTSIDE_GUEST_MODE; +		vcpu->arch.last_pcpu = smp_processor_id(); +		kvm_guest_exit(); +		trace_kvm_exit(*vcpu_pc(vcpu)); +		/* +		 * We may have taken a host interrupt in HYP mode (ie +		 * while executing the guest). This interrupt is still +		 * pending, as we haven't serviced it yet! +		 * +		 * We're now back in SVC mode, with interrupts +		 * disabled.  Enabling the interrupts now will have +		 * the effect of taking the interrupt again, in SVC +		 * mode this time. +		 */ +		local_irq_enable(); + +		/* +		 * Back from guest +		 *************************************************************/ + +		ret = handle_exit(vcpu, run, ret); +	} + +	if (vcpu->sigset_active) +		sigprocmask(SIG_SETMASK, &sigsaved, NULL); +	return ret; +} + +static int vcpu_interrupt_line(struct kvm_vcpu *vcpu, int number, bool level) +{ +	int bit_index; +	bool set; +	unsigned long *ptr; + +	if (number == KVM_ARM_IRQ_CPU_IRQ) +		bit_index = __ffs(HCR_VI); +	else /* KVM_ARM_IRQ_CPU_FIQ */ +		bit_index = __ffs(HCR_VF); + +	ptr = (unsigned long *)&vcpu->arch.irq_lines; +	if (level) +		set = test_and_set_bit(bit_index, ptr); +	else +		set = test_and_clear_bit(bit_index, ptr); + +	/* +	 * If we didn't change anything, no need to wake up or kick other CPUs +	 */ +	if (set == level) +		return 0; + +	/* +	 * The vcpu irq_lines field was updated, wake up sleeping VCPUs and +	 * trigger a world-switch round on the running physical CPU to set the +	 * virtual IRQ/FIQ fields in the HCR appropriately. 
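+	 *
+	 * For reference, user space raises one of these lines with the
+	 * KVM_IRQ_LINE ioctl, packing the target into kvm_irq_level.irq
+	 * roughly as follows (sketch; see kvm_vm_ioctl_irq_line() below
+	 * for the decoding side):
+	 *
+	 *	irq = (KVM_ARM_IRQ_TYPE_CPU << KVM_ARM_IRQ_TYPE_SHIFT)
+	 *	    | (vcpu_idx << KVM_ARM_IRQ_VCPU_SHIFT)
+	 *	    | (KVM_ARM_IRQ_CPU_IRQ << KVM_ARM_IRQ_NUM_SHIFT);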
+	 */
+	kvm_vcpu_kick(vcpu);
+
+	return 0;
+}
+
+int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level)
+{
+	u32 irq = irq_level->irq;
+	unsigned int irq_type, vcpu_idx, irq_num;
+	int nrcpus = atomic_read(&kvm->online_vcpus);
+	struct kvm_vcpu *vcpu = NULL;
+	bool level = irq_level->level;
+
+	irq_type = (irq >> KVM_ARM_IRQ_TYPE_SHIFT) & KVM_ARM_IRQ_TYPE_MASK;
+	vcpu_idx = (irq >> KVM_ARM_IRQ_VCPU_SHIFT) & KVM_ARM_IRQ_VCPU_MASK;
+	irq_num = (irq >> KVM_ARM_IRQ_NUM_SHIFT) & KVM_ARM_IRQ_NUM_MASK;
+
+	trace_kvm_irq_line(irq_type, vcpu_idx, irq_num, irq_level->level);
+
+	if (irq_type != KVM_ARM_IRQ_TYPE_CPU)
+		return -EINVAL;
+
+	if (vcpu_idx >= nrcpus)
+		return -EINVAL;
+
+	vcpu = kvm_get_vcpu(kvm, vcpu_idx);
+	if (!vcpu)
+		return -EINVAL;
+
+	if (irq_num > KVM_ARM_IRQ_CPU_FIQ)
+		return -EINVAL;
+
+	return vcpu_interrupt_line(vcpu, irq_num, level);
+}
+
+long kvm_arch_vcpu_ioctl(struct file *filp,
+			 unsigned int ioctl, unsigned long arg)
+{
+	struct kvm_vcpu *vcpu = filp->private_data;
+	void __user *argp = (void __user *)arg;
+
+	switch (ioctl) {
+	case KVM_ARM_VCPU_INIT: {
+		struct kvm_vcpu_init init;
+
+		if (copy_from_user(&init, argp, sizeof(init)))
+			return -EFAULT;
+
+		return kvm_vcpu_set_target(vcpu, &init);
+
+	}
+	case KVM_SET_ONE_REG:
+	case KVM_GET_ONE_REG: {
+		struct kvm_one_reg reg;
+		if (copy_from_user(&reg, argp, sizeof(reg)))
+			return -EFAULT;
+		if (ioctl == KVM_SET_ONE_REG)
+			return kvm_arm_set_reg(vcpu, &reg);
+		else
+			return kvm_arm_get_reg(vcpu, &reg);
+	}
+	case KVM_GET_REG_LIST: {
+		struct kvm_reg_list __user *user_list = argp;
+		struct kvm_reg_list reg_list;
+		unsigned n;
+
+		if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
+			return -EFAULT;
+		n = reg_list.n;
+		reg_list.n = kvm_arm_num_regs(vcpu);
+		if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
+			return -EFAULT;
+		if (n < reg_list.n)
+			return -E2BIG;
+		return kvm_arm_copy_reg_indices(vcpu, user_list->reg);
+	}
+	default:
+		return -EINVAL;
+	}
+}
+
+int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
+{
+	return -EINVAL;
+}
+
+long kvm_arch_vm_ioctl(struct file *filp,
+		       unsigned int ioctl, unsigned long arg)
+{
+	return -EINVAL;
+}
+
+static void cpu_init_hyp_mode(void *vector)
+{
+	unsigned long long pgd_ptr;
+	unsigned long pgd_low, pgd_high;
+	unsigned long hyp_stack_ptr;
+	unsigned long stack_page;
+	unsigned long vector_ptr;
+
+	/* Switch from the HYP stub to our own HYP init vector */
+	__hyp_set_vectors((unsigned long)vector);
+
+	pgd_ptr = (unsigned long long)kvm_mmu_get_httbr();
+	pgd_low = (pgd_ptr & ((1ULL << 32) - 1));
+	pgd_high = (pgd_ptr >> 32ULL);
+	stack_page = __get_cpu_var(kvm_arm_hyp_stack_page);
+	hyp_stack_ptr = stack_page + PAGE_SIZE;
+	vector_ptr = (unsigned long)__kvm_hyp_vector;
+
+	/*
+	 * Call initialization code, and switch to the full blown
+	 * HYP code. The init code doesn't need to preserve these registers as
+	 * r1-r3 and r12 are caller-save according to the AAPCS.
+	 * Note that we slightly misuse the prototype by casting pgd_low to
+	 * a void *.
+	 */
+	kvm_call_hyp((void *)pgd_low, pgd_high, hyp_stack_ptr, vector_ptr);
+}
+
+/**
+ * Inits Hyp-mode on all online CPUs
+ */
+static int init_hyp_mode(void)
+{
+	phys_addr_t init_phys_addr;
+	int cpu;
+	int err = 0;
+
+	/*
+	 * Allocate Hyp PGD and setup Hyp identity mapping
+	 */
+	err = kvm_mmu_init();
+	if (err)
+		goto out_err;
+
+	/*
+	 * It is probably enough to obtain the default on one
+	 * CPU. 
It's unlikely to be different on the others. +	 */ +	hyp_default_vectors = __hyp_get_vectors(); + +	/* +	 * Allocate stack pages for Hypervisor-mode +	 */ +	for_each_possible_cpu(cpu) { +		unsigned long stack_page; + +		stack_page = __get_free_page(GFP_KERNEL); +		if (!stack_page) { +			err = -ENOMEM; +			goto out_free_stack_pages; +		} + +		per_cpu(kvm_arm_hyp_stack_page, cpu) = stack_page; +	} + +	/* +	 * Execute the init code on each CPU. +	 * +	 * Note: The stack is not mapped yet, so don't do anything else than +	 * initializing the hypervisor mode on each CPU using a local stack +	 * space for temporary storage. +	 */ +	init_phys_addr = virt_to_phys(__kvm_hyp_init); +	for_each_online_cpu(cpu) { +		smp_call_function_single(cpu, cpu_init_hyp_mode, +					 (void *)(long)init_phys_addr, 1); +	} + +	/* +	 * Unmap the identity mapping +	 */ +	kvm_clear_hyp_idmap(); + +	/* +	 * Map the Hyp-code called directly from the host +	 */ +	err = create_hyp_mappings(__kvm_hyp_code_start, __kvm_hyp_code_end); +	if (err) { +		kvm_err("Cannot map world-switch code\n"); +		goto out_free_mappings; +	} + +	/* +	 * Map the Hyp stack pages +	 */ +	for_each_possible_cpu(cpu) { +		char *stack_page = (char *)per_cpu(kvm_arm_hyp_stack_page, cpu); +		err = create_hyp_mappings(stack_page, stack_page + PAGE_SIZE); + +		if (err) { +			kvm_err("Cannot map hyp stack\n"); +			goto out_free_mappings; +		} +	} + +	/* +	 * Map the host VFP structures +	 */ +	kvm_host_vfp_state = alloc_percpu(struct vfp_hard_struct); +	if (!kvm_host_vfp_state) { +		err = -ENOMEM; +		kvm_err("Cannot allocate host VFP state\n"); +		goto out_free_mappings; +	} + +	for_each_possible_cpu(cpu) { +		struct vfp_hard_struct *vfp; + +		vfp = per_cpu_ptr(kvm_host_vfp_state, cpu); +		err = create_hyp_mappings(vfp, vfp + 1); + +		if (err) { +			kvm_err("Cannot map host VFP state: %d\n", err); +			goto out_free_vfp; +		} +	} + +	kvm_info("Hyp mode initialized successfully\n"); +	return 0; +out_free_vfp: +	free_percpu(kvm_host_vfp_state); +out_free_mappings: +	free_hyp_pmds(); +out_free_stack_pages: +	for_each_possible_cpu(cpu) +		free_page(per_cpu(kvm_arm_hyp_stack_page, cpu)); +out_err: +	kvm_err("error initializing Hyp mode: %d\n", err); +	return err; +} + +/** + * Initialize Hyp-mode and memory mappings on all CPUs. + */ +int kvm_arch_init(void *opaque) +{ +	int err; + +	if (!is_hyp_mode_available()) { +		kvm_err("HYP mode not available\n"); +		return -ENODEV; +	} + +	if (kvm_target_cpu() < 0) { +		kvm_err("Target CPU not supported!\n"); +		return -ENODEV; +	} + +	err = init_hyp_mode(); +	if (err) +		goto out_err; + +	kvm_coproc_table_init(); +	return 0; +out_err: +	return err; +} + +/* NOP: Compiling as a module not supported */ +void kvm_arch_exit(void) +{ +} + +static int arm_init(void) +{ +	int rc = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE); +	return rc; +} + +module_init(arm_init); diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c new file mode 100644 index 00000000000..d782638c7ec --- /dev/null +++ b/arch/arm/kvm/coproc.c @@ -0,0 +1,1046 @@ +/* + * Copyright (C) 2012 - Virtual Open Systems and Columbia University + * Authors: Rusty Russell <rusty@rustcorp.com.au> + *          Christoffer Dall <c.dall@virtualopensystems.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. 
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+#include <linux/mm.h>
+#include <linux/kvm_host.h>
+#include <linux/uaccess.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_host.h>
+#include <asm/kvm_emulate.h>
+#include <asm/kvm_coproc.h>
+#include <asm/cacheflush.h>
+#include <asm/cputype.h>
+#include <trace/events/kvm.h>
+#include <asm/vfp.h>
+#include "../vfp/vfpinstr.h"
+
+#include "trace.h"
+#include "coproc.h"
+
+
+/******************************************************************************
+ * Co-processor emulation
+ *****************************************************************************/
+
+/* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */
+static u32 cache_levels;
+
+/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
+#define CSSELR_MAX 12
+
+int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	kvm_inject_undefined(vcpu);
+	return 1;
+}
+
+int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	/*
+	 * We can get here, if the host has been built without VFPv3 support,
+	 * but the guest attempted a floating point operation.
+	 */
+	kvm_inject_undefined(vcpu);
+	return 1;
+}
+
+int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	kvm_inject_undefined(vcpu);
+	return 1;
+}
+
+int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	kvm_inject_undefined(vcpu);
+	return 1;
+}
+
+/* See note at ARM ARM B1.14.4 */
+static bool access_dcsw(struct kvm_vcpu *vcpu,
+			const struct coproc_params *p,
+			const struct coproc_reg *r)
+{
+	u32 val;
+	int cpu;
+
+	if (!p->is_write)
+		return read_from_write_only(vcpu, p);
+
+	cpu = get_cpu();
+
+	cpumask_setall(&vcpu->arch.require_dcache_flush);
+	cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush);
+
+	/* If we were already preempted, take the long way around */
+	if (cpu != vcpu->arch.last_pcpu) {
+		flush_cache_all();
+		goto done;
+	}
+
+	val = *vcpu_reg(vcpu, p->Rt1);
+
+	switch (p->CRm) {
+	case 6:			/* Upgrade DCISW to DCCISW, as per HCR.SWIO */
+	case 14:		/* DCCISW */
+		asm volatile("mcr p15, 0, %0, c7, c14, 2" : : "r" (val));
+		break;
+
+	case 10:		/* DCCSW */
+		asm volatile("mcr p15, 0, %0, c7, c10, 2" : : "r" (val));
+		break;
+	}
+
+done:
+	put_cpu();
+
+	return true;
+}
+
+/*
+ * We could trap ID_DFR0 and tell the guest we don't support performance
+ * monitoring.  Unfortunately the patch to make the kernel check ID_DFR0 was
+ * NAKed, so it will read the PMCR anyway.
+ *
+ * Therefore we tell the guest we have 0 counters.  Unfortunately, we
+ * must always support PMCCNTR (the cycle counter): we just RAZ/WI for
+ * all PM registers, which doesn't crash the guest kernel at least. 
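+ *
+ * In practice that means a guest PMCR read ("mrc p15, 0, rN, c9, c12, 0")
+ * lands in read_zero() and returns 0, while the corresponding mcr is
+ * silently ignored by ignore_write().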
+ */ +static bool pm_fake(struct kvm_vcpu *vcpu, +		    const struct coproc_params *p, +		    const struct coproc_reg *r) +{ +	if (p->is_write) +		return ignore_write(vcpu, p); +	else +		return read_zero(vcpu, p); +} + +#define access_pmcr pm_fake +#define access_pmcntenset pm_fake +#define access_pmcntenclr pm_fake +#define access_pmovsr pm_fake +#define access_pmselr pm_fake +#define access_pmceid0 pm_fake +#define access_pmceid1 pm_fake +#define access_pmccntr pm_fake +#define access_pmxevtyper pm_fake +#define access_pmxevcntr pm_fake +#define access_pmuserenr pm_fake +#define access_pmintenset pm_fake +#define access_pmintenclr pm_fake + +/* Architected CP15 registers. + * Important: Must be sorted ascending by CRn, CRM, Op1, Op2 + */ +static const struct coproc_reg cp15_regs[] = { +	/* CSSELR: swapped by interrupt.S. */ +	{ CRn( 0), CRm( 0), Op1( 2), Op2( 0), is32, +			NULL, reset_unknown, c0_CSSELR }, + +	/* TTBR0/TTBR1: swapped by interrupt.S. */ +	{ CRm( 2), Op1( 0), is64, NULL, reset_unknown64, c2_TTBR0 }, +	{ CRm( 2), Op1( 1), is64, NULL, reset_unknown64, c2_TTBR1 }, + +	/* TTBCR: swapped by interrupt.S. */ +	{ CRn( 2), CRm( 0), Op1( 0), Op2( 2), is32, +			NULL, reset_val, c2_TTBCR, 0x00000000 }, + +	/* DACR: swapped by interrupt.S. */ +	{ CRn( 3), CRm( 0), Op1( 0), Op2( 0), is32, +			NULL, reset_unknown, c3_DACR }, + +	/* DFSR/IFSR/ADFSR/AIFSR: swapped by interrupt.S. */ +	{ CRn( 5), CRm( 0), Op1( 0), Op2( 0), is32, +			NULL, reset_unknown, c5_DFSR }, +	{ CRn( 5), CRm( 0), Op1( 0), Op2( 1), is32, +			NULL, reset_unknown, c5_IFSR }, +	{ CRn( 5), CRm( 1), Op1( 0), Op2( 0), is32, +			NULL, reset_unknown, c5_ADFSR }, +	{ CRn( 5), CRm( 1), Op1( 0), Op2( 1), is32, +			NULL, reset_unknown, c5_AIFSR }, + +	/* DFAR/IFAR: swapped by interrupt.S. */ +	{ CRn( 6), CRm( 0), Op1( 0), Op2( 0), is32, +			NULL, reset_unknown, c6_DFAR }, +	{ CRn( 6), CRm( 0), Op1( 0), Op2( 2), is32, +			NULL, reset_unknown, c6_IFAR }, +	/* +	 * DC{C,I,CI}SW operations: +	 */ +	{ CRn( 7), CRm( 6), Op1( 0), Op2( 2), is32, access_dcsw}, +	{ CRn( 7), CRm(10), Op1( 0), Op2( 2), is32, access_dcsw}, +	{ CRn( 7), CRm(14), Op1( 0), Op2( 2), is32, access_dcsw}, +	/* +	 * Dummy performance monitor implementation. +	 */ +	{ CRn( 9), CRm(12), Op1( 0), Op2( 0), is32, access_pmcr}, +	{ CRn( 9), CRm(12), Op1( 0), Op2( 1), is32, access_pmcntenset}, +	{ CRn( 9), CRm(12), Op1( 0), Op2( 2), is32, access_pmcntenclr}, +	{ CRn( 9), CRm(12), Op1( 0), Op2( 3), is32, access_pmovsr}, +	{ CRn( 9), CRm(12), Op1( 0), Op2( 5), is32, access_pmselr}, +	{ CRn( 9), CRm(12), Op1( 0), Op2( 6), is32, access_pmceid0}, +	{ CRn( 9), CRm(12), Op1( 0), Op2( 7), is32, access_pmceid1}, +	{ CRn( 9), CRm(13), Op1( 0), Op2( 0), is32, access_pmccntr}, +	{ CRn( 9), CRm(13), Op1( 0), Op2( 1), is32, access_pmxevtyper}, +	{ CRn( 9), CRm(13), Op1( 0), Op2( 2), is32, access_pmxevcntr}, +	{ CRn( 9), CRm(14), Op1( 0), Op2( 0), is32, access_pmuserenr}, +	{ CRn( 9), CRm(14), Op1( 0), Op2( 1), is32, access_pmintenset}, +	{ CRn( 9), CRm(14), Op1( 0), Op2( 2), is32, access_pmintenclr}, + +	/* PRRR/NMRR (aka MAIR0/MAIR1): swapped by interrupt.S. */ +	{ CRn(10), CRm( 2), Op1( 0), Op2( 0), is32, +			NULL, reset_unknown, c10_PRRR}, +	{ CRn(10), CRm( 2), Op1( 0), Op2( 1), is32, +			NULL, reset_unknown, c10_NMRR}, + +	/* VBAR: swapped by interrupt.S. */ +	{ CRn(12), CRm( 0), Op1( 0), Op2( 0), is32, +			NULL, reset_val, c12_VBAR, 0x00000000 }, + +	/* CONTEXTIDR/TPIDRURW/TPIDRURO/TPIDRPRW: swapped by interrupt.S. 
*/ +	{ CRn(13), CRm( 0), Op1( 0), Op2( 1), is32, +			NULL, reset_val, c13_CID, 0x00000000 }, +	{ CRn(13), CRm( 0), Op1( 0), Op2( 2), is32, +			NULL, reset_unknown, c13_TID_URW }, +	{ CRn(13), CRm( 0), Op1( 0), Op2( 3), is32, +			NULL, reset_unknown, c13_TID_URO }, +	{ CRn(13), CRm( 0), Op1( 0), Op2( 4), is32, +			NULL, reset_unknown, c13_TID_PRIV }, +}; + +/* Target specific emulation tables */ +static struct kvm_coproc_target_table *target_tables[KVM_ARM_NUM_TARGETS]; + +void kvm_register_target_coproc_table(struct kvm_coproc_target_table *table) +{ +	target_tables[table->target] = table; +} + +/* Get specific register table for this target. */ +static const struct coproc_reg *get_target_table(unsigned target, size_t *num) +{ +	struct kvm_coproc_target_table *table; + +	table = target_tables[target]; +	*num = table->num; +	return table->table; +} + +static const struct coproc_reg *find_reg(const struct coproc_params *params, +					 const struct coproc_reg table[], +					 unsigned int num) +{ +	unsigned int i; + +	for (i = 0; i < num; i++) { +		const struct coproc_reg *r = &table[i]; + +		if (params->is_64bit != r->is_64) +			continue; +		if (params->CRn != r->CRn) +			continue; +		if (params->CRm != r->CRm) +			continue; +		if (params->Op1 != r->Op1) +			continue; +		if (params->Op2 != r->Op2) +			continue; + +		return r; +	} +	return NULL; +} + +static int emulate_cp15(struct kvm_vcpu *vcpu, +			const struct coproc_params *params) +{ +	size_t num; +	const struct coproc_reg *table, *r; + +	trace_kvm_emulate_cp15_imp(params->Op1, params->Rt1, params->CRn, +				   params->CRm, params->Op2, params->is_write); + +	table = get_target_table(vcpu->arch.target, &num); + +	/* Search target-specific then generic table. */ +	r = find_reg(params, table, num); +	if (!r) +		r = find_reg(params, cp15_regs, ARRAY_SIZE(cp15_regs)); + +	if (likely(r)) { +		/* If we don't have an accessor, we should never get here! */ +		BUG_ON(!r->access); + +		if (likely(r->access(vcpu, params, r))) { +			/* Skip instruction, since it was emulated */ +			kvm_skip_instr(vcpu, (vcpu->arch.hsr >> 25) & 1); +			return 1; +		} +		/* If access function fails, it should complain. 
*/
+	} else {
+		kvm_err("Unsupported guest CP15 access at: %08x\n",
+			*vcpu_pc(vcpu));
+		print_cp_instr(params);
+	}
+	kvm_inject_undefined(vcpu);
+	return 1;
+}
+
+/**
+ * kvm_handle_cp15_64 - handles a mrrc/mcrr trap on a guest CP15 access
+ * @vcpu: The VCPU pointer
+ * @run:  The kvm_run struct
+ */
+int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	struct coproc_params params;
+
+	params.CRm = (vcpu->arch.hsr >> 1) & 0xf;
+	params.Rt1 = (vcpu->arch.hsr >> 5) & 0xf;
+	params.is_write = ((vcpu->arch.hsr & 1) == 0);
+	params.is_64bit = true;
+
+	params.Op1 = (vcpu->arch.hsr >> 16) & 0xf;
+	params.Op2 = 0;
+	params.Rt2 = (vcpu->arch.hsr >> 10) & 0xf;
+	params.CRn = 0;
+
+	return emulate_cp15(vcpu, &params);
+}
+
+static void reset_coproc_regs(struct kvm_vcpu *vcpu,
+			      const struct coproc_reg *table, size_t num)
+{
+	unsigned long i;
+
+	for (i = 0; i < num; i++)
+		if (table[i].reset)
+			table[i].reset(vcpu, &table[i]);
+}
+
+/**
+ * kvm_handle_cp15_32 - handles a mrc/mcr trap on a guest CP15 access
+ * @vcpu: The VCPU pointer
+ * @run:  The kvm_run struct
+ */
+int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	struct coproc_params params;
+
+	params.CRm = (vcpu->arch.hsr >> 1) & 0xf;
+	params.Rt1 = (vcpu->arch.hsr >> 5) & 0xf;
+	params.is_write = ((vcpu->arch.hsr & 1) == 0);
+	params.is_64bit = false;
+
+	params.CRn = (vcpu->arch.hsr >> 10) & 0xf;
+	params.Op1 = (vcpu->arch.hsr >> 14) & 0x7;
+	params.Op2 = (vcpu->arch.hsr >> 17) & 0x7;
+	params.Rt2 = 0;
+
+	return emulate_cp15(vcpu, &params);
+}
+
+/******************************************************************************
+ * Userspace API
+ *****************************************************************************/
+
+static bool index_to_params(u64 id, struct coproc_params *params)
+{
+	switch (id & KVM_REG_SIZE_MASK) {
+	case KVM_REG_SIZE_U32:
+		/* Any unused index bits mean it's not valid. */
+		if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
+			   | KVM_REG_ARM_COPROC_MASK
+			   | KVM_REG_ARM_32_CRN_MASK
+			   | KVM_REG_ARM_CRM_MASK
+			   | KVM_REG_ARM_OPC1_MASK
+			   | KVM_REG_ARM_32_OPC2_MASK))
+			return false;
+
+		params->is_64bit = false;
+		params->CRn = ((id & KVM_REG_ARM_32_CRN_MASK)
+			       >> KVM_REG_ARM_32_CRN_SHIFT);
+		params->CRm = ((id & KVM_REG_ARM_CRM_MASK)
+			       >> KVM_REG_ARM_CRM_SHIFT);
+		params->Op1 = ((id & KVM_REG_ARM_OPC1_MASK)
+			       >> KVM_REG_ARM_OPC1_SHIFT);
+		params->Op2 = ((id & KVM_REG_ARM_32_OPC2_MASK)
+			       >> KVM_REG_ARM_32_OPC2_SHIFT);
+		return true;
+	case KVM_REG_SIZE_U64:
+		/* Any unused index bits mean it's not valid. */
+		if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
+			      | KVM_REG_ARM_COPROC_MASK
+			      | KVM_REG_ARM_CRM_MASK
+			      | KVM_REG_ARM_OPC1_MASK))
+			return false;
+		params->is_64bit = true;
+		params->CRm = ((id & KVM_REG_ARM_CRM_MASK)
+			       >> KVM_REG_ARM_CRM_SHIFT);
+		params->Op1 = ((id & KVM_REG_ARM_OPC1_MASK)
+			       >> KVM_REG_ARM_OPC1_SHIFT);
+		params->Op2 = 0;
+		params->CRn = 0;
+		return true;
+	default:
+		return false;
+	}
+}
+
+/* Decode an index value, and find the cp15 coproc_reg entry. */
+static const struct coproc_reg *index_to_coproc_reg(struct kvm_vcpu *vcpu,
+						    u64 id)
+{
+	size_t num;
+	const struct coproc_reg *table, *r;
+	struct coproc_params params;
+
+	/* We only do cp15 for now. 
*/
+	if ((id & KVM_REG_ARM_COPROC_MASK) >> KVM_REG_ARM_COPROC_SHIFT != 15)
+		return NULL;
+
+	if (!index_to_params(id, &params))
+		return NULL;
+
+	table = get_target_table(vcpu->arch.target, &num);
+	r = find_reg(&params, table, num);
+	if (!r)
+		r = find_reg(&params, cp15_regs, ARRAY_SIZE(cp15_regs));
+
+	/* Not saved in the cp15 array? */
+	if (r && !r->reg)
+		r = NULL;
+
+	return r;
+}
+
+/*
+ * These are the invariant cp15 registers: we let the guest see the host
+ * versions of these, so they're part of the guest state.
+ *
+ * A future CPU may provide a mechanism to present different values to
+ * the guest, or a future kvm may trap them.
+ */
+/* Unfortunately, there's no register-argument for mrc, so generate. */
+#define FUNCTION_FOR32(crn, crm, op1, op2, name)			\
+	static void get_##name(struct kvm_vcpu *v,			\
+			       const struct coproc_reg *r)		\
+	{								\
+		u32 val;						\
+									\
+		asm volatile("mrc p15, " __stringify(op1)		\
+			     ", %0, c" __stringify(crn)			\
+			     ", c" __stringify(crm)			\
+			     ", " __stringify(op2) "\n" : "=r" (val));	\
+		((struct coproc_reg *)r)->val = val;			\
+	}
+
+FUNCTION_FOR32(0, 0, 0, 0, MIDR)
+FUNCTION_FOR32(0, 0, 0, 1, CTR)
+FUNCTION_FOR32(0, 0, 0, 2, TCMTR)
+FUNCTION_FOR32(0, 0, 0, 3, TLBTR)
+FUNCTION_FOR32(0, 0, 0, 6, REVIDR)
+FUNCTION_FOR32(0, 1, 0, 0, ID_PFR0)
+FUNCTION_FOR32(0, 1, 0, 1, ID_PFR1)
+FUNCTION_FOR32(0, 1, 0, 2, ID_DFR0)
+FUNCTION_FOR32(0, 1, 0, 3, ID_AFR0)
+FUNCTION_FOR32(0, 1, 0, 4, ID_MMFR0)
+FUNCTION_FOR32(0, 1, 0, 5, ID_MMFR1)
+FUNCTION_FOR32(0, 1, 0, 6, ID_MMFR2)
+FUNCTION_FOR32(0, 1, 0, 7, ID_MMFR3)
+FUNCTION_FOR32(0, 2, 0, 0, ID_ISAR0)
+FUNCTION_FOR32(0, 2, 0, 1, ID_ISAR1)
+FUNCTION_FOR32(0, 2, 0, 2, ID_ISAR2)
+FUNCTION_FOR32(0, 2, 0, 3, ID_ISAR3)
+FUNCTION_FOR32(0, 2, 0, 4, ID_ISAR4)
+FUNCTION_FOR32(0, 2, 0, 5, ID_ISAR5)
+FUNCTION_FOR32(0, 0, 1, 1, CLIDR)
+FUNCTION_FOR32(0, 0, 1, 7, AIDR)
+
+/* ->val is filled in by kvm_coproc_table_init() */
+static struct coproc_reg invariant_cp15[] = {
+	{ CRn( 0), CRm( 0), Op1( 0), Op2( 0), is32, NULL, get_MIDR },
+	{ CRn( 0), CRm( 0), Op1( 0), Op2( 1), is32, NULL, get_CTR },
+	{ CRn( 0), CRm( 0), Op1( 0), Op2( 2), is32, NULL, get_TCMTR },
+	{ CRn( 0), CRm( 0), Op1( 0), Op2( 3), is32, NULL, get_TLBTR },
+	{ CRn( 0), CRm( 0), Op1( 0), Op2( 6), is32, NULL, get_REVIDR },
+
+	{ CRn( 0), CRm( 1), Op1( 0), Op2( 0), is32, NULL, get_ID_PFR0 },
+	{ CRn( 0), CRm( 1), Op1( 0), Op2( 1), is32, NULL, get_ID_PFR1 },
+	{ CRn( 0), CRm( 1), Op1( 0), Op2( 2), is32, NULL, get_ID_DFR0 },
+	{ CRn( 0), CRm( 1), Op1( 0), Op2( 3), is32, NULL, get_ID_AFR0 },
+	{ CRn( 0), CRm( 1), Op1( 0), Op2( 4), is32, NULL, get_ID_MMFR0 },
+	{ CRn( 0), CRm( 1), Op1( 0), Op2( 5), is32, NULL, get_ID_MMFR1 },
+	{ CRn( 0), CRm( 1), Op1( 0), Op2( 6), is32, NULL, get_ID_MMFR2 },
+	{ CRn( 0), CRm( 1), Op1( 0), Op2( 7), is32, NULL, get_ID_MMFR3 },
+
+	{ CRn( 0), CRm( 2), Op1( 0), Op2( 0), is32, NULL, get_ID_ISAR0 },
+	{ CRn( 0), CRm( 2), Op1( 0), Op2( 1), is32, NULL, get_ID_ISAR1 },
+	{ CRn( 0), CRm( 2), Op1( 0), Op2( 2), is32, NULL, get_ID_ISAR2 },
+	{ CRn( 0), CRm( 2), Op1( 0), Op2( 3), is32, NULL, get_ID_ISAR3 },
+	{ CRn( 0), CRm( 2), Op1( 0), Op2( 4), is32, NULL, get_ID_ISAR4 },
+	{ CRn( 0), CRm( 2), Op1( 0), Op2( 5), is32, NULL, get_ID_ISAR5 },
+
+	{ CRn( 0), CRm( 0), Op1( 1), Op2( 1), is32, NULL, get_CLIDR },
+	{ CRn( 0), CRm( 0), Op1( 1), Op2( 7), is32, NULL, get_AIDR },
+};
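+
+/*
+ * For example, FUNCTION_FOR32(0, 0, 0, 0, MIDR) above expands (whitespace
+ * aside) to:
+ *
+ *	static void get_MIDR(struct kvm_vcpu *v, const struct coproc_reg *r)
+ *	{
+ *		u32 val;
+ *
+ *		asm volatile("mrc p15, 0, %0, c0, c0, 0\n" : "=r" (val));
+ *		((struct coproc_reg *)r)->val = val;
+ *	}
+ *
+ * i.e. each invariant register simply snapshots the host's value.
+ */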
+
+static int reg_from_user(void *val, const void __user *uaddr, u64 id)
+{
+	/* This Just Works because we are little endian. */
+	if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0)
+		return -EFAULT;
+	return 0;
+}
+
+static int reg_to_user(void __user *uaddr, const void *val, u64 id)
+{
+	/* This Just Works because we are little endian. */
+	if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0)
+		return -EFAULT;
+	return 0;
+}
+
+static int get_invariant_cp15(u64 id, void __user *uaddr)
+{
+	struct coproc_params params;
+	const struct coproc_reg *r;
+
+	if (!index_to_params(id, &params))
+		return -ENOENT;
+
+	r = find_reg(&params, invariant_cp15, ARRAY_SIZE(invariant_cp15));
+	if (!r)
+		return -ENOENT;
+
+	return reg_to_user(uaddr, &r->val, id);
+}
+
+static int set_invariant_cp15(u64 id, void __user *uaddr)
+{
+	struct coproc_params params;
+	const struct coproc_reg *r;
+	int err;
+	u64 val = 0; /* Make sure high bits are 0 for 32-bit regs */
+
+	if (!index_to_params(id, &params))
+		return -ENOENT;
+	r = find_reg(&params, invariant_cp15, ARRAY_SIZE(invariant_cp15));
+	if (!r)
+		return -ENOENT;
+
+	err = reg_from_user(&val, uaddr, id);
+	if (err)
+		return err;
+
+	/* This is what we mean by invariant: you can't change it. */
+	if (r->val != val)
+		return -EINVAL;
+
+	return 0;
+}
+
+static bool is_valid_cache(u32 val)
+{
+	u32 level, ctype;
+
+	if (val >= CSSELR_MAX)
+		return false;
+
+	/* Bottom bit is Instruction or Data bit.  Next 3 bits are level. */
+	level = (val >> 1);
+	ctype = (cache_levels >> (level * 3)) & 7;
+
+	switch (ctype) {
+	case 0: /* No cache */
+		return false;
+	case 1: /* Instruction cache only */
+		return (val & 1);
+	case 2: /* Data cache only */
+	case 4: /* Unified cache */
+		return !(val & 1);
+	case 3: /* Separate instruction and data caches */
+		return true;
+	default: /* Reserved: we can't know instruction or data. */
+		return false;
+	}
+}
+
+/* Which cache CCSIDR represents depends on CSSELR value. */
+static u32 get_ccsidr(u32 csselr)
+{
+	u32 ccsidr;
+
+	/* Make sure no one else changes CSSELR during this! */
+	local_irq_disable();
+	/* Put value into CSSELR */
+	asm volatile("mcr p15, 2, %0, c0, c0, 0" : : "r" (csselr));
+	isb();
+	/* Read result out of CCSIDR */
+	asm volatile("mrc p15, 1, %0, c0, c0, 0" : "=r" (ccsidr));
+	local_irq_enable();
+
+	return ccsidr;
+}
+
+static int demux_c15_get(u64 id, void __user *uaddr)
+{
+	u32 val;
+	u32 __user *uval = uaddr;
+
+	/* Fail if we have unknown bits set. */
+	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
+		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
+		return -ENOENT;
+
+	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
+	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
+		if (KVM_REG_SIZE(id) != 4)
+			return -ENOENT;
+		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
+			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
+		if (!is_valid_cache(val))
+			return -ENOENT;
+
+		return put_user(get_ccsidr(val), uval);
+	default:
+		return -ENOENT;
+	}
+}
+
+static int demux_c15_set(u64 id, void __user *uaddr)
+{
+	u32 val, newval;
+	u32 __user *uval = uaddr;
+
+	/* Fail if we have unknown bits set. */
+	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
+		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
+		return -ENOENT;
+
+	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
+	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
+		if (KVM_REG_SIZE(id) != 4)
+			return -ENOENT;
+		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
+			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
+		if (!is_valid_cache(val))
+			return -ENOENT;
+
+		if (get_user(newval, uval))
+			return -EFAULT;
+
+		/* This is also invariant: you can't change it. 
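A write
+		 * succeeds only if user space hands back exactly the value
+		 * it read: the id decoded here is the one produced by
+		 * write_demux_regids() below, and the payload must match
+		 * the host's CCSIDR for that cache. 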
*/ +		if (newval != get_ccsidr(val)) +			return -EINVAL; +		return 0; +	default: +		return -ENOENT; +	} +} + +#ifdef CONFIG_VFPv3 +static const int vfp_sysregs[] = { KVM_REG_ARM_VFP_FPEXC, +				   KVM_REG_ARM_VFP_FPSCR, +				   KVM_REG_ARM_VFP_FPINST, +				   KVM_REG_ARM_VFP_FPINST2, +				   KVM_REG_ARM_VFP_MVFR0, +				   KVM_REG_ARM_VFP_MVFR1, +				   KVM_REG_ARM_VFP_FPSID }; + +static unsigned int num_fp_regs(void) +{ +	if (((fmrx(MVFR0) & MVFR0_A_SIMD_MASK) >> MVFR0_A_SIMD_BIT) == 2) +		return 32; +	else +		return 16; +} + +static unsigned int num_vfp_regs(void) +{ +	/* Normal FP regs + control regs. */ +	return num_fp_regs() + ARRAY_SIZE(vfp_sysregs); +} + +static int copy_vfp_regids(u64 __user *uindices) +{ +	unsigned int i; +	const u64 u32reg = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_VFP; +	const u64 u64reg = KVM_REG_ARM | KVM_REG_SIZE_U64 | KVM_REG_ARM_VFP; + +	for (i = 0; i < num_fp_regs(); i++) { +		if (put_user((u64reg | KVM_REG_ARM_VFP_BASE_REG) + i, +			     uindices)) +			return -EFAULT; +		uindices++; +	} + +	for (i = 0; i < ARRAY_SIZE(vfp_sysregs); i++) { +		if (put_user(u32reg | vfp_sysregs[i], uindices)) +			return -EFAULT; +		uindices++; +	} + +	return num_vfp_regs(); +} + +static int vfp_get_reg(const struct kvm_vcpu *vcpu, u64 id, void __user *uaddr) +{ +	u32 vfpid = (id & KVM_REG_ARM_VFP_MASK); +	u32 val; + +	/* Fail if we have unknown bits set. */ +	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK +		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1))) +		return -ENOENT; + +	if (vfpid < num_fp_regs()) { +		if (KVM_REG_SIZE(id) != 8) +			return -ENOENT; +		return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpregs[vfpid], +				   id); +	} + +	/* FP control registers are all 32 bit. */ +	if (KVM_REG_SIZE(id) != 4) +		return -ENOENT; + +	switch (vfpid) { +	case KVM_REG_ARM_VFP_FPEXC: +		return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpexc, id); +	case KVM_REG_ARM_VFP_FPSCR: +		return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpscr, id); +	case KVM_REG_ARM_VFP_FPINST: +		return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpinst, id); +	case KVM_REG_ARM_VFP_FPINST2: +		return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpinst2, id); +	case KVM_REG_ARM_VFP_MVFR0: +		val = fmrx(MVFR0); +		return reg_to_user(uaddr, &val, id); +	case KVM_REG_ARM_VFP_MVFR1: +		val = fmrx(MVFR1); +		return reg_to_user(uaddr, &val, id); +	case KVM_REG_ARM_VFP_FPSID: +		val = fmrx(FPSID); +		return reg_to_user(uaddr, &val, id); +	default: +		return -ENOENT; +	} +} + +static int vfp_set_reg(struct kvm_vcpu *vcpu, u64 id, const void __user *uaddr) +{ +	u32 vfpid = (id & KVM_REG_ARM_VFP_MASK); +	u32 val; + +	/* Fail if we have unknown bits set. */ +	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK +		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1))) +		return -ENOENT; + +	if (vfpid < num_fp_regs()) { +		if (KVM_REG_SIZE(id) != 8) +			return -ENOENT; +		return reg_from_user(&vcpu->arch.vfp_guest.fpregs[vfpid], +				     uaddr, id); +	} + +	/* FP control registers are all 32 bit. */ +	if (KVM_REG_SIZE(id) != 4) +		return -ENOENT; + +	switch (vfpid) { +	case KVM_REG_ARM_VFP_FPEXC: +		return reg_from_user(&vcpu->arch.vfp_guest.fpexc, uaddr, id); +	case KVM_REG_ARM_VFP_FPSCR: +		return reg_from_user(&vcpu->arch.vfp_guest.fpscr, uaddr, id); +	case KVM_REG_ARM_VFP_FPINST: +		return reg_from_user(&vcpu->arch.vfp_guest.fpinst, uaddr, id); +	case KVM_REG_ARM_VFP_FPINST2: +		return reg_from_user(&vcpu->arch.vfp_guest.fpinst2, uaddr, id); +	/* These are invariant. 
*/ +	case KVM_REG_ARM_VFP_MVFR0: +		if (reg_from_user(&val, uaddr, id)) +			return -EFAULT; +		if (val != fmrx(MVFR0)) +			return -EINVAL; +		return 0; +	case KVM_REG_ARM_VFP_MVFR1: +		if (reg_from_user(&val, uaddr, id)) +			return -EFAULT; +		if (val != fmrx(MVFR1)) +			return -EINVAL; +		return 0; +	case KVM_REG_ARM_VFP_FPSID: +		if (reg_from_user(&val, uaddr, id)) +			return -EFAULT; +		if (val != fmrx(FPSID)) +			return -EINVAL; +		return 0; +	default: +		return -ENOENT; +	} +} +#else /* !CONFIG_VFPv3 */ +static unsigned int num_vfp_regs(void) +{ +	return 0; +} + +static int copy_vfp_regids(u64 __user *uindices) +{ +	return 0; +} + +static int vfp_get_reg(const struct kvm_vcpu *vcpu, u64 id, void __user *uaddr) +{ +	return -ENOENT; +} + +static int vfp_set_reg(struct kvm_vcpu *vcpu, u64 id, const void __user *uaddr) +{ +	return -ENOENT; +} +#endif /* !CONFIG_VFPv3 */ + +int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) +{ +	const struct coproc_reg *r; +	void __user *uaddr = (void __user *)(long)reg->addr; + +	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX) +		return demux_c15_get(reg->id, uaddr); + +	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_VFP) +		return vfp_get_reg(vcpu, reg->id, uaddr); + +	r = index_to_coproc_reg(vcpu, reg->id); +	if (!r) +		return get_invariant_cp15(reg->id, uaddr); + +	/* Note: copies two regs if size is 64 bit. */ +	return reg_to_user(uaddr, &vcpu->arch.cp15[r->reg], reg->id); +} + +int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) +{ +	const struct coproc_reg *r; +	void __user *uaddr = (void __user *)(long)reg->addr; + +	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX) +		return demux_c15_set(reg->id, uaddr); + +	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_VFP) +		return vfp_set_reg(vcpu, reg->id, uaddr); + +	r = index_to_coproc_reg(vcpu, reg->id); +	if (!r) +		return set_invariant_cp15(reg->id, uaddr); + +	/* Note: copies two regs if size is 64 bit */ +	return reg_from_user(&vcpu->arch.cp15[r->reg], uaddr, reg->id); +} + +static unsigned int num_demux_regs(void) +{ +	unsigned int i, count = 0; + +	for (i = 0; i < CSSELR_MAX; i++) +		if (is_valid_cache(i)) +			count++; + +	return count; +} + +static int write_demux_regids(u64 __user *uindices) +{ +	u64 val = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX; +	unsigned int i; + +	val |= KVM_REG_ARM_DEMUX_ID_CCSIDR; +	for (i = 0; i < CSSELR_MAX; i++) { +		if (!is_valid_cache(i)) +			continue; +		if (put_user(val | i, uindices)) +			return -EFAULT; +		uindices++; +	} +	return 0; +} + +static u64 cp15_to_index(const struct coproc_reg *reg) +{ +	u64 val = KVM_REG_ARM | (15 << KVM_REG_ARM_COPROC_SHIFT); +	if (reg->is_64) { +		val |= KVM_REG_SIZE_U64; +		val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT); +		val |= (reg->CRm << KVM_REG_ARM_CRM_SHIFT); +	} else { +		val |= KVM_REG_SIZE_U32; +		val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT); +		val |= (reg->Op2 << KVM_REG_ARM_32_OPC2_SHIFT); +		val |= (reg->CRm << KVM_REG_ARM_CRM_SHIFT); +		val |= (reg->CRn << KVM_REG_ARM_32_CRN_SHIFT); +	} +	return val; +} + +static bool copy_reg_to_user(const struct coproc_reg *reg, u64 __user **uind) +{ +	if (!*uind) +		return true; + +	if (put_user(cp15_to_index(reg), *uind)) +		return false; + +	(*uind)++; +	return true; +} + +/* Assumed ordered tables, see kvm_coproc_table_init. 
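The walk
+ * below is then a standard merge of two sorted tables: e.g. given a
+ * target-specific table {A, C} and the generic table {B, C}, it visits
+ * A, B, C exactly once, preferring the target-specific entry for C
+ * (the cmp_reg() tie). 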
*/ +static int walk_cp15(struct kvm_vcpu *vcpu, u64 __user *uind) +{ +	const struct coproc_reg *i1, *i2, *end1, *end2; +	unsigned int total = 0; +	size_t num; + +	/* We check for duplicates here, to allow arch-specific overrides. */ +	i1 = get_target_table(vcpu->arch.target, &num); +	end1 = i1 + num; +	i2 = cp15_regs; +	end2 = cp15_regs + ARRAY_SIZE(cp15_regs); + +	BUG_ON(i1 == end1 || i2 == end2); + +	/* Walk carefully, as both tables may refer to the same register. */ +	while (i1 || i2) { +		int cmp = cmp_reg(i1, i2); +		/* target-specific overrides generic entry. */ +		if (cmp <= 0) { +			/* Ignore registers we trap but don't save. */ +			if (i1->reg) { +				if (!copy_reg_to_user(i1, &uind)) +					return -EFAULT; +				total++; +			} +		} else { +			/* Ignore registers we trap but don't save. */ +			if (i2->reg) { +				if (!copy_reg_to_user(i2, &uind)) +					return -EFAULT; +				total++; +			} +		} + +		if (cmp <= 0 && ++i1 == end1) +			i1 = NULL; +		if (cmp >= 0 && ++i2 == end2) +			i2 = NULL; +	} +	return total; +} + +unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu) +{ +	return ARRAY_SIZE(invariant_cp15) +		+ num_demux_regs() +		+ num_vfp_regs() +		+ walk_cp15(vcpu, (u64 __user *)NULL); +} + +int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices) +{ +	unsigned int i; +	int err; + +	/* Then give them all the invariant registers' indices. */ +	for (i = 0; i < ARRAY_SIZE(invariant_cp15); i++) { +		if (put_user(cp15_to_index(&invariant_cp15[i]), uindices)) +			return -EFAULT; +		uindices++; +	} + +	err = walk_cp15(vcpu, uindices); +	if (err < 0) +		return err; +	uindices += err; + +	err = copy_vfp_regids(uindices); +	if (err < 0) +		return err; +	uindices += err; + +	return write_demux_regids(uindices); +} + +void kvm_coproc_table_init(void) +{ +	unsigned int i; + +	/* Make sure tables are unique and in order. */ +	for (i = 1; i < ARRAY_SIZE(cp15_regs); i++) +		BUG_ON(cmp_reg(&cp15_regs[i-1], &cp15_regs[i]) >= 0); + +	/* We abuse the reset function to overwrite the table itself. */ +	for (i = 0; i < ARRAY_SIZE(invariant_cp15); i++) +		invariant_cp15[i].reset(NULL, &invariant_cp15[i]); + +	/* +	 * CLIDR format is awkward, so clean it up.  See ARM B4.1.20: +	 * +	 *   If software reads the Cache Type fields from Ctype1 +	 *   upwards, once it has seen a value of 0b000, no caches +	 *   exist at further-out levels of the hierarchy. So, for +	 *   example, if Ctype3 is the first Cache Type field with a +	 *   value of 0b000, the values of Ctype4 to Ctype7 must be +	 *   ignored. +	 */ +	asm volatile("mrc p15, 1, %0, c0, c0, 1" : "=r" (cache_levels)); +	for (i = 0; i < 7; i++) +		if (((cache_levels >> (i*3)) & 7) == 0) +			break; +	/* Clear all higher bits. */ +	cache_levels &= (1 << (i*3))-1; +} + +/** + * kvm_reset_coprocs - sets cp15 registers to reset value + * @vcpu: The VCPU pointer + * + * This function finds the right table above and sets the registers on the + * virtual CPU struct to their architecturally defined reset values. + */ +void kvm_reset_coprocs(struct kvm_vcpu *vcpu) +{ +	size_t num; +	const struct coproc_reg *table; + +	/* Catch someone adding a register without putting in reset entry. */ +	memset(vcpu->arch.cp15, 0x42, sizeof(vcpu->arch.cp15)); + +	/* Generic chip reset first (so target could override). 
*/ +	reset_coproc_regs(vcpu, cp15_regs, ARRAY_SIZE(cp15_regs)); + +	table = get_target_table(vcpu->arch.target, &num); +	reset_coproc_regs(vcpu, table, num); + +	for (num = 1; num < NR_CP15_REGS; num++) +		if (vcpu->arch.cp15[num] == 0x42424242) +			panic("Didn't reset vcpu->arch.cp15[%zi]", num); +} diff --git a/arch/arm/kvm/coproc.h b/arch/arm/kvm/coproc.h new file mode 100644 index 00000000000..992adfafa2f --- /dev/null +++ b/arch/arm/kvm/coproc.h @@ -0,0 +1,153 @@ +/* + * Copyright (C) 2012 - Virtual Open Systems and Columbia University + * Authors: Christoffer Dall <c.dall@virtualopensystems.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA. + */ + +#ifndef __ARM_KVM_COPROC_LOCAL_H__ +#define __ARM_KVM_COPROC_LOCAL_H__ + +struct coproc_params { +	unsigned long CRn; +	unsigned long CRm; +	unsigned long Op1; +	unsigned long Op2; +	unsigned long Rt1; +	unsigned long Rt2; +	bool is_64bit; +	bool is_write; +}; + +struct coproc_reg { +	/* MRC/MCR/MRRC/MCRR instruction which accesses it. */ +	unsigned long CRn; +	unsigned long CRm; +	unsigned long Op1; +	unsigned long Op2; + +	bool is_64; + +	/* Trapped access from guest, if non-NULL. */ +	bool (*access)(struct kvm_vcpu *, +		       const struct coproc_params *, +		       const struct coproc_reg *); + +	/* Initialization for vcpu. */ +	void (*reset)(struct kvm_vcpu *, const struct coproc_reg *); + +	/* Index into vcpu->arch.cp15[], or 0 if we don't need to save it. */ +	unsigned long reg; + +	/* Value (usually reset value) */ +	u64 val; +}; + +static inline void print_cp_instr(const struct coproc_params *p) +{ +	/* Look, we even formatted it for you to paste into the table! */ +	if (p->is_64bit) { +		kvm_pr_unimpl(" { CRm(%2lu), Op1(%2lu), is64, func_%s },\n", +			      p->CRm, p->Op1, p->is_write ? "write" : "read"); +	} else { +		kvm_pr_unimpl(" { CRn(%2lu), CRm(%2lu), Op1(%2lu), Op2(%2lu), is32," +			      " func_%s },\n", +			      p->CRn, p->CRm, p->Op1, p->Op2, +			      p->is_write ? 
"write" : "read"); +	} +} + +static inline bool ignore_write(struct kvm_vcpu *vcpu, +				const struct coproc_params *p) +{ +	return true; +} + +static inline bool read_zero(struct kvm_vcpu *vcpu, +			     const struct coproc_params *p) +{ +	*vcpu_reg(vcpu, p->Rt1) = 0; +	return true; +} + +static inline bool write_to_read_only(struct kvm_vcpu *vcpu, +				      const struct coproc_params *params) +{ +	kvm_debug("CP15 write to read-only register at: %08x\n", +		  *vcpu_pc(vcpu)); +	print_cp_instr(params); +	return false; +} + +static inline bool read_from_write_only(struct kvm_vcpu *vcpu, +					const struct coproc_params *params) +{ +	kvm_debug("CP15 read to write-only register at: %08x\n", +		  *vcpu_pc(vcpu)); +	print_cp_instr(params); +	return false; +} + +/* Reset functions */ +static inline void reset_unknown(struct kvm_vcpu *vcpu, +				 const struct coproc_reg *r) +{ +	BUG_ON(!r->reg); +	BUG_ON(r->reg >= ARRAY_SIZE(vcpu->arch.cp15)); +	vcpu->arch.cp15[r->reg] = 0xdecafbad; +} + +static inline void reset_val(struct kvm_vcpu *vcpu, const struct coproc_reg *r) +{ +	BUG_ON(!r->reg); +	BUG_ON(r->reg >= ARRAY_SIZE(vcpu->arch.cp15)); +	vcpu->arch.cp15[r->reg] = r->val; +} + +static inline void reset_unknown64(struct kvm_vcpu *vcpu, +				   const struct coproc_reg *r) +{ +	BUG_ON(!r->reg); +	BUG_ON(r->reg + 1 >= ARRAY_SIZE(vcpu->arch.cp15)); + +	vcpu->arch.cp15[r->reg] = 0xdecafbad; +	vcpu->arch.cp15[r->reg+1] = 0xd0c0ffee; +} + +static inline int cmp_reg(const struct coproc_reg *i1, +			  const struct coproc_reg *i2) +{ +	BUG_ON(i1 == i2); +	if (!i1) +		return 1; +	else if (!i2) +		return -1; +	if (i1->CRn != i2->CRn) +		return i1->CRn - i2->CRn; +	if (i1->CRm != i2->CRm) +		return i1->CRm - i2->CRm; +	if (i1->Op1 != i2->Op1) +		return i1->Op1 - i2->Op1; +	return i1->Op2 - i2->Op2; +} + + +#define CRn(_x)		.CRn = _x +#define CRm(_x) 	.CRm = _x +#define Op1(_x) 	.Op1 = _x +#define Op2(_x) 	.Op2 = _x +#define is64		.is_64 = true +#define is32		.is_64 = false + +#endif /* __ARM_KVM_COPROC_LOCAL_H__ */ diff --git a/arch/arm/kvm/coproc_a15.c b/arch/arm/kvm/coproc_a15.c new file mode 100644 index 00000000000..685063a6d0c --- /dev/null +++ b/arch/arm/kvm/coproc_a15.c @@ -0,0 +1,162 @@ +/* + * Copyright (C) 2012 - Virtual Open Systems and Columbia University + * Authors: Rusty Russell <rusty@rustcorp.au> + *          Christoffer Dall <c.dall@virtualopensystems.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA. 
+ */ +#include <linux/kvm_host.h> +#include <asm/cputype.h> +#include <asm/kvm_arm.h> +#include <asm/kvm_host.h> +#include <asm/kvm_emulate.h> +#include <asm/kvm_coproc.h> +#include <linux/init.h> + +static void reset_mpidr(struct kvm_vcpu *vcpu, const struct coproc_reg *r) +{ +	/* +	 * Compute guest MPIDR: +	 * (Even if we present only one VCPU to the guest on an SMP +	 * host we don't set the U bit in the MPIDR, or vice versa, as +	 * revealing the underlying hardware properties is likely to +	 * be the best choice). +	 */ +	vcpu->arch.cp15[c0_MPIDR] = (read_cpuid_mpidr() & ~MPIDR_LEVEL_MASK) +		| (vcpu->vcpu_id & MPIDR_LEVEL_MASK); +} + +#include "coproc.h" + +/* A15 TRM 4.3.28: RO WI */ +static bool access_actlr(struct kvm_vcpu *vcpu, +			 const struct coproc_params *p, +			 const struct coproc_reg *r) +{ +	if (p->is_write) +		return ignore_write(vcpu, p); + +	*vcpu_reg(vcpu, p->Rt1) = vcpu->arch.cp15[c1_ACTLR]; +	return true; +} + +/* A15 TRM 4.3.60: R/O. */ +static bool access_cbar(struct kvm_vcpu *vcpu, +			const struct coproc_params *p, +			const struct coproc_reg *r) +{ +	if (p->is_write) +		return write_to_read_only(vcpu, p); +	return read_zero(vcpu, p); +} + +/* A15 TRM 4.3.48: R/O WI. */ +static bool access_l2ctlr(struct kvm_vcpu *vcpu, +			  const struct coproc_params *p, +			  const struct coproc_reg *r) +{ +	if (p->is_write) +		return ignore_write(vcpu, p); + +	*vcpu_reg(vcpu, p->Rt1) = vcpu->arch.cp15[c9_L2CTLR]; +	return true; +} + +static void reset_l2ctlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r) +{ +	u32 l2ctlr, ncores; + +	asm volatile("mrc p15, 1, %0, c9, c0, 2\n" : "=r" (l2ctlr)); +	l2ctlr &= ~(3 << 24); +	ncores = atomic_read(&vcpu->kvm->online_vcpus) - 1; +	l2ctlr |= (ncores & 3) << 24; + +	vcpu->arch.cp15[c9_L2CTLR] = l2ctlr; +} + +static void reset_actlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r) +{ +	u32 actlr; + +	/* ACTLR contains SMP bit: make sure you create all cpus first! */ +	asm volatile("mrc p15, 0, %0, c1, c0, 1\n" : "=r" (actlr)); +	/* Make the SMP bit consistent with the guest configuration */ +	if (atomic_read(&vcpu->kvm->online_vcpus) > 1) +		actlr |= 1U << 6; +	else +		actlr &= ~(1U << 6); + +	vcpu->arch.cp15[c1_ACTLR] = actlr; +} + +/* A15 TRM 4.3.49: R/O WI (even if NSACR.NS_L2ERR, a write of 1 is ignored). */ +static bool access_l2ectlr(struct kvm_vcpu *vcpu, +			   const struct coproc_params *p, +			   const struct coproc_reg *r) +{ +	if (p->is_write) +		return ignore_write(vcpu, p); + +	*vcpu_reg(vcpu, p->Rt1) = 0; +	return true; +} + +/* + * A15-specific CP15 registers. + * Important: Must be sorted ascending by CRn, CRM, Op1, Op2 + */ +static const struct coproc_reg a15_regs[] = { +	/* MPIDR: we use VMPIDR for guest access. */ +	{ CRn( 0), CRm( 0), Op1( 0), Op2( 5), is32, +			NULL, reset_mpidr, c0_MPIDR }, + +	/* SCTLR: swapped by interrupt.S. */ +	{ CRn( 1), CRm( 0), Op1( 0), Op2( 0), is32, +			NULL, reset_val, c1_SCTLR, 0x00C50078 }, +	/* ACTLR: trapped by HCR.TAC bit. */ +	{ CRn( 1), CRm( 0), Op1( 0), Op2( 1), is32, +			access_actlr, reset_actlr, c1_ACTLR }, +	/* CPACR: swapped by interrupt.S. */ +	{ CRn( 1), CRm( 0), Op1( 0), Op2( 2), is32, +			NULL, reset_val, c1_CPACR, 0x00000000 }, + +	/* +	 * L2CTLR access (guest wants to know #CPUs). +	 */ +	{ CRn( 9), CRm( 0), Op1( 1), Op2( 2), is32, +			access_l2ctlr, reset_l2ctlr, c9_L2CTLR }, +	{ CRn( 9), CRm( 0), Op1( 1), Op2( 3), is32, access_l2ectlr}, + +	/* The Configuration Base Address Register. 
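Handled as
+	 * RAZ here: reads return zero via access_cbar() rather than the
+	 * host's real peripheral base, and writes trap as writes to a
+	 * read-only register. 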
*/ +	{ CRn(15), CRm( 0), Op1( 4), Op2( 0), is32, access_cbar}, +}; + +static struct kvm_coproc_target_table a15_target_table = { +	.target = KVM_ARM_TARGET_CORTEX_A15, +	.table = a15_regs, +	.num = ARRAY_SIZE(a15_regs), +}; + +static int __init coproc_a15_init(void) +{ +	unsigned int i; + +	for (i = 1; i < ARRAY_SIZE(a15_regs); i++) +		BUG_ON(cmp_reg(&a15_regs[i-1], +			       &a15_regs[i]) >= 0); + +	kvm_register_target_coproc_table(&a15_target_table); +	return 0; +} +late_initcall(coproc_a15_init); diff --git a/arch/arm/kvm/emulate.c b/arch/arm/kvm/emulate.c new file mode 100644 index 00000000000..d61450ac666 --- /dev/null +++ b/arch/arm/kvm/emulate.c @@ -0,0 +1,373 @@ +/* + * Copyright (C) 2012 - Virtual Open Systems and Columbia University + * Author: Christoffer Dall <c.dall@virtualopensystems.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA. + */ + +#include <linux/mm.h> +#include <linux/kvm_host.h> +#include <asm/kvm_arm.h> +#include <asm/kvm_emulate.h> +#include <trace/events/kvm.h> + +#include "trace.h" + +#define VCPU_NR_MODES		6 +#define VCPU_REG_OFFSET_USR	0 +#define VCPU_REG_OFFSET_FIQ	1 +#define VCPU_REG_OFFSET_IRQ	2 +#define VCPU_REG_OFFSET_SVC	3 +#define VCPU_REG_OFFSET_ABT	4 +#define VCPU_REG_OFFSET_UND	5 +#define REG_OFFSET(_reg) \ +	(offsetof(struct kvm_regs, _reg) / sizeof(u32)) + +#define USR_REG_OFFSET(_num) REG_OFFSET(usr_regs.uregs[_num]) + +static const unsigned long vcpu_reg_offsets[VCPU_NR_MODES][15] = { +	/* USR/SYS Registers */ +	[VCPU_REG_OFFSET_USR] = { +		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2), +		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5), +		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8), +		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11), +		USR_REG_OFFSET(12), USR_REG_OFFSET(13),	USR_REG_OFFSET(14), +	}, + +	/* FIQ Registers */ +	[VCPU_REG_OFFSET_FIQ] = { +		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2), +		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5), +		USR_REG_OFFSET(6), USR_REG_OFFSET(7), +		REG_OFFSET(fiq_regs[0]), /* r8 */ +		REG_OFFSET(fiq_regs[1]), /* r9 */ +		REG_OFFSET(fiq_regs[2]), /* r10 */ +		REG_OFFSET(fiq_regs[3]), /* r11 */ +		REG_OFFSET(fiq_regs[4]), /* r12 */ +		REG_OFFSET(fiq_regs[5]), /* r13 */ +		REG_OFFSET(fiq_regs[6]), /* r14 */ +	}, + +	/* IRQ Registers */ +	[VCPU_REG_OFFSET_IRQ] = { +		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2), +		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5), +		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8), +		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11), +		USR_REG_OFFSET(12), +		REG_OFFSET(irq_regs[0]), /* r13 */ +		REG_OFFSET(irq_regs[1]), /* r14 */ +	}, + +	/* SVC Registers */ +	[VCPU_REG_OFFSET_SVC] = { +		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2), +		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5), +		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8), +		
USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
+		USR_REG_OFFSET(12),
+		REG_OFFSET(svc_regs[0]), /* r13 */
+		REG_OFFSET(svc_regs[1]), /* r14 */
+	},
+
+	/* ABT Registers */
+	[VCPU_REG_OFFSET_ABT] = {
+		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
+		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
+		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
+		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
+		USR_REG_OFFSET(12),
+		REG_OFFSET(abt_regs[0]), /* r13 */
+		REG_OFFSET(abt_regs[1]), /* r14 */
+	},
+
+	/* UND Registers */
+	[VCPU_REG_OFFSET_UND] = {
+		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
+		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
+		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
+		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
+		USR_REG_OFFSET(12),
+		REG_OFFSET(und_regs[0]), /* r13 */
+		REG_OFFSET(und_regs[1]), /* r14 */
+	},
+};
+
+/*
+ * Return a pointer to the register number valid in the current mode of
+ * the virtual CPU.
+ */
+u32 *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num)
+{
+	u32 *reg_array = (u32 *)&vcpu->arch.regs;
+	u32 mode = *vcpu_cpsr(vcpu) & MODE_MASK;
+
+	switch (mode) {
+	case USR_MODE...SVC_MODE:
+		mode &= ~MODE32_BIT; /* 0 ... 3 */
+		break;
+
+	case ABT_MODE:
+		mode = VCPU_REG_OFFSET_ABT;
+		break;
+
+	case UND_MODE:
+		mode = VCPU_REG_OFFSET_UND;
+		break;
+
+	case SYSTEM_MODE:
+		mode = VCPU_REG_OFFSET_USR;
+		break;
+
+	default:
+		BUG();
+	}
+
+	return reg_array + vcpu_reg_offsets[mode][reg_num];
+}
+
+/*
+ * Return the SPSR for the current mode of the virtual CPU.
+ */
+u32 *vcpu_spsr(struct kvm_vcpu *vcpu)
+{
+	u32 mode = *vcpu_cpsr(vcpu) & MODE_MASK;
+	switch (mode) {
+	case SVC_MODE:
+		return &vcpu->arch.regs.KVM_ARM_SVC_spsr;
+	case ABT_MODE:
+		return &vcpu->arch.regs.KVM_ARM_ABT_spsr;
+	case UND_MODE:
+		return &vcpu->arch.regs.KVM_ARM_UND_spsr;
+	case IRQ_MODE:
+		return &vcpu->arch.regs.KVM_ARM_IRQ_spsr;
+	case FIQ_MODE:
+		return &vcpu->arch.regs.KVM_ARM_FIQ_spsr;
+	default:
+		BUG();
+	}
+}
+
+/**
+ * kvm_handle_wfi - handle a wait-for-interrupts instruction executed by a guest
+ * @vcpu:	the vcpu pointer
+ * @run:	the kvm_run structure pointer
+ *
+ * Simply blocks the VCPU (via kvm_vcpu_block), which halts execution of
+ * world-switches and schedules other host processes until there is an
+ * incoming IRQ or FIQ to the VM.
+ */
+int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+	trace_kvm_wfi(*vcpu_pc(vcpu));
+	kvm_vcpu_block(vcpu);
+	return 1;
+}
+
+/**
+ * adjust_itstate - adjust ITSTATE when emulating instructions in IT-block
+ * @vcpu:	The VCPU pointer
+ *
+ * When exceptions occur while instructions are executed in Thumb IF-THEN
+ * blocks, the ITSTATE field of the CPSR is not advanced (updated), so we have
+ * to do this little bit of work manually. 
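As a
+ * concrete trace of the rule implemented below (values illustrative):
+ * an "ITT EQ" block begins with ITSTATE = 0x04 (base condition EQ, two
+ * instructions left); one advance yields 0x08 (one instruction left)
+ * and the next advance clears ITSTATE to zero, ending the block.
+ *
+ * 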
The fields map like this: + * + * IT[7:0] -> CPSR[26:25],CPSR[15:10] + */ +static void kvm_adjust_itstate(struct kvm_vcpu *vcpu) +{ +	unsigned long itbits, cond; +	unsigned long cpsr = *vcpu_cpsr(vcpu); +	bool is_arm = !(cpsr & PSR_T_BIT); + +	BUG_ON(is_arm && (cpsr & PSR_IT_MASK)); + +	if (!(cpsr & PSR_IT_MASK)) +		return; + +	cond = (cpsr & 0xe000) >> 13; +	itbits = (cpsr & 0x1c00) >> (10 - 2); +	itbits |= (cpsr & (0x3 << 25)) >> 25; + +	/* Perform ITAdvance (see page A-52 in ARM DDI 0406C) */ +	if ((itbits & 0x7) == 0) +		itbits = cond = 0; +	else +		itbits = (itbits << 1) & 0x1f; + +	cpsr &= ~PSR_IT_MASK; +	cpsr |= cond << 13; +	cpsr |= (itbits & 0x1c) << (10 - 2); +	cpsr |= (itbits & 0x3) << 25; +	*vcpu_cpsr(vcpu) = cpsr; +} + +/** + * kvm_skip_instr - skip a trapped instruction and proceed to the next + * @vcpu: The vcpu pointer + */ +void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr) +{ +	bool is_thumb; + +	is_thumb = !!(*vcpu_cpsr(vcpu) & PSR_T_BIT); +	if (is_thumb && !is_wide_instr) +		*vcpu_pc(vcpu) += 2; +	else +		*vcpu_pc(vcpu) += 4; +	kvm_adjust_itstate(vcpu); +} + + +/****************************************************************************** + * Inject exceptions into the guest + */ + +static u32 exc_vector_base(struct kvm_vcpu *vcpu) +{ +	u32 sctlr = vcpu->arch.cp15[c1_SCTLR]; +	u32 vbar = vcpu->arch.cp15[c12_VBAR]; + +	if (sctlr & SCTLR_V) +		return 0xffff0000; +	else /* always have security exceptions */ +		return vbar; +} + +/** + * kvm_inject_undefined - inject an undefined exception into the guest + * @vcpu: The VCPU to receive the undefined exception + * + * It is assumed that this code is called from the VCPU thread and that the + * VCPU therefore is not currently executing guest code. + * + * Modelled after TakeUndefInstrException() pseudocode. + */ +void kvm_inject_undefined(struct kvm_vcpu *vcpu) +{ +	u32 new_lr_value; +	u32 new_spsr_value; +	u32 cpsr = *vcpu_cpsr(vcpu); +	u32 sctlr = vcpu->arch.cp15[c1_SCTLR]; +	bool is_thumb = (cpsr & PSR_T_BIT); +	u32 vect_offset = 4; +	u32 return_offset = (is_thumb) ? 2 : 4; + +	new_spsr_value = cpsr; +	new_lr_value = *vcpu_pc(vcpu) - return_offset; + +	*vcpu_cpsr(vcpu) = (cpsr & ~MODE_MASK) | UND_MODE; +	*vcpu_cpsr(vcpu) |= PSR_I_BIT; +	*vcpu_cpsr(vcpu) &= ~(PSR_IT_MASK | PSR_J_BIT | PSR_E_BIT | PSR_T_BIT); + +	if (sctlr & SCTLR_TE) +		*vcpu_cpsr(vcpu) |= PSR_T_BIT; +	if (sctlr & SCTLR_EE) +		*vcpu_cpsr(vcpu) |= PSR_E_BIT; + +	/* Note: These now point to UND banked copies */ +	*vcpu_spsr(vcpu) = cpsr; +	*vcpu_reg(vcpu, 14) = new_lr_value; + +	/* Branch to exception vector */ +	*vcpu_pc(vcpu) = exc_vector_base(vcpu) + vect_offset; +} + +/* + * Modelled after TakeDataAbortException() and TakePrefetchAbortException + * pseudocode. + */ +static void inject_abt(struct kvm_vcpu *vcpu, bool is_pabt, unsigned long addr) +{ +	u32 new_lr_value; +	u32 new_spsr_value; +	u32 cpsr = *vcpu_cpsr(vcpu); +	u32 sctlr = vcpu->arch.cp15[c1_SCTLR]; +	bool is_thumb = (cpsr & PSR_T_BIT); +	u32 vect_offset; +	u32 return_offset = (is_thumb) ? 
4 : 0; +	bool is_lpae; + +	new_spsr_value = cpsr; +	new_lr_value = *vcpu_pc(vcpu) + return_offset; + +	*vcpu_cpsr(vcpu) = (cpsr & ~MODE_MASK) | ABT_MODE; +	*vcpu_cpsr(vcpu) |= PSR_I_BIT | PSR_A_BIT; +	*vcpu_cpsr(vcpu) &= ~(PSR_IT_MASK | PSR_J_BIT | PSR_E_BIT | PSR_T_BIT); + +	if (sctlr & SCTLR_TE) +		*vcpu_cpsr(vcpu) |= PSR_T_BIT; +	if (sctlr & SCTLR_EE) +		*vcpu_cpsr(vcpu) |= PSR_E_BIT; + +	/* Note: These now point to ABT banked copies */ +	*vcpu_spsr(vcpu) = cpsr; +	*vcpu_reg(vcpu, 14) = new_lr_value; + +	if (is_pabt) +		vect_offset = 12; +	else +		vect_offset = 16; + +	/* Branch to exception vector */ +	*vcpu_pc(vcpu) = exc_vector_base(vcpu) + vect_offset; + +	if (is_pabt) { +		/* Set IFAR and IFSR */ +		vcpu->arch.cp15[c6_IFAR] = addr; +		is_lpae = (vcpu->arch.cp15[c2_TTBCR] >> 31); +		/* Always give debug fault for now - should give guest a clue */ +		if (is_lpae) +			vcpu->arch.cp15[c5_IFSR] = 1 << 9 | 0x22; +		else +			vcpu->arch.cp15[c5_IFSR] = 2; +	} else { /* !is_pabt */ +		/* Set DFAR and DFSR */ +		vcpu->arch.cp15[c6_DFAR] = addr; +		is_lpae = (vcpu->arch.cp15[c2_TTBCR] >> 31); +		/* Always give debug fault for now - should give guest a clue */ +		if (is_lpae) +			vcpu->arch.cp15[c5_DFSR] = 1 << 9 | 0x22; +		else +			vcpu->arch.cp15[c5_DFSR] = 2; +	} +} + +/** + * kvm_inject_dabt - inject a data abort into the guest + * @vcpu: The VCPU to receive the data abort + * @addr: The address to report in the DFAR + * + * It is assumed that this code is called from the VCPU thread and that the + * VCPU therefore is not currently executing guest code. + */ +void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr) +{ +	inject_abt(vcpu, false, addr); +} + +/** + * kvm_inject_pabt - inject a prefetch abort into the guest + * @vcpu: The VCPU to receive the prefetch abort + * @addr: The address to report in the IFAR + * + * It is assumed that this code is called from the VCPU thread and that the + * VCPU therefore is not currently executing guest code. + */ +void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr) +{ +	inject_abt(vcpu, true, addr); +} diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c new file mode 100644 index 00000000000..2339d9609d3 --- /dev/null +++ b/arch/arm/kvm/guest.c @@ -0,0 +1,222 @@ +/* + * Copyright (C) 2012 - Virtual Open Systems and Columbia University + * Author: Christoffer Dall <c.dall@virtualopensystems.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */ + +#include <linux/errno.h> +#include <linux/err.h> +#include <linux/kvm_host.h> +#include <linux/module.h> +#include <linux/vmalloc.h> +#include <linux/fs.h> +#include <asm/uaccess.h> +#include <asm/kvm.h> +#include <asm/kvm_asm.h> +#include <asm/kvm_emulate.h> +#include <asm/kvm_coproc.h> + +#define VM_STAT(x) { #x, offsetof(struct kvm, stat.x), KVM_STAT_VM } +#define VCPU_STAT(x) { #x, offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU } + +struct kvm_stats_debugfs_item debugfs_entries[] = { +	{ NULL } +}; + +int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) +{ +	return 0; +} + +static u64 core_reg_offset_from_id(u64 id) +{ +	return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE); +} + +static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) +{ +	u32 __user *uaddr = (u32 __user *)(long)reg->addr; +	struct kvm_regs *regs = &vcpu->arch.regs; +	u64 off; + +	if (KVM_REG_SIZE(reg->id) != 4) +		return -ENOENT; + +	/* Our ID is an index into the kvm_regs struct. */ +	off = core_reg_offset_from_id(reg->id); +	if (off >= sizeof(*regs) / KVM_REG_SIZE(reg->id)) +		return -ENOENT; + +	return put_user(((u32 *)regs)[off], uaddr); +} + +static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) +{ +	u32 __user *uaddr = (u32 __user *)(long)reg->addr; +	struct kvm_regs *regs = &vcpu->arch.regs; +	u64 off, val; + +	if (KVM_REG_SIZE(reg->id) != 4) +		return -ENOENT; + +	/* Our ID is an index into the kvm_regs struct. */ +	off = core_reg_offset_from_id(reg->id); +	if (off >= sizeof(*regs) / KVM_REG_SIZE(reg->id)) +		return -ENOENT; + +	if (get_user(val, uaddr) != 0) +		return -EFAULT; + +	if (off == KVM_REG_ARM_CORE_REG(usr_regs.ARM_cpsr)) { +		unsigned long mode = val & MODE_MASK; +		switch (mode) { +		case USR_MODE: +		case FIQ_MODE: +		case IRQ_MODE: +		case SVC_MODE: +		case ABT_MODE: +		case UND_MODE: +			break; +		default: +			return -EINVAL; +		} +	} + +	((u32 *)regs)[off] = val; +	return 0; +} + +int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) +{ +	return -EINVAL; +} + +int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) +{ +	return -EINVAL; +} + +static unsigned long num_core_regs(void) +{ +	return sizeof(struct kvm_regs) / sizeof(u32); +} + +/** + * kvm_arm_num_regs - how many registers do we present via KVM_GET_ONE_REG + * + * This is for all registers. + */ +unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu) +{ +	return num_core_regs() + kvm_arm_num_coproc_regs(vcpu); +} + +/** + * kvm_arm_copy_reg_indices - get indices of all registers. + * + * We do core registers right here, then we append coproc regs. + */ +int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices) +{ +	unsigned int i; +	const u64 core_reg = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_CORE; + +	for (i = 0; i < sizeof(struct kvm_regs)/sizeof(u32); i++) { +		if (put_user(core_reg | i, uindices)) +			return -EFAULT; +		uindices++; +	} + +	return kvm_arm_copy_coproc_indices(vcpu, uindices); +} + +int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) +{ +	/* We currently use nothing arch-specific in upper 32 bits */ +	if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM >> 32) +		return -EINVAL; + +	/* Register group 16 means we want a core register.
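 + * (KVM_REG_ARM_CORE encodes coprocessor group 16 in the id; any other group + * is routed to the cp15 coprocessor emulation code instead.)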
*/ +	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE) +		return get_core_reg(vcpu, reg); + +	return kvm_arm_coproc_get_reg(vcpu, reg); +} + +int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) +{ +	/* We currently use nothing arch-specific in upper 32 bits */ +	if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM >> 32) +		return -EINVAL; + +	/* Register group 16 means we set a core register. */ +	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE) +		return set_core_reg(vcpu, reg); + +	return kvm_arm_coproc_set_reg(vcpu, reg); +} + +int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, +				  struct kvm_sregs *sregs) +{ +	return -EINVAL; +} + +int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, +				  struct kvm_sregs *sregs) +{ +	return -EINVAL; +} + +int kvm_vcpu_set_target(struct kvm_vcpu *vcpu, +			const struct kvm_vcpu_init *init) +{ +	unsigned int i; + +	/* We can only do a cortex A15 for now. */ +	if (init->target != kvm_target_cpu()) +		return -EINVAL; + +	vcpu->arch.target = init->target; +	bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES); + +	/* -ENOENT for unknown features, -EINVAL for invalid combinations. */ +	for (i = 0; i < sizeof(init->features) * 8; i++) { +		if (test_bit(i, (void *)init->features)) { +			if (i >= KVM_VCPU_MAX_FEATURES) +				return -ENOENT; +			set_bit(i, vcpu->arch.features); +		} +	} + +	/* Now we know what it is, we can reset it. */ +	return kvm_reset_vcpu(vcpu); +} + +int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) +{ +	return -EINVAL; +} + +int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) +{ +	return -EINVAL; +} + +int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, +				  struct kvm_translation *tr) +{ +	return -EINVAL; +} diff --git a/arch/arm/kvm/init.S b/arch/arm/kvm/init.S new file mode 100644 index 00000000000..9f37a79b880 --- /dev/null +++ b/arch/arm/kvm/init.S @@ -0,0 +1,114 @@ +/* + * Copyright (C) 2012 - Virtual Open Systems and Columbia University + * Author: Christoffer Dall <c.dall@virtualopensystems.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA. + */ + +#include <linux/linkage.h> +#include <asm/unified.h> +#include <asm/asm-offsets.h> +#include <asm/kvm_asm.h> +#include <asm/kvm_arm.h> + +/******************************************************************** + * Hypervisor initialization + *   - should be called with: + *       r0,r1 = Hypervisor pgd pointer + *       r2 = top of Hyp stack (kernel VA) + *       r3 = pointer to hyp vectors + */ + +	.text +	.pushsection    .hyp.idmap.text,"ax" +	.align 5 +__kvm_hyp_init: +	.globl __kvm_hyp_init + +	@ Hyp-mode exception vector +	W(b)	. +	W(b)	. +	W(b)	. +	W(b)	. +	W(b)	. +	W(b)	__do_hyp_init +	W(b)	. +	W(b)	. 
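 + +	@ Every slot except the HVC entry just spins in place; __do_hyp_init is +	@ entered exactly once, via an HVC issued with r0-r3 set up as described +	@ in the header comment above.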
+ +__do_hyp_init: +	@ Set the HTTBR to point to the hypervisor PGD pointer passed +	mcrr	p15, 4, r0, r1, c2 + +	@ Set the HTCR and VTCR to the same shareability and cacheability +	@ settings as the non-secure TTBCR and with T0SZ == 0. +	mrc	p15, 4, r0, c2, c0, 2	@ HTCR +	ldr	r12, =HTCR_MASK +	bic	r0, r0, r12 +	mrc	p15, 0, r1, c2, c0, 2	@ TTBCR +	and	r1, r1, #(HTCR_MASK & ~TTBCR_T0SZ) +	orr	r0, r0, r1 +	mcr	p15, 4, r0, c2, c0, 2	@ HTCR + +	mrc	p15, 4, r1, c2, c1, 2	@ VTCR +	ldr	r12, =VTCR_MASK +	bic	r1, r1, r12 +	bic	r0, r0, #(~VTCR_HTCR_SH)	@ clear non-reusable HTCR bits +	orr	r1, r0, r1 +	orr	r1, r1, #(KVM_VTCR_SL0 | KVM_VTCR_T0SZ | KVM_VTCR_S) +	mcr	p15, 4, r1, c2, c1, 2	@ VTCR + +	@ Use the same memory attributes for Hyp accesses as the kernel +	@ (copy MAIRx to HMAIRx). +	mrc	p15, 0, r0, c10, c2, 0 +	mcr	p15, 4, r0, c10, c2, 0 +	mrc	p15, 0, r0, c10, c2, 1 +	mcr	p15, 4, r0, c10, c2, 1 + +	@ Set the HSCTLR to: +	@  - ARM/THUMB exceptions: Kernel config (Thumb-2 kernel) +	@  - Endianness: Kernel config +	@  - Fast Interrupt Features: Kernel config +	@  - Write permission implies XN: disabled +	@  - Instruction cache: enabled +	@  - Data/Unified cache: enabled +	@  - Memory alignment checks: enabled +	@  - MMU: enabled (this code must be run from an identity mapping) +	mrc	p15, 4, r0, c1, c0, 0	@ HSCTLR +	ldr	r12, =HSCTLR_MASK +	bic	r0, r0, r12 +	mrc	p15, 0, r1, c1, c0, 0	@ SCTLR +	ldr	r12, =(HSCTLR_EE | HSCTLR_FI | HSCTLR_I | HSCTLR_C) +	and	r1, r1, r12 + ARM(	ldr	r12, =(HSCTLR_M | HSCTLR_A)			) + THUMB(	ldr	r12, =(HSCTLR_M | HSCTLR_A | HSCTLR_TE)		) +	orr	r1, r1, r12 +	orr	r0, r0, r1 +	isb +	mcr	p15, 4, r0, c1, c0, 0	@ HSCTLR +	isb + +	@ Set stack pointer and return to the kernel +	mov	sp, r2 + +	@ Set HVBAR to point to the HYP vectors +	mcr	p15, 4, r3, c12, c0, 0	@ HVBAR + +	eret + +	.ltorg + +	.globl __kvm_hyp_init_end +__kvm_hyp_init_end: + +	.popsection diff --git a/arch/arm/kvm/interrupts.S b/arch/arm/kvm/interrupts.S new file mode 100644 index 00000000000..c5400d2e97c --- /dev/null +++ b/arch/arm/kvm/interrupts.S @@ -0,0 +1,478 @@ +/* + * Copyright (C) 2012 - Virtual Open Systems and Columbia University + * Author: Christoffer Dall <c.dall@virtualopensystems.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA. + */ + +#include <linux/linkage.h> +#include <linux/const.h> +#include <asm/unified.h> +#include <asm/page.h> +#include <asm/ptrace.h> +#include <asm/asm-offsets.h> +#include <asm/kvm_asm.h> +#include <asm/kvm_arm.h> +#include <asm/vfpmacros.h> +#include "interrupts_head.S" + +	.text + +__kvm_hyp_code_start: +	.globl __kvm_hyp_code_start + +/******************************************************************** + * Flush per-VMID TLBs + * + * void __kvm_tlb_flush_vmid(struct kvm *kvm); + * + * We rely on the hardware to broadcast the TLB invalidation to all CPUs + * inside the inner-shareable domain (which is the case for all v7 + * implementations).
If we come across a non-IS SMP implementation, we'll + * have to use an IPI based mechanism. Until then, we stick to the simple + * hardware assisted version. + */ +ENTRY(__kvm_tlb_flush_vmid) +	push	{r2, r3} + +	add	r0, r0, #KVM_VTTBR +	ldrd	r2, r3, [r0] +	mcrr	p15, 6, r2, r3, c2	@ Write VTTBR +	isb +	mcr     p15, 0, r0, c8, c3, 0	@ TLBIALLIS (rt ignored) +	dsb +	isb +	mov	r2, #0 +	mov	r3, #0 +	mcrr	p15, 6, r2, r3, c2	@ Back to VMID #0 +	isb				@ Not necessary if followed by eret + +	pop	{r2, r3} +	bx	lr +ENDPROC(__kvm_tlb_flush_vmid) + +/******************************************************************** + * Flush TLBs and instruction caches of all CPUs inside the inner-shareable + * domain, for all VMIDs + * + * void __kvm_flush_vm_context(void); + */ +ENTRY(__kvm_flush_vm_context) +	mov	r0, #0			@ rn parameter for c15 flushes is SBZ + +	/* Invalidate NS Non-Hyp TLB Inner Shareable (TLBIALLNSNHIS) */ +	mcr     p15, 4, r0, c8, c3, 4 +	/* Invalidate instruction caches Inner Shareable (ICIALLUIS) */ +	mcr     p15, 0, r0, c7, c1, 0 +	dsb +	isb				@ Not necessary if followed by eret + +	bx	lr +ENDPROC(__kvm_flush_vm_context) + + +/******************************************************************** + *  Hypervisor world-switch code + * + * + * int __kvm_vcpu_run(struct kvm_vcpu *vcpu) + */ +ENTRY(__kvm_vcpu_run) +	@ Save the vcpu pointer +	mcr	p15, 4, vcpu, c13, c0, 2	@ HTPIDR + +	save_host_regs + +	@ Store hardware CP15 state and load guest state +	read_cp15_state store_to_vcpu = 0 +	write_cp15_state read_from_vcpu = 1 + +	@ If the host kernel has not been configured with VFPv3 support, +	@ then it is safer to deny the guest the use of it as well. +#ifdef CONFIG_VFPv3 +	@ Set FPEXC_EN so the guest doesn't trap floating point instructions +	VFPFMRX r2, FPEXC		@ VMRS +	push	{r2} +	orr	r2, r2, #FPEXC_EN +	VFPFMXR FPEXC, r2		@ VMSR +#endif + +	@ Configure Hyp-role +	configure_hyp_role vmentry + +	@ Trap coprocessor CRx accesses +	set_hstr vmentry +	set_hcptr vmentry, (HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11)) +	set_hdcr vmentry + +	@ Write configured ID register into MIDR alias +	ldr	r1, [vcpu, #VCPU_MIDR] +	mcr	p15, 4, r1, c0, c0, 0 + +	@ Write guest view of MPIDR into VMPIDR +	ldr	r1, [vcpu, #CP15_OFFSET(c0_MPIDR)] +	mcr	p15, 4, r1, c0, c0, 5 + +	@ Set up guest memory translation +	ldr	r1, [vcpu, #VCPU_KVM] +	add	r1, r1, #KVM_VTTBR +	ldrd	r2, r3, [r1] +	mcrr	p15, 6, r2, r3, c2	@ Write VTTBR + +	@ We're all done, just restore the GPRs and go to the guest +	restore_guest_regs +	clrex				@ Clear exclusive monitor +	eret + +__kvm_vcpu_return: +	/* +	 * return convention: +	 * guest r0, r1, r2 saved on the stack +	 * r0: vcpu pointer +	 * r1: exception code +	 */ +	save_guest_regs + +	@ Set VMID == 0 +	mov	r2, #0 +	mov	r3, #0 +	mcrr	p15, 6, r2, r3, c2	@ Write VTTBR + +	@ Don't trap coprocessor accesses for host kernel +	set_hstr vmexit +	set_hdcr vmexit +	set_hcptr vmexit, (HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11)) + +#ifdef CONFIG_VFPv3 +	@ Save floating point registers if we let the guest use them.
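+	@ set_hcptr above left the previous HCPTR value in r2: if the cp10/cp11 +	@ trap bits were still set at guest exit, the guest never touched VFP +	@ and there is nothing to save.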
+	tst	r2, #(HCPTR_TCP(10) | HCPTR_TCP(11)) +	bne	after_vfp_restore + +	@ Switch VFP/NEON hardware state to the host's +	add	r7, vcpu, #VCPU_VFP_GUEST +	store_vfp_state r7 +	add	r7, vcpu, #VCPU_VFP_HOST +	ldr	r7, [r7] +	restore_vfp_state r7 + +after_vfp_restore: +	@ Restore FPEXC_EN which we clobbered on entry +	pop	{r2} +	VFPFMXR FPEXC, r2 +#endif + +	@ Reset Hyp-role +	configure_hyp_role vmexit + +	@ Let host read hardware MIDR +	mrc	p15, 0, r2, c0, c0, 0 +	mcr	p15, 4, r2, c0, c0, 0 + +	@ Back to hardware MPIDR +	mrc	p15, 0, r2, c0, c0, 5 +	mcr	p15, 4, r2, c0, c0, 5 + +	@ Store guest CP15 state and restore host state +	read_cp15_state store_to_vcpu = 1 +	write_cp15_state read_from_vcpu = 0 + +	restore_host_regs +	clrex				@ Clear exclusive monitor +	mov	r0, r1			@ Return the return code +	mov	r1, #0			@ Clear upper bits in return value +	bx	lr			@ return to IOCTL + +/******************************************************************** + *  Call function in Hyp mode + * + * + * u64 kvm_call_hyp(void *hypfn, ...); + * + * This is not really a variadic function in the classic C way and care must + * be taken when calling this to ensure parameters are passed in registers + * only, since the stack will change between the caller and the callee. + * + * Call the function with the first argument containing a pointer to the + * function you wish to call in Hyp mode, and subsequent arguments will be + * passed as r0, r1, and r2 (a maximum of 3 arguments in addition to the + * function pointer can be passed).  The function being called must be mapped + * in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c).  Return values are + * passed in r0 and r1. + * + * The calling convention follows the standard AAPCS: + *   r0 - r3: caller save + *   r12:     caller save + *   rest:    callee save + */ +ENTRY(kvm_call_hyp) +	hvc	#0 +	bx	lr + +/******************************************************************** + * Hypervisor exception vector and handlers + * + * + * The KVM/ARM Hypervisor ABI is defined as follows: + * + * Entry to Hyp mode from the host kernel will happen _only_ when an HVC + * instruction is issued since all traps are disabled when running the host + * kernel as per the Hyp-mode initialization at boot time. + * + * HVC instructions cause a trap to the vector page + offset 0x14 (see hyp_hvc + * below) when the HVC instruction is called from SVC mode (i.e. a guest or the + * host kernel) and they cause a trap to the vector page + offset 0x8 when HVC + * instructions are called from within Hyp-mode. + * + * Hyp-ABI: Calling HYP-mode functions from host (in SVC mode): + *    Switching to Hyp mode is done through a simple HVC #0 instruction. The + *    exception vector code will check that the HVC comes from VMID==0 and if + *    so will push the necessary state (SPSR, lr_usr) on the Hyp stack. + *    - r0 contains a pointer to a HYP function + *    - r1, r2, and r3 contain arguments to the above function. + *    - The HYP function will be called with its arguments in r0, r1 and r2. + *    On HYP function return, we return directly to SVC. + * + * Note that the above is used to execute code in Hyp-mode from a host-kernel + * point of view, and is a different concept from performing a world-switch and + * executing guest code in SVC mode (with a VMID != 0).
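 + * + * As a concrete example, the per-VMID TLB flush above is initiated from the + * host as + * + *	kvm_call_hyp(__kvm_tlb_flush_vmid, kvm); + * + * which traps to hyp_hvc below, runs __kvm_tlb_flush_vmid in Hyp mode with + * the kvm pointer in r0, and then returns to SVC.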
+ */ + +/* Handle undef, svc, pabt, or dabt by crashing with a user notice */ +.macro bad_exception exception_code, panic_str +	push	{r0-r2} +	mrrc	p15, 6, r0, r1, c2	@ Read VTTBR +	lsr	r1, r1, #16 +	ands	r1, r1, #0xff +	beq	99f + +	load_vcpu			@ Load VCPU pointer +	.if \exception_code == ARM_EXCEPTION_DATA_ABORT +	mrc	p15, 4, r2, c5, c2, 0	@ HSR +	mrc	p15, 4, r1, c6, c0, 0	@ HDFAR +	str	r2, [vcpu, #VCPU_HSR] +	str	r1, [vcpu, #VCPU_HxFAR] +	.endif +	.if \exception_code == ARM_EXCEPTION_PREF_ABORT +	mrc	p15, 4, r2, c5, c2, 0	@ HSR +	mrc	p15, 4, r1, c6, c0, 2	@ HIFAR +	str	r2, [vcpu, #VCPU_HSR] +	str	r1, [vcpu, #VCPU_HxFAR] +	.endif +	mov	r1, #\exception_code +	b	__kvm_vcpu_return + +	@ We were in the host already. Let's craft a panicking return to SVC. +99:	mrs	r2, cpsr +	bic	r2, r2, #MODE_MASK +	orr	r2, r2, #SVC_MODE +THUMB(	orr	r2, r2, #PSR_T_BIT	) +	msr	spsr_cxsf, r2 +	mrs	r1, ELR_hyp +	ldr	r2, =BSYM(panic) +	msr	ELR_hyp, r2 +	ldr	r0, =\panic_str +	eret +.endm + +	.text + +	.align 5 +__kvm_hyp_vector: +	.globl __kvm_hyp_vector + +	@ Hyp-mode exception vector +	W(b)	hyp_reset +	W(b)	hyp_undef +	W(b)	hyp_svc +	W(b)	hyp_pabt +	W(b)	hyp_dabt +	W(b)	hyp_hvc +	W(b)	hyp_irq +	W(b)	hyp_fiq + +	.align +hyp_reset: +	b	hyp_reset + +	.align +hyp_undef: +	bad_exception ARM_EXCEPTION_UNDEFINED, und_die_str + +	.align +hyp_svc: +	bad_exception ARM_EXCEPTION_HVC, svc_die_str + +	.align +hyp_pabt: +	bad_exception ARM_EXCEPTION_PREF_ABORT, pabt_die_str + +	.align +hyp_dabt: +	bad_exception ARM_EXCEPTION_DATA_ABORT, dabt_die_str + +	.align +hyp_hvc: +	/* +	 * Getting here is either because of a trap from a guest or from calling +	 * HVC from the host kernel, which means "switch to Hyp mode". +	 */ +	push	{r0, r1, r2} + +	@ Check syndrome register +	mrc	p15, 4, r1, c5, c2, 0	@ HSR +	lsr	r0, r1, #HSR_EC_SHIFT +#ifdef CONFIG_VFPv3 +	cmp	r0, #HSR_EC_CP_0_13 +	beq	switch_to_guest_vfp +#endif +	cmp	r0, #HSR_EC_HVC +	bne	guest_trap		@ Not HVC instr. + +	/* +	 * Let's check if the HVC came from VMID 0 and allow simple +	 * switch to Hyp mode +	 */ +	mrrc    p15, 6, r0, r2, c2 +	lsr     r2, r2, #16 +	and     r2, r2, #0xff +	cmp     r2, #0 +	bne	guest_trap		@ Guest called HVC + +host_switch_to_hyp: +	pop	{r0, r1, r2} + +	push	{lr} +	mrs	lr, SPSR +	push	{lr} + +	mov	lr, r0 +	mov	r0, r1 +	mov	r1, r2 +	mov	r2, r3 + +THUMB(	orr	lr, #1) +	blx	lr			@ Call the HYP function + +	pop	{lr} +	msr	SPSR_cxsf, lr +	pop	{lr} +	eret + +guest_trap: +	load_vcpu			@ Load VCPU pointer to r0 +	str	r1, [vcpu, #VCPU_HSR] + +	@ Check if we need the fault information +	lsr	r1, r1, #HSR_EC_SHIFT +	cmp	r1, #HSR_EC_IABT +	mrceq	p15, 4, r2, c6, c0, 2	@ HIFAR +	beq	2f +	cmp	r1, #HSR_EC_DABT +	bne	1f +	mrc	p15, 4, r2, c6, c0, 0	@ HDFAR + +2:	str	r2, [vcpu, #VCPU_HxFAR] + +	/* +	 * B3.13.5 Reporting exceptions taken to the Non-secure PL2 mode: +	 * +	 * Abort on the stage 2 translation for a memory access from a +	 * Non-secure PL1 or PL0 mode: +	 * +	 * For any Access flag fault or Translation fault, and also for any +	 * Permission fault on the stage 2 translation of a memory access +	 * made as part of a translation table walk for a stage 1 translation, +	 * the HPFAR holds the IPA that caused the fault. Otherwise, the HPFAR +	 * is UNKNOWN.
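 +	 * +	 * The code below therefore uses the HPFAR whenever it is +	 * architecturally valid, and only falls back to translating the xFAR +	 * by hand (via ATS1CPR) for a permission fault that is not part of a +	 * stage 1 table walk.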
+	 */ + +	/* Check for permission fault, and S1PTW */ +	mrc	p15, 4, r1, c5, c2, 0	@ HSR +	and	r0, r1, #HSR_FSC_TYPE +	cmp	r0, #FSC_PERM +	tsteq	r1, #(1 << 7)		@ S1PTW +	mrcne	p15, 4, r2, c6, c0, 4	@ HPFAR +	bne	3f + +	/* Resolve IPA using the xFAR */ +	mcr	p15, 0, r2, c7, c8, 0	@ ATS1CPR +	isb +	mrrc	p15, 0, r0, r1, c7	@ PAR +	tst	r0, #1 +	bne	4f			@ Failed translation +	ubfx	r2, r0, #12, #20 +	lsl	r2, r2, #4 +	orr	r2, r2, r1, lsl #24 + +3:	load_vcpu			@ Load VCPU pointer to r0 +	str	r2, [r0, #VCPU_HPFAR] + +1:	mov	r1, #ARM_EXCEPTION_HVC +	b	__kvm_vcpu_return + +4:	pop	{r0, r1, r2}		@ Failed translation, return to guest +	eret + +/* + * If VFPv3 support is not available, then we will not switch the VFP + * registers; however cp10 and cp11 accesses will still trap and fall back + * to the regular coprocessor emulation code, which currently will + * inject an undefined exception to the guest. + */ +#ifdef CONFIG_VFPv3 +switch_to_guest_vfp: +	load_vcpu			@ Load VCPU pointer to r0 +	push	{r3-r7} + +	@ NEON/VFP used.  Turn on VFP access. +	set_hcptr vmexit, (HCPTR_TCP(10) | HCPTR_TCP(11)) + +	@ Switch VFP/NEON hardware state to the guest's +	add	r7, r0, #VCPU_VFP_HOST +	ldr	r7, [r7] +	store_vfp_state r7 +	add	r7, r0, #VCPU_VFP_GUEST +	restore_vfp_state r7 + +	pop	{r3-r7} +	pop	{r0-r2} +	eret +#endif + +	.align +hyp_irq: +	push	{r0, r1, r2} +	mov	r1, #ARM_EXCEPTION_IRQ +	load_vcpu			@ Load VCPU pointer to r0 +	b	__kvm_vcpu_return + +	.align +hyp_fiq: +	b	hyp_fiq + +	.ltorg + +__kvm_hyp_code_end: +	.globl	__kvm_hyp_code_end + +	.section ".rodata" + +und_die_str: +	.ascii	"unexpected undefined exception in Hyp mode at: %#08x" +pabt_die_str: +	.ascii	"unexpected prefetch abort in Hyp mode at: %#08x" +dabt_die_str: +	.ascii	"unexpected data abort in Hyp mode at: %#08x" +svc_die_str: +	.ascii	"unexpected HVC/SVC trap in Hyp mode at: %#08x" diff --git a/arch/arm/kvm/interrupts_head.S b/arch/arm/kvm/interrupts_head.S new file mode 100644 index 00000000000..6a95d341e9c --- /dev/null +++ b/arch/arm/kvm/interrupts_head.S @@ -0,0 +1,441 @@ +#define VCPU_USR_REG(_reg_nr)	(VCPU_USR_REGS + (_reg_nr * 4)) +#define VCPU_USR_SP		(VCPU_USR_REG(13)) +#define VCPU_USR_LR		(VCPU_USR_REG(14)) +#define CP15_OFFSET(_cp15_reg_idx) (VCPU_CP15 + (_cp15_reg_idx * 4)) + +/* + * Many of these macros need to access the VCPU structure, which is always + * held in r0. These macros should never clobber r1, as it is used to hold the + * exception code on the return path (except of course the macro that switches + * all the registers before the final jump to the VM). + */ +vcpu	.req	r0		@ vcpu pointer always in r0 + +/* Clobbers {r2-r6} */ +.macro store_vfp_state vfp_base +	@ The VFPFMRX and VFPFMXR macros are the VMRS and VMSR instructions +	VFPFMRX	r2, FPEXC +	@ Make sure VFP is enabled so we can touch the registers. +	orr	r6, r2, #FPEXC_EN +	VFPFMXR	FPEXC, r6 + +	VFPFMRX	r3, FPSCR +	tst	r2, #FPEXC_EX		@ Check for VFP Subarchitecture +	beq	1f +	@ If FPEXC_EX is 0, then FPINST/FPINST2 reads are unpredictable, so +	@ we only need to save them if FPEXC_EX is set.
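+	@ (FPINST2 is additionally gated on FPEXC_FP2V, checked just below.)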
+	VFPFMRX r4, FPINST +	tst	r2, #FPEXC_FP2V +	VFPFMRX r5, FPINST2, ne		@ vmrsne +	bic	r6, r2, #FPEXC_EX	@ FPEXC_EX disable +	VFPFMXR	FPEXC, r6 +1: +	VFPFSTMIA \vfp_base, r6		@ Save VFP registers +	stm	\vfp_base, {r2-r5}	@ Save FPEXC, FPSCR, FPINST, FPINST2 +.endm + +/* Assume FPEXC_EN is on and FPEXC_EX is off, clobbers {r2-r6} */ +.macro restore_vfp_state vfp_base +	VFPFLDMIA \vfp_base, r6		@ Load VFP registers +	ldm	\vfp_base, {r2-r5}	@ Load FPEXC, FPSCR, FPINST, FPINST2 + +	VFPFMXR FPSCR, r3 +	tst	r2, #FPEXC_EX		@ Check for VFP Subarchitecture +	beq	1f +	VFPFMXR FPINST, r4 +	tst	r2, #FPEXC_FP2V +	VFPFMXR FPINST2, r5, ne +1: +	VFPFMXR FPEXC, r2	@ FPEXC	(last, in case !EN) +.endm + +/* These are simply for the macros to work - value don't have meaning */ +.equ usr, 0 +.equ svc, 1 +.equ abt, 2 +.equ und, 3 +.equ irq, 4 +.equ fiq, 5 + +.macro push_host_regs_mode mode +	mrs	r2, SP_\mode +	mrs	r3, LR_\mode +	mrs	r4, SPSR_\mode +	push	{r2, r3, r4} +.endm + +/* + * Store all host persistent registers on the stack. + * Clobbers all registers, in all modes, except r0 and r1. + */ +.macro save_host_regs +	/* Hyp regs. Only ELR_hyp (SPSR_hyp already saved) */ +	mrs	r2, ELR_hyp +	push	{r2} + +	/* usr regs */ +	push	{r4-r12}	@ r0-r3 are always clobbered +	mrs	r2, SP_usr +	mov	r3, lr +	push	{r2, r3} + +	push_host_regs_mode svc +	push_host_regs_mode abt +	push_host_regs_mode und +	push_host_regs_mode irq + +	/* fiq regs */ +	mrs	r2, r8_fiq +	mrs	r3, r9_fiq +	mrs	r4, r10_fiq +	mrs	r5, r11_fiq +	mrs	r6, r12_fiq +	mrs	r7, SP_fiq +	mrs	r8, LR_fiq +	mrs	r9, SPSR_fiq +	push	{r2-r9} +.endm + +.macro pop_host_regs_mode mode +	pop	{r2, r3, r4} +	msr	SP_\mode, r2 +	msr	LR_\mode, r3 +	msr	SPSR_\mode, r4 +.endm + +/* + * Restore all host registers from the stack. + * Clobbers all registers, in all modes, except r0 and r1. + */ +.macro restore_host_regs +	pop	{r2-r9} +	msr	r8_fiq, r2 +	msr	r9_fiq, r3 +	msr	r10_fiq, r4 +	msr	r11_fiq, r5 +	msr	r12_fiq, r6 +	msr	SP_fiq, r7 +	msr	LR_fiq, r8 +	msr	SPSR_fiq, r9 + +	pop_host_regs_mode irq +	pop_host_regs_mode und +	pop_host_regs_mode abt +	pop_host_regs_mode svc + +	pop	{r2, r3} +	msr	SP_usr, r2 +	mov	lr, r3 +	pop	{r4-r12} + +	pop	{r2} +	msr	ELR_hyp, r2 +.endm + +/* + * Restore SP, LR and SPSR for a given mode. offset is the offset of + * this mode's registers from the VCPU base. + * + * Assumes vcpu pointer in vcpu reg + * + * Clobbers r1, r2, r3, r4. + */ +.macro restore_guest_regs_mode mode, offset +	add	r1, vcpu, \offset +	ldm	r1, {r2, r3, r4} +	msr	SP_\mode, r2 +	msr	LR_\mode, r3 +	msr	SPSR_\mode, r4 +.endm + +/* + * Restore all guest registers from the vcpu struct. + * + * Assumes vcpu pointer in vcpu reg + * + * Clobbers *all* registers. + */ +.macro restore_guest_regs +	restore_guest_regs_mode svc, #VCPU_SVC_REGS +	restore_guest_regs_mode abt, #VCPU_ABT_REGS +	restore_guest_regs_mode und, #VCPU_UND_REGS +	restore_guest_regs_mode irq, #VCPU_IRQ_REGS + +	add	r1, vcpu, #VCPU_FIQ_REGS +	ldm	r1, {r2-r9} +	msr	r8_fiq, r2 +	msr	r9_fiq, r3 +	msr	r10_fiq, r4 +	msr	r11_fiq, r5 +	msr	r12_fiq, r6 +	msr	SP_fiq, r7 +	msr	LR_fiq, r8 +	msr	SPSR_fiq, r9 + +	@ Load return state +	ldr	r2, [vcpu, #VCPU_PC] +	ldr	r3, [vcpu, #VCPU_CPSR] +	msr	ELR_hyp, r2 +	msr	SPSR_cxsf, r3 + +	@ Load user registers +	ldr	r2, [vcpu, #VCPU_USR_SP] +	ldr	r3, [vcpu, #VCPU_USR_LR] +	msr	SP_usr, r2 +	mov	lr, r3 +	add	vcpu, vcpu, #(VCPU_USR_REGS) +	ldm	vcpu, {r0-r12} +.endm + +/* + * Save SP, LR and SPSR for a given mode. offset is the offset of + * this mode's registers from the VCPU base. 
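 + * (Three words are stored per mode, in the order SP, LR, SPSR, mirroring + * restore_guest_regs_mode above.)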
+ * + * Assumes vcpu pointer in vcpu reg + * + * Clobbers r2, r3, r4, r5. + */ +.macro save_guest_regs_mode mode, offset +	add	r2, vcpu, \offset +	mrs	r3, SP_\mode +	mrs	r4, LR_\mode +	mrs	r5, SPSR_\mode +	stm	r2, {r3, r4, r5} +.endm + +/* + * Save all guest registers to the vcpu struct + * Expects guest's r0, r1, r2 on the stack. + * + * Assumes vcpu pointer in vcpu reg + * + * Clobbers r2, r3, r4, r5. + */ +.macro save_guest_regs +	@ Store usr registers +	add	r2, vcpu, #VCPU_USR_REG(3) +	stm	r2, {r3-r12} +	add	r2, vcpu, #VCPU_USR_REG(0) +	pop	{r3, r4, r5}		@ r0, r1, r2 +	stm	r2, {r3, r4, r5} +	mrs	r2, SP_usr +	mov	r3, lr +	str	r2, [vcpu, #VCPU_USR_SP] +	str	r3, [vcpu, #VCPU_USR_LR] + +	@ Store return state +	mrs	r2, ELR_hyp +	mrs	r3, spsr +	str	r2, [vcpu, #VCPU_PC] +	str	r3, [vcpu, #VCPU_CPSR] + +	@ Store other guest registers +	save_guest_regs_mode svc, #VCPU_SVC_REGS +	save_guest_regs_mode abt, #VCPU_ABT_REGS +	save_guest_regs_mode und, #VCPU_UND_REGS +	save_guest_regs_mode irq, #VCPU_IRQ_REGS +.endm + +/* Reads cp15 registers from hardware and stores them in memory + * @store_to_vcpu: If 0, registers are written in-order to the stack, + * 		   otherwise to the VCPU struct pointed to by vcpup + * + * Assumes vcpu pointer in vcpu reg + * + * Clobbers r2 - r12 + */ +.macro read_cp15_state store_to_vcpu +	mrc	p15, 0, r2, c1, c0, 0	@ SCTLR +	mrc	p15, 0, r3, c1, c0, 2	@ CPACR +	mrc	p15, 0, r4, c2, c0, 2	@ TTBCR +	mrc	p15, 0, r5, c3, c0, 0	@ DACR +	mrrc	p15, 0, r6, r7, c2	@ TTBR 0 +	mrrc	p15, 1, r8, r9, c2	@ TTBR 1 +	mrc	p15, 0, r10, c10, c2, 0	@ PRRR +	mrc	p15, 0, r11, c10, c2, 1	@ NMRR +	mrc	p15, 2, r12, c0, c0, 0	@ CSSELR + +	.if \store_to_vcpu == 0 +	push	{r2-r12}		@ Push CP15 registers +	.else +	str	r2, [vcpu, #CP15_OFFSET(c1_SCTLR)] +	str	r3, [vcpu, #CP15_OFFSET(c1_CPACR)] +	str	r4, [vcpu, #CP15_OFFSET(c2_TTBCR)] +	str	r5, [vcpu, #CP15_OFFSET(c3_DACR)] +	add	r2, vcpu, #CP15_OFFSET(c2_TTBR0) +	strd	r6, r7, [r2] +	add	r2, vcpu, #CP15_OFFSET(c2_TTBR1) +	strd	r8, r9, [r2] +	str	r10, [vcpu, #CP15_OFFSET(c10_PRRR)] +	str	r11, [vcpu, #CP15_OFFSET(c10_NMRR)] +	str	r12, [vcpu, #CP15_OFFSET(c0_CSSELR)] +	.endif + +	mrc	p15, 0, r2, c13, c0, 1	@ CID +	mrc	p15, 0, r3, c13, c0, 2	@ TID_URW +	mrc	p15, 0, r4, c13, c0, 3	@ TID_URO +	mrc	p15, 0, r5, c13, c0, 4	@ TID_PRIV +	mrc	p15, 0, r6, c5, c0, 0	@ DFSR +	mrc	p15, 0, r7, c5, c0, 1	@ IFSR +	mrc	p15, 0, r8, c5, c1, 0	@ ADFSR +	mrc	p15, 0, r9, c5, c1, 1	@ AIFSR +	mrc	p15, 0, r10, c6, c0, 0	@ DFAR +	mrc	p15, 0, r11, c6, c0, 2	@ IFAR +	mrc	p15, 0, r12, c12, c0, 0	@ VBAR + +	.if \store_to_vcpu == 0 +	push	{r2-r12}		@ Push CP15 registers +	.else +	str	r2, [vcpu, #CP15_OFFSET(c13_CID)] +	str	r3, [vcpu, #CP15_OFFSET(c13_TID_URW)] +	str	r4, [vcpu, #CP15_OFFSET(c13_TID_URO)] +	str	r5, [vcpu, #CP15_OFFSET(c13_TID_PRIV)] +	str	r6, [vcpu, #CP15_OFFSET(c5_DFSR)] +	str	r7, [vcpu, #CP15_OFFSET(c5_IFSR)] +	str	r8, [vcpu, #CP15_OFFSET(c5_ADFSR)] +	str	r9, [vcpu, #CP15_OFFSET(c5_AIFSR)] +	str	r10, [vcpu, #CP15_OFFSET(c6_DFAR)] +	str	r11, [vcpu, #CP15_OFFSET(c6_IFAR)] +	str	r12, [vcpu, #CP15_OFFSET(c12_VBAR)] +	.endif +.endm + +/* + * Reads cp15 registers from memory and writes them to hardware + * @read_from_vcpu: If 0, registers are read in-order from the stack, + *		    otherwise from the VCPU struct pointed to by vcpup + * + * Assumes vcpu pointer in vcpu reg + */ +.macro write_cp15_state read_from_vcpu +	.if \read_from_vcpu == 0 +	pop	{r2-r12} +	.else +	ldr	r2, [vcpu, #CP15_OFFSET(c13_CID)] +	ldr	r3, [vcpu, #CP15_OFFSET(c13_TID_URW)] +	ldr	r4, [vcpu, 
#CP15_OFFSET(c13_TID_URO)] +	ldr	r5, [vcpu, #CP15_OFFSET(c13_TID_PRIV)] +	ldr	r6, [vcpu, #CP15_OFFSET(c5_DFSR)] +	ldr	r7, [vcpu, #CP15_OFFSET(c5_IFSR)] +	ldr	r8, [vcpu, #CP15_OFFSET(c5_ADFSR)] +	ldr	r9, [vcpu, #CP15_OFFSET(c5_AIFSR)] +	ldr	r10, [vcpu, #CP15_OFFSET(c6_DFAR)] +	ldr	r11, [vcpu, #CP15_OFFSET(c6_IFAR)] +	ldr	r12, [vcpu, #CP15_OFFSET(c12_VBAR)] +	.endif + +	mcr	p15, 0, r2, c13, c0, 1	@ CID +	mcr	p15, 0, r3, c13, c0, 2	@ TID_URW +	mcr	p15, 0, r4, c13, c0, 3	@ TID_URO +	mcr	p15, 0, r5, c13, c0, 4	@ TID_PRIV +	mcr	p15, 0, r6, c5, c0, 0	@ DFSR +	mcr	p15, 0, r7, c5, c0, 1	@ IFSR +	mcr	p15, 0, r8, c5, c1, 0	@ ADFSR +	mcr	p15, 0, r9, c5, c1, 1	@ AIFSR +	mcr	p15, 0, r10, c6, c0, 0	@ DFAR +	mcr	p15, 0, r11, c6, c0, 2	@ IFAR +	mcr	p15, 0, r12, c12, c0, 0	@ VBAR + +	.if \read_from_vcpu == 0 +	pop	{r2-r12} +	.else +	ldr	r2, [vcpu, #CP15_OFFSET(c1_SCTLR)] +	ldr	r3, [vcpu, #CP15_OFFSET(c1_CPACR)] +	ldr	r4, [vcpu, #CP15_OFFSET(c2_TTBCR)] +	ldr	r5, [vcpu, #CP15_OFFSET(c3_DACR)] +	add	r12, vcpu, #CP15_OFFSET(c2_TTBR0) +	ldrd	r6, r7, [r12] +	add	r12, vcpu, #CP15_OFFSET(c2_TTBR1) +	ldrd	r8, r9, [r12] +	ldr	r10, [vcpu, #CP15_OFFSET(c10_PRRR)] +	ldr	r11, [vcpu, #CP15_OFFSET(c10_NMRR)] +	ldr	r12, [vcpu, #CP15_OFFSET(c0_CSSELR)] +	.endif + +	mcr	p15, 0, r2, c1, c0, 0	@ SCTLR +	mcr	p15, 0, r3, c1, c0, 2	@ CPACR +	mcr	p15, 0, r4, c2, c0, 2	@ TTBCR +	mcr	p15, 0, r5, c3, c0, 0	@ DACR +	mcrr	p15, 0, r6, r7, c2	@ TTBR 0 +	mcrr	p15, 1, r8, r9, c2	@ TTBR 1 +	mcr	p15, 0, r10, c10, c2, 0	@ PRRR +	mcr	p15, 0, r11, c10, c2, 1	@ NMRR +	mcr	p15, 2, r12, c0, c0, 0	@ CSSELR +.endm + +/* + * Save the VGIC CPU state into memory + * + * Assumes vcpu pointer in vcpu reg + */ +.macro save_vgic_state +.endm + +/* + * Restore the VGIC CPU state from memory + * + * Assumes vcpu pointer in vcpu reg + */ +.macro restore_vgic_state +.endm + +.equ vmentry,	0 +.equ vmexit,	1 + +/* Configures the HSTR (Hyp System Trap Register) on entry/return + * (hardware reset value is 0) */ +.macro set_hstr operation +	mrc	p15, 4, r2, c1, c1, 3 +	ldr	r3, =HSTR_T(15) +	.if \operation == vmentry +	orr	r2, r2, r3		@ Trap CR{15} +	.else +	bic	r2, r2, r3		@ Don't trap any CRx accesses +	.endif +	mcr	p15, 4, r2, c1, c1, 3 +.endm + +/* Configures the HCPTR (Hyp Coprocessor Trap Register) on entry/return + * (hardware reset value is 0). Keep previous value in r2. 
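 + * The world-switch return path tests that saved value to tell whether the + * cp10/cp11 traps were still set at guest exit, i.e. whether the guest ever + * used VFP.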
*/ +.macro set_hcptr operation, mask +	mrc	p15, 4, r2, c1, c1, 2 +	ldr	r3, =\mask +	.if \operation == vmentry +	orr	r3, r2, r3		@ Trap coproc-accesses defined in mask +	.else +	bic	r3, r2, r3		@ Don't trap defined coproc-accesses +	.endif +	mcr	p15, 4, r3, c1, c1, 2 +.endm + +/* Configures the HDCR (Hyp Debug Configuration Register) on entry/return + * (hardware reset value is 0) */ +.macro set_hdcr operation +	mrc	p15, 4, r2, c1, c1, 1 +	ldr	r3, =(HDCR_TPM|HDCR_TPMCR) +	.if \operation == vmentry +	orr	r2, r2, r3		@ Trap some perfmon accesses +	.else +	bic	r2, r2, r3		@ Don't trap any perfmon accesses +	.endif +	mcr	p15, 4, r2, c1, c1, 1 +.endm + +/* Enable/Disable: stage-2 trans., trap interrupts, trap wfi, trap smc */ +.macro configure_hyp_role operation +	mrc	p15, 4, r2, c1, c1, 0	@ HCR +	bic	r2, r2, #HCR_VIRT_EXCP_MASK +	ldr	r3, =HCR_GUEST_MASK +	.if \operation == vmentry +	orr	r2, r2, r3 +	ldr	r3, [vcpu, #VCPU_IRQ_LINES] +	orr	r2, r2, r3 +	.else +	bic	r2, r2, r3 +	.endif +	mcr	p15, 4, r2, c1, c1, 0 +.endm + +.macro load_vcpu +	mrc	p15, 4, vcpu, c13, c0, 2	@ HTPIDR +.endm diff --git a/arch/arm/kvm/mmio.c b/arch/arm/kvm/mmio.c new file mode 100644 index 00000000000..0144baf8290 --- /dev/null +++ b/arch/arm/kvm/mmio.c @@ -0,0 +1,153 @@ +/* + * Copyright (C) 2012 - Virtual Open Systems and Columbia University + * Author: Christoffer Dall <c.dall@virtualopensystems.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA. + */ + +#include <linux/kvm_host.h> +#include <asm/kvm_mmio.h> +#include <asm/kvm_emulate.h> +#include <trace/events/kvm.h> + +#include "trace.h" + +/** + * kvm_handle_mmio_return -- Handle MMIO loads after user space emulation + * @vcpu: The VCPU pointer + * @run:  The VCPU run struct containing the mmio data + * + * This should only be called after returning from userspace for MMIO load + * emulation. 
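 + * + * For a hypothetical guest "ldrsb rN, [some I/O address]", for instance, the + * decode step records len = 1 and sign_extend = true, so the byte copied + * from run->mmio.data below is sign-extended into the destination register.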
+ */ +int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run) +{ +	__u32 *dest; +	unsigned int len; +	int mask; + +	if (!run->mmio.is_write) { +		dest = vcpu_reg(vcpu, vcpu->arch.mmio_decode.rt); +		memset(dest, 0, sizeof(int)); + +		len = run->mmio.len; +		if (len > 4) +			return -EINVAL; + +		memcpy(dest, run->mmio.data, len); + +		trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr, +				*((u64 *)run->mmio.data)); + +		if (vcpu->arch.mmio_decode.sign_extend && len < 4) { +			mask = 1U << ((len * 8) - 1); +			*dest = (*dest ^ mask) - mask; +		} +	} + +	return 0; +} + +static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, +		      struct kvm_exit_mmio *mmio) +{ +	unsigned long rt, len; +	bool is_write, sign_extend; + +	if ((vcpu->arch.hsr >> 8) & 1) { +		/* cache operation on I/O addr, tell guest unsupported */ +		kvm_inject_dabt(vcpu, vcpu->arch.hxfar); +		return 1; +	} + +	if ((vcpu->arch.hsr >> 7) & 1) { +		/* page table accesses IO mem: tell guest to fix its TTBR */ +		kvm_inject_dabt(vcpu, vcpu->arch.hxfar); +		return 1; +	} + +	switch ((vcpu->arch.hsr >> 22) & 0x3) { +	case 0: +		len = 1; +		break; +	case 1: +		len = 2; +		break; +	case 2: +		len = 4; +		break; +	default: +		kvm_err("Hardware is weird: SAS 0b11 is reserved\n"); +		return -EFAULT; +	} + +	is_write = vcpu->arch.hsr & HSR_WNR; +	sign_extend = vcpu->arch.hsr & HSR_SSE; +	rt = (vcpu->arch.hsr & HSR_SRT_MASK) >> HSR_SRT_SHIFT; + +	if (kvm_vcpu_reg_is_pc(vcpu, rt)) { +		/* IO memory trying to read/write pc */ +		kvm_inject_pabt(vcpu, vcpu->arch.hxfar); +		return 1; +	} + +	mmio->is_write = is_write; +	mmio->phys_addr = fault_ipa; +	mmio->len = len; +	vcpu->arch.mmio_decode.sign_extend = sign_extend; +	vcpu->arch.mmio_decode.rt = rt; + +	/* +	 * The MMIO instruction is emulated and should not be re-executed +	 * in the guest. +	 */ +	kvm_skip_instr(vcpu, (vcpu->arch.hsr >> 25) & 1); +	return 0; +} + +int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run, +		 phys_addr_t fault_ipa) +{ +	struct kvm_exit_mmio mmio; +	unsigned long rt; +	int ret; + +	/* +	 * Prepare MMIO operation. First stash it in a private +	 * structure that we can use for in-kernel emulation. If the +	 * kernel can't handle it, copy it into run->mmio and let user +	 * space do its magic. +	 */ + +	if (vcpu->arch.hsr & HSR_ISV) { +		ret = decode_hsr(vcpu, fault_ipa, &mmio); +		if (ret) +			return ret; +	} else { +		kvm_err("load/store instruction decoding not implemented\n"); +		return -ENOSYS; +	} + +	rt = vcpu->arch.mmio_decode.rt; +	trace_kvm_mmio((mmio.is_write) ? KVM_TRACE_MMIO_WRITE : +					 KVM_TRACE_MMIO_READ_UNSATISFIED, +			mmio.len, fault_ipa, +			(mmio.is_write) ? *vcpu_reg(vcpu, rt) : 0); + +	if (mmio.is_write) +		memcpy(mmio.data, vcpu_reg(vcpu, rt), mmio.len); + +	kvm_prepare_mmio(run, &mmio); +	return 0; +} diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c new file mode 100644 index 00000000000..f30e13163a9 --- /dev/null +++ b/arch/arm/kvm/mmu.c @@ -0,0 +1,787 @@ +/* + * Copyright (C) 2012 - Virtual Open Systems and Columbia University + * Author: Christoffer Dall <c.dall@virtualopensystems.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA. + */ + +#include <linux/mman.h> +#include <linux/kvm_host.h> +#include <linux/io.h> +#include <trace/events/kvm.h> +#include <asm/idmap.h> +#include <asm/pgalloc.h> +#include <asm/cacheflush.h> +#include <asm/kvm_arm.h> +#include <asm/kvm_mmu.h> +#include <asm/kvm_mmio.h> +#include <asm/kvm_asm.h> +#include <asm/kvm_emulate.h> +#include <asm/mach/map.h> +#include <trace/events/kvm.h> + +#include "trace.h" + +extern char  __hyp_idmap_text_start[], __hyp_idmap_text_end[]; + +static DEFINE_MUTEX(kvm_hyp_pgd_mutex); + +static void kvm_tlb_flush_vmid(struct kvm *kvm) +{ +	kvm_call_hyp(__kvm_tlb_flush_vmid, kvm); +} + +static void kvm_set_pte(pte_t *pte, pte_t new_pte) +{ +	pte_val(*pte) = new_pte; +	/* +	 * flush_pmd_entry just takes a void pointer and cleans the necessary +	 * cache entries, so we can reuse the function for ptes. +	 */ +	flush_pmd_entry(pte); +} + +static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache, +				  int min, int max) +{ +	void *page; + +	BUG_ON(max > KVM_NR_MEM_OBJS); +	if (cache->nobjs >= min) +		return 0; +	while (cache->nobjs < max) { +		page = (void *)__get_free_page(PGALLOC_GFP); +		if (!page) +			return -ENOMEM; +		cache->objects[cache->nobjs++] = page; +	} +	return 0; +} + +static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc) +{ +	while (mc->nobjs) +		free_page((unsigned long)mc->objects[--mc->nobjs]); +} + +static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc) +{ +	void *p; + +	BUG_ON(!mc || !mc->nobjs); +	p = mc->objects[--mc->nobjs]; +	return p; +} + +static void free_ptes(pmd_t *pmd, unsigned long addr) +{ +	pte_t *pte; +	unsigned int i; + +	for (i = 0; i < PTRS_PER_PMD; i++, addr += PMD_SIZE) { +		if (!pmd_none(*pmd) && pmd_table(*pmd)) { +			pte = pte_offset_kernel(pmd, addr); +			pte_free_kernel(NULL, pte); +		} +		pmd++; +	} +} + +/** + * free_hyp_pmds - free a Hyp-mode level-2 tables and child level-3 tables + * + * Assumes this is a page table used strictly in Hyp-mode and therefore contains + * only mappings in the kernel memory area, which is above PAGE_OFFSET. 
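 + * + * (This is why the walk below starts at PAGE_OFFSET and runs until the + * address wraps around to zero.)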
+ */ +void free_hyp_pmds(void) +{ +	pgd_t *pgd; +	pud_t *pud; +	pmd_t *pmd; +	unsigned long addr; + +	mutex_lock(&kvm_hyp_pgd_mutex); +	for (addr = PAGE_OFFSET; addr != 0; addr += PGDIR_SIZE) { +		pgd = hyp_pgd + pgd_index(addr); +		pud = pud_offset(pgd, addr); + +		if (pud_none(*pud)) +			continue; +		BUG_ON(pud_bad(*pud)); + +		pmd = pmd_offset(pud, addr); +		free_ptes(pmd, addr); +		pmd_free(NULL, pmd); +		pud_clear(pud); +	} +	mutex_unlock(&kvm_hyp_pgd_mutex); +} + +static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start, +				    unsigned long end) +{ +	pte_t *pte; +	unsigned long addr; +	struct page *page; + +	for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) { +		pte = pte_offset_kernel(pmd, addr); +		BUG_ON(!virt_addr_valid(addr)); +		page = virt_to_page(addr); +		kvm_set_pte(pte, mk_pte(page, PAGE_HYP)); +	} +} + +static void create_hyp_io_pte_mappings(pmd_t *pmd, unsigned long start, +				       unsigned long end, +				       unsigned long *pfn_base) +{ +	pte_t *pte; +	unsigned long addr; + +	for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) { +		pte = pte_offset_kernel(pmd, addr); +		BUG_ON(pfn_valid(*pfn_base)); +		kvm_set_pte(pte, pfn_pte(*pfn_base, PAGE_HYP_DEVICE)); +		(*pfn_base)++; +	} +} + +static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start, +				   unsigned long end, unsigned long *pfn_base) +{ +	pmd_t *pmd; +	pte_t *pte; +	unsigned long addr, next; + +	for (addr = start; addr < end; addr = next) { +		pmd = pmd_offset(pud, addr); + +		BUG_ON(pmd_sect(*pmd)); + +		if (pmd_none(*pmd)) { +			pte = pte_alloc_one_kernel(NULL, addr); +			if (!pte) { +				kvm_err("Cannot allocate Hyp pte\n"); +				return -ENOMEM; +			} +			pmd_populate_kernel(NULL, pmd, pte); +		} + +		next = pmd_addr_end(addr, end); + +		/* +		 * If pfn_base is NULL, we map kernel pages into HYP with the +		 * virtual address. Otherwise, this is considered an I/O +		 * mapping and we map the physical region starting at +		 * *pfn_base to [start, end[. +		 */ +		if (!pfn_base) +			create_hyp_pte_mappings(pmd, addr, next); +		else +			create_hyp_io_pte_mappings(pmd, addr, next, pfn_base); +	} + +	return 0; +} + +static int __create_hyp_mappings(void *from, void *to, unsigned long *pfn_base) +{ +	unsigned long start = (unsigned long)from; +	unsigned long end = (unsigned long)to; +	pgd_t *pgd; +	pud_t *pud; +	pmd_t *pmd; +	unsigned long addr, next; +	int err = 0; + +	BUG_ON(start > end); +	if (start < PAGE_OFFSET) +		return -EINVAL; + +	mutex_lock(&kvm_hyp_pgd_mutex); +	for (addr = start; addr < end; addr = next) { +		pgd = hyp_pgd + pgd_index(addr); +		pud = pud_offset(pgd, addr); + +		if (pud_none_or_clear_bad(pud)) { +			pmd = pmd_alloc_one(NULL, addr); +			if (!pmd) { +				kvm_err("Cannot allocate Hyp pmd\n"); +				err = -ENOMEM; +				goto out; +			} +			pud_populate(NULL, pud, pmd); +		} + +		next = pgd_addr_end(addr, end); +		err = create_hyp_pmd_mappings(pud, addr, next, pfn_base); +		if (err) +			goto out; +	} +out: +	mutex_unlock(&kvm_hyp_pgd_mutex); +	return err; +} + +/** + * create_hyp_mappings - map a kernel virtual address range in Hyp mode + * @from:	The virtual kernel start address of the range + * @to:		The virtual kernel end address of the range (exclusive) + * + * The same virtual address as the kernel virtual address is also used in + * Hyp-mode mapping to the same underlying physical pages. + * + * Note: Wrapping around zero in the "to" address is not supported. 
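 + * + * As an illustration (the actual callers live in arch/arm/kvm/arm.c), the + * Hyp-mode text declared in interrupts.S could be mapped with: + * + *	create_hyp_mappings(__kvm_hyp_code_start, __kvm_hyp_code_end);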
+ */ +int create_hyp_mappings(void *from, void *to) +{ +	return __create_hyp_mappings(from, to, NULL); +} + +/** + * create_hyp_io_mappings - map a physical IO range in Hyp mode + * @from:	The virtual HYP start address of the range + * @to:		The virtual HYP end address of the range (exclusive) + * @addr:	The physical start address which gets mapped + */ +int create_hyp_io_mappings(void *from, void *to, phys_addr_t addr) +{ +	unsigned long pfn = __phys_to_pfn(addr); +	return __create_hyp_mappings(from, to, &pfn); +} + +/** + * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation. + * @kvm:	The KVM struct pointer for the VM. + * + * Allocates the 1st level table only of size defined by S2_PGD_ORDER (can + * support either full 40-bit input addresses or limited to 32-bit input + * addresses). Clears the allocated pages. + * + * Note we don't need locking here as this is only called when the VM is + * created, which can only be done once. + */ +int kvm_alloc_stage2_pgd(struct kvm *kvm) +{ +	pgd_t *pgd; + +	if (kvm->arch.pgd != NULL) { +		kvm_err("kvm_arch already initialized?\n"); +		return -EINVAL; +	} + +	pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, S2_PGD_ORDER); +	if (!pgd) +		return -ENOMEM; + +	/* stage-2 pgd must be aligned to its size */ +	VM_BUG_ON((unsigned long)pgd & (S2_PGD_SIZE - 1)); + +	memset(pgd, 0, PTRS_PER_S2_PGD * sizeof(pgd_t)); +	clean_dcache_area(pgd, PTRS_PER_S2_PGD * sizeof(pgd_t)); +	kvm->arch.pgd = pgd; + +	return 0; +} + +static void clear_pud_entry(pud_t *pud) +{ +	pmd_t *pmd_table = pmd_offset(pud, 0); +	pud_clear(pud); +	pmd_free(NULL, pmd_table); +	put_page(virt_to_page(pud)); +} + +static void clear_pmd_entry(pmd_t *pmd) +{ +	pte_t *pte_table = pte_offset_kernel(pmd, 0); +	pmd_clear(pmd); +	pte_free_kernel(NULL, pte_table); +	put_page(virt_to_page(pmd)); +} + +static bool pmd_empty(pmd_t *pmd) +{ +	struct page *pmd_page = virt_to_page(pmd); +	return page_count(pmd_page) == 1; +} + +static void clear_pte_entry(pte_t *pte) +{ +	if (pte_present(*pte)) { +		kvm_set_pte(pte, __pte(0)); +		put_page(virt_to_page(pte)); +	} +} + +static bool pte_empty(pte_t *pte) +{ +	struct page *pte_page = virt_to_page(pte); +	return page_count(pte_page) == 1; +} + +/** + * unmap_stage2_range -- Clear stage2 page table entries to unmap a range + * @kvm:   The VM pointer + * @start: The intermediate physical base address of the range to unmap + * @size:  The size of the area to unmap + * + * Clear a range of stage-2 mappings, lowering the various ref-counts.  Must + * be called while holding mmu_lock (unless for freeing the stage2 pgd before + * destroying the VM), otherwise another faulting VCPU may come in and mess + * with things behind our backs. 
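 + * + * (kvm_free_stage2_pgd below is the one caller that may legitimately skip + * the lock, since the VM is already being torn down when it runs.)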
+ */ +static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size) +{ +	pgd_t *pgd; +	pud_t *pud; +	pmd_t *pmd; +	pte_t *pte; +	phys_addr_t addr = start, end = start + size; +	u64 range; + +	while (addr < end) { +		pgd = kvm->arch.pgd + pgd_index(addr); +		pud = pud_offset(pgd, addr); +		if (pud_none(*pud)) { +			addr += PUD_SIZE; +			continue; +		} + +		pmd = pmd_offset(pud, addr); +		if (pmd_none(*pmd)) { +			addr += PMD_SIZE; +			continue; +		} + +		pte = pte_offset_kernel(pmd, addr); +		clear_pte_entry(pte); +		range = PAGE_SIZE; + +		/* If we emptied the pte, walk back up the ladder */ +		if (pte_empty(pte)) { +			clear_pmd_entry(pmd); +			range = PMD_SIZE; +			if (pmd_empty(pmd)) { +				clear_pud_entry(pud); +				range = PUD_SIZE; +			} +		} + +		addr += range; +	} +} + +/** + * kvm_free_stage2_pgd - free all stage-2 tables + * @kvm:	The KVM struct pointer for the VM. + * + * Walks the level-1 page table pointed to by kvm->arch.pgd and frees all + * underlying level-2 and level-3 tables before freeing the actual level-1 table + * and setting the struct pointer to NULL. + * + * Note we don't need locking here as this is only called when the VM is + * destroyed, which can only be done once. + */ +void kvm_free_stage2_pgd(struct kvm *kvm) +{ +	if (kvm->arch.pgd == NULL) +		return; + +	unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE); +	free_pages((unsigned long)kvm->arch.pgd, S2_PGD_ORDER); +	kvm->arch.pgd = NULL; +} + + +static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, +			  phys_addr_t addr, const pte_t *new_pte, bool iomap) +{ +	pgd_t *pgd; +	pud_t *pud; +	pmd_t *pmd; +	pte_t *pte, old_pte; + +	/* Create 2nd stage page table mapping - Level 1 */ +	pgd = kvm->arch.pgd + pgd_index(addr); +	pud = pud_offset(pgd, addr); +	if (pud_none(*pud)) { +		if (!cache) +			return 0; /* ignore calls from kvm_set_spte_hva */ +		pmd = mmu_memory_cache_alloc(cache); +		pud_populate(NULL, pud, pmd); +		pmd += pmd_index(addr); +		get_page(virt_to_page(pud)); +	} else +		pmd = pmd_offset(pud, addr); + +	/* Create 2nd stage page table mapping - Level 2 */ +	if (pmd_none(*pmd)) { +		if (!cache) +			return 0; /* ignore calls from kvm_set_spte_hva */ +		pte = mmu_memory_cache_alloc(cache); +		clean_pte_table(pte); +		pmd_populate_kernel(NULL, pmd, pte); +		pte += pte_index(addr); +		get_page(virt_to_page(pmd)); +	} else +		pte = pte_offset_kernel(pmd, addr); + +	if (iomap && pte_present(*pte)) +		return -EFAULT; + +	/* Create 2nd stage page table mapping - Level 3 */ +	old_pte = *pte; +	kvm_set_pte(pte, *new_pte); +	if (pte_present(old_pte)) +		kvm_tlb_flush_vmid(kvm); +	else +		get_page(virt_to_page(pte)); + +	return 0; +} + +/** + * kvm_phys_addr_ioremap - map a device range to guest IPA + * + * @kvm:	The KVM pointer + * @guest_ipa:	The IPA at which to insert the mapping + * @pa:		The physical address of the device + * @size:	The size of the mapping + */ +int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa, +			  phys_addr_t pa, unsigned long size) +{ +	phys_addr_t addr, end; +	int ret = 0; +	unsigned long pfn; +	struct kvm_mmu_memory_cache cache = { 0, }; + +	end = (guest_ipa + size + PAGE_SIZE - 1) & PAGE_MASK; +	pfn = __phys_to_pfn(pa); + +	for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) { +		pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE | L_PTE_S2_RDWR); + +		ret = mmu_topup_memory_cache(&cache, 2, 2); +		if (ret) +			goto out; +		spin_lock(&kvm->mmu_lock); +		ret = stage2_set_pte(kvm, &cache, addr, &pte, true); +		spin_unlock(&kvm->mmu_lock); +		if 
(ret) +			goto out; + +		pfn++; +	} + +out: +	mmu_free_memory_cache(&cache); +	return ret; +} + +static void coherent_icache_guest_page(struct kvm *kvm, gfn_t gfn) +{ +	/* +	 * If we are going to insert an instruction page and the icache is +	 * either VIPT or PIPT, there is a potential problem where the host +	 * (or another VM) may have used the same page as this guest, and we +	 * read incorrect data from the icache.  If we're using a PIPT cache, +	 * we can invalidate just that page, but if we are using a VIPT cache +	 * we need to invalidate the entire icache - damn shame - as written +	 * in the ARM ARM (DDI 0406C.b - Page B3-1393). +	 * +	 * VIVT caches are tagged using both the ASID and the VMID and doesn't +	 * need any kind of flushing (DDI 0406C.b - Page B3-1392). +	 */ +	if (icache_is_pipt()) { +		unsigned long hva = gfn_to_hva(kvm, gfn); +		__cpuc_coherent_user_range(hva, hva + PAGE_SIZE); +	} else if (!icache_is_vivt_asid_tagged()) { +		/* any kind of VIPT cache */ +		__flush_icache_all(); +	} +} + +static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, +			  gfn_t gfn, struct kvm_memory_slot *memslot, +			  unsigned long fault_status) +{ +	pte_t new_pte; +	pfn_t pfn; +	int ret; +	bool write_fault, writable; +	unsigned long mmu_seq; +	struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache; + +	write_fault = kvm_is_write_fault(vcpu->arch.hsr); +	if (fault_status == FSC_PERM && !write_fault) { +		kvm_err("Unexpected L2 read permission error\n"); +		return -EFAULT; +	} + +	/* We need minimum second+third level pages */ +	ret = mmu_topup_memory_cache(memcache, 2, KVM_NR_MEM_OBJS); +	if (ret) +		return ret; + +	mmu_seq = vcpu->kvm->mmu_notifier_seq; +	/* +	 * Ensure the read of mmu_notifier_seq happens before we call +	 * gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk +	 * the page we just got a reference to gets unmapped before we have a +	 * chance to grab the mmu_lock, which ensure that if the page gets +	 * unmapped afterwards, the call to kvm_unmap_hva will take it away +	 * from us again properly. This smp_rmb() interacts with the smp_wmb() +	 * in kvm_mmu_notifier_invalidate_<page|range_end>. +	 */ +	smp_rmb(); + +	pfn = gfn_to_pfn_prot(vcpu->kvm, gfn, write_fault, &writable); +	if (is_error_pfn(pfn)) +		return -EFAULT; + +	new_pte = pfn_pte(pfn, PAGE_S2); +	coherent_icache_guest_page(vcpu->kvm, gfn); + +	spin_lock(&vcpu->kvm->mmu_lock); +	if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) +		goto out_unlock; +	if (writable) { +		pte_val(new_pte) |= L_PTE_S2_RDWR; +		kvm_set_pfn_dirty(pfn); +	} +	stage2_set_pte(vcpu->kvm, memcache, fault_ipa, &new_pte, false); + +out_unlock: +	spin_unlock(&vcpu->kvm->mmu_lock); +	kvm_release_pfn_clean(pfn); +	return 0; +} + +/** + * kvm_handle_guest_abort - handles all 2nd stage aborts + * @vcpu:	the VCPU pointer + * @run:	the kvm_run structure + * + * Any abort that gets to the host is almost guaranteed to be caused by a + * missing second stage translation table entry, which can mean that either the + * guest simply needs more memory and we must allocate an appropriate page or it + * can mean that the guest tried to access I/O memory, which is emulated by user + * space. The distinction is based on the IPA causing the fault and whether this + * memory region has been registered as standard RAM by user space. 
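 + * + * Returns 1 if the fault was handled in the kernel (or an exception was + * injected back into the guest), 0 once run->mmio has been prepared for + * userspace emulation, or a negative error code otherwise.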
+ */ +int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run) +{ +	unsigned long hsr_ec; +	unsigned long fault_status; +	phys_addr_t fault_ipa; +	struct kvm_memory_slot *memslot; +	bool is_iabt; +	gfn_t gfn; +	int ret, idx; + +	hsr_ec = vcpu->arch.hsr >> HSR_EC_SHIFT; +	is_iabt = (hsr_ec == HSR_EC_IABT); +	fault_ipa = ((phys_addr_t)vcpu->arch.hpfar & HPFAR_MASK) << 8; + +	trace_kvm_guest_fault(*vcpu_pc(vcpu), vcpu->arch.hsr, +			      vcpu->arch.hxfar, fault_ipa); + +	/* Check the stage-2 fault is trans. fault or write fault */ +	fault_status = (vcpu->arch.hsr & HSR_FSC_TYPE); +	if (fault_status != FSC_FAULT && fault_status != FSC_PERM) { +		kvm_err("Unsupported fault status: EC=%#lx DFCS=%#lx\n", +			hsr_ec, fault_status); +		return -EFAULT; +	} + +	idx = srcu_read_lock(&vcpu->kvm->srcu); + +	gfn = fault_ipa >> PAGE_SHIFT; +	if (!kvm_is_visible_gfn(vcpu->kvm, gfn)) { +		if (is_iabt) { +			/* Prefetch Abort on I/O address */ +			kvm_inject_pabt(vcpu, vcpu->arch.hxfar); +			ret = 1; +			goto out_unlock; +		} + +		if (fault_status != FSC_FAULT) { +			kvm_err("Unsupported fault status on io memory: %#lx\n", +				fault_status); +			ret = -EFAULT; +			goto out_unlock; +		} + +		/* Adjust page offset */ +		fault_ipa |= vcpu->arch.hxfar & ~PAGE_MASK; +		ret = io_mem_abort(vcpu, run, fault_ipa); +		goto out_unlock; +	} + +	memslot = gfn_to_memslot(vcpu->kvm, gfn); +	if (!memslot->user_alloc) { +		kvm_err("non user-alloc memslots not supported\n"); +		ret = -EINVAL; +		goto out_unlock; +	} + +	ret = user_mem_abort(vcpu, fault_ipa, gfn, memslot, fault_status); +	if (ret == 0) +		ret = 1; +out_unlock: +	srcu_read_unlock(&vcpu->kvm->srcu, idx); +	return ret; +} + +static void handle_hva_to_gpa(struct kvm *kvm, +			      unsigned long start, +			      unsigned long end, +			      void (*handler)(struct kvm *kvm, +					      gpa_t gpa, void *data), +			      void *data) +{ +	struct kvm_memslots *slots; +	struct kvm_memory_slot *memslot; + +	slots = kvm_memslots(kvm); + +	/* we only care about the pages that the guest sees */ +	kvm_for_each_memslot(memslot, slots) { +		unsigned long hva_start, hva_end; +		gfn_t gfn, gfn_end; + +		hva_start = max(start, memslot->userspace_addr); +		hva_end = min(end, memslot->userspace_addr + +					(memslot->npages << PAGE_SHIFT)); +		if (hva_start >= hva_end) +			continue; + +		/* +		 * {gfn(page) | page intersects with [hva_start, hva_end)} = +		 * {gfn_start, gfn_start+1, ..., gfn_end-1}. 
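+		 *
+		 * gfn_end is computed from hva_end rounded up to the next
+		 * page boundary, so a range that ends part-way into a page
+		 * still covers that final page.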
+		 */
+		gfn = hva_to_gfn_memslot(hva_start, memslot);
+		gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
+
+		for (; gfn < gfn_end; ++gfn) {
+			gpa_t gpa = gfn << PAGE_SHIFT;
+			handler(kvm, gpa, data);
+		}
+	}
+}
+
+static void kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
+{
+	unmap_stage2_range(kvm, gpa, PAGE_SIZE);
+	kvm_tlb_flush_vmid(kvm);
+}
+
+int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
+{
+	unsigned long end = hva + PAGE_SIZE;
+
+	if (!kvm->arch.pgd)
+		return 0;
+
+	trace_kvm_unmap_hva(hva);
+	handle_hva_to_gpa(kvm, hva, end, &kvm_unmap_hva_handler, NULL);
+	return 0;
+}
+
+int kvm_unmap_hva_range(struct kvm *kvm,
+			unsigned long start, unsigned long end)
+{
+	if (!kvm->arch.pgd)
+		return 0;
+
+	trace_kvm_unmap_hva_range(start, end);
+	handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
+	return 0;
+}
+
+static void kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, void *data)
+{
+	pte_t *pte = (pte_t *)data;
+
+	stage2_set_pte(kvm, NULL, gpa, pte, false);
+}
+
+
+void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
+{
+	unsigned long end = hva + PAGE_SIZE;
+	pte_t stage2_pte;
+
+	if (!kvm->arch.pgd)
+		return;
+
+	trace_kvm_set_spte_hva(hva);
+	stage2_pte = pfn_pte(pte_pfn(pte), PAGE_S2);
+	handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte);
+}
+
+void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
+{
+	mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
+}
+
+phys_addr_t kvm_mmu_get_httbr(void)
+{
+	VM_BUG_ON(!virt_addr_valid(hyp_pgd));
+	return virt_to_phys(hyp_pgd);
+}
+
+int kvm_mmu_init(void)
+{
+	if (!hyp_pgd) {
+		kvm_err("Hyp mode PGD not allocated\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+/**
+ * kvm_clear_hyp_idmap - remove all idmaps from the hyp pgd
+ *
+ * Free the underlying pmds for all pgds in range and clear the pgds (but
+ * don't free them) afterwards.
+ */
+void kvm_clear_hyp_idmap(void)
+{
+	unsigned long addr, end;
+	unsigned long next;
+	pgd_t *pgd = hyp_pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+
+	addr = virt_to_phys(__hyp_idmap_text_start);
+	end = virt_to_phys(__hyp_idmap_text_end);
+
+	pgd += pgd_index(addr);
+	do {
+		next = pgd_addr_end(addr, end);
+		if (pgd_none_or_clear_bad(pgd))
+			continue;
+		pud = pud_offset(pgd, addr);
+		pmd = pmd_offset(pud, addr);
+
+		pud_clear(pud);
+		clean_pmd_entry(pmd);
+		pmd_free(NULL, (pmd_t *)((unsigned long)pmd & PAGE_MASK));
+	} while (pgd++, addr = next, addr < end);
+}
diff --git a/arch/arm/kvm/psci.c b/arch/arm/kvm/psci.c
new file mode 100644
index 00000000000..7ee5bb7a366
--- /dev/null
+++ b/arch/arm/kvm/psci.c
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) 2012 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kvm_host.h>
+#include <linux/wait.h>
+
+#include <asm/kvm_emulate.h>
+#include <asm/kvm_psci.h>
+
+/*
+ * This is an implementation of the Power State Coordination Interface
+ * as described in ARM document number ARM DEN 0022A.
+ */
+
+static void kvm_psci_vcpu_off(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.pause = true;
+}
+
+static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu)
+{
+	struct kvm *kvm = source_vcpu->kvm;
+	struct kvm_vcpu *vcpu;
+	wait_queue_head_t *wq;
+	unsigned long cpu_id;
+	phys_addr_t target_pc;
+
+	cpu_id = *vcpu_reg(source_vcpu, 1);
+	if (vcpu_mode_is_32bit(source_vcpu))
+		cpu_id &= ~((u32) 0);
+
+	if (cpu_id >= atomic_read(&kvm->online_vcpus))
+		return KVM_PSCI_RET_INVAL;
+
+	target_pc = *vcpu_reg(source_vcpu, 2);
+
+	vcpu = kvm_get_vcpu(kvm, cpu_id);
+
+	wq = kvm_arch_vcpu_wq(vcpu);
+	if (!waitqueue_active(wq))
+		return KVM_PSCI_RET_INVAL;
+
+	kvm_reset_vcpu(vcpu);
+
+	/* Gracefully handle Thumb2 entry point */
+	if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) {
+		target_pc &= ~((phys_addr_t) 1);
+		vcpu_set_thumb(vcpu);
+	}
+
+	*vcpu_pc(vcpu) = target_pc;
+	vcpu->arch.pause = false;
+	smp_mb();		/* Make sure the above is visible */
+
+	wake_up_interruptible(wq);
+
+	return KVM_PSCI_RET_SUCCESS;
+}
+
+/**
+ * kvm_psci_call - handle PSCI call if r0 value is in range
+ * @vcpu: Pointer to the VCPU struct
+ *
+ * Handle PSCI calls from guests through traps from HVC or SMC instructions.
+ * The calling convention is similar to SMC calls to the secure world, where
+ * the function number is placed in r0. This function returns true if the
+ * function number specified in r0 is within the PSCI range, and false
+ * otherwise.
+ */
+bool kvm_psci_call(struct kvm_vcpu *vcpu)
+{
+	unsigned long psci_fn = *vcpu_reg(vcpu, 0) & ~((u32) 0);
+	unsigned long val;
+
+	switch (psci_fn) {
+	case KVM_PSCI_FN_CPU_OFF:
+		kvm_psci_vcpu_off(vcpu);
+		val = KVM_PSCI_RET_SUCCESS;
+		break;
+	case KVM_PSCI_FN_CPU_ON:
+		val = kvm_psci_vcpu_on(vcpu);
+		break;
+	case KVM_PSCI_FN_CPU_SUSPEND:
+	case KVM_PSCI_FN_MIGRATE:
+		val = KVM_PSCI_RET_NI;
+		break;
+
+	default:
+		return false;
+	}
+
+	*vcpu_reg(vcpu, 0) = val;
+	return true;
+}
diff --git a/arch/arm/kvm/reset.c b/arch/arm/kvm/reset.c
new file mode 100644
index 00000000000..b80256b554c
--- /dev/null
+++ b/arch/arm/kvm/reset.c
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
+ */
+#include <linux/compiler.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kvm_host.h>
+#include <linux/kvm.h>
+
+#include <asm/unified.h>
+#include <asm/ptrace.h>
+#include <asm/cputype.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_coproc.h>
+
+/******************************************************************************
+ * Cortex-A15 Reset Values
+ */
+
+static const int a15_max_cpu_idx = 3;
+
+static struct kvm_regs a15_regs_reset = {
+	.usr_regs.ARM_cpsr = SVC_MODE | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT,
+};
+
+
+/*******************************************************************************
+ * Exported reset function
+ */
+
+/**
+ * kvm_reset_vcpu - sets core registers and cp15 registers to reset value
+ * @vcpu: The VCPU pointer
+ *
+ * This function finds the right table above and sets the registers on the
+ * virtual CPU struct to their architecturally defined reset values.
+ */
+int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
+{
+	struct kvm_regs *cpu_reset;
+
+	switch (vcpu->arch.target) {
+	case KVM_ARM_TARGET_CORTEX_A15:
+		if (vcpu->vcpu_id > a15_max_cpu_idx)
+			return -EINVAL;
+		cpu_reset = &a15_regs_reset;
+		vcpu->arch.midr = read_cpuid_id();
+		break;
+	default:
+		return -ENODEV;
+	}
+
+	/* Reset core registers */
+	memcpy(&vcpu->arch.regs, cpu_reset, sizeof(vcpu->arch.regs));
+
+	/* Reset CP15 registers */
+	kvm_reset_coprocs(vcpu);
+
+	return 0;
+}
diff --git a/arch/arm/kvm/trace.h b/arch/arm/kvm/trace.h
new file mode 100644
index 00000000000..a8e73ed5ad5
--- /dev/null
+++ b/arch/arm/kvm/trace.h
@@ -0,0 +1,235 @@
+#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_KVM_H
+
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM kvm
+
+/*
+ * Tracepoints for entry/exit to guest
+ */
+TRACE_EVENT(kvm_entry,
+	TP_PROTO(unsigned long vcpu_pc),
+	TP_ARGS(vcpu_pc),
+
+	TP_STRUCT__entry(
+		__field(	unsigned long,	vcpu_pc		)
+	),
+
+	TP_fast_assign(
+		__entry->vcpu_pc		= vcpu_pc;
+	),
+
+	TP_printk("PC: 0x%08lx", __entry->vcpu_pc)
+);
+
+TRACE_EVENT(kvm_exit,
+	TP_PROTO(unsigned long vcpu_pc),
+	TP_ARGS(vcpu_pc),
+
+	TP_STRUCT__entry(
+		__field(	unsigned long,	vcpu_pc		)
+	),
+
+	TP_fast_assign(
+		__entry->vcpu_pc		= vcpu_pc;
+	),
+
+	TP_printk("PC: 0x%08lx", __entry->vcpu_pc)
+);
+
+TRACE_EVENT(kvm_guest_fault,
+	TP_PROTO(unsigned long vcpu_pc, unsigned long hsr,
+		 unsigned long hxfar,
+		 unsigned long long ipa),
+	TP_ARGS(vcpu_pc, hsr, hxfar, ipa),
+
+	TP_STRUCT__entry(
+		__field(	unsigned long,	vcpu_pc		)
+		__field(	unsigned long,	hsr		)
+		__field(	unsigned long,	hxfar		)
+		__field(   unsigned long long,	ipa		)
+	),
+
+	TP_fast_assign(
+		__entry->vcpu_pc		= vcpu_pc;
+		__entry->hsr			= hsr;
+		__entry->hxfar			= hxfar;
+		__entry->ipa			= ipa;
+	),
+
+	TP_printk("guest fault at PC %#08lx (hxfar %#08lx, "
+		  "ipa %#16llx, hsr %#08lx)",
+		  __entry->vcpu_pc, __entry->hxfar,
+		  __entry->ipa, __entry->hsr)
+);
+
+TRACE_EVENT(kvm_irq_line,
+	TP_PROTO(unsigned int type, int vcpu_idx, int irq_num, int level),
+	TP_ARGS(type, vcpu_idx, irq_num, level),
+
+	TP_STRUCT__entry(
+		__field(	unsigned int,	type		)
+		__field(	int,		vcpu_idx	)
+		__field(	int,		irq_num		)
+		__field(	int,		level		)
+	),
+
+	TP_fast_assign(
+		__entry->type		= type;
+		__entry->vcpu_idx	= vcpu_idx;
+		__entry->irq_num	= irq_num;
+		__entry->level		= level;
+	),
+
+	TP_printk("Inject %s interrupt (%d), vcpu->idx: %d, num: %d, level: %d",
+		  (__entry->type == KVM_ARM_IRQ_TYPE_CPU) ?
"CPU" : +		  (__entry->type == KVM_ARM_IRQ_TYPE_PPI) ? "VGIC PPI" : +		  (__entry->type == KVM_ARM_IRQ_TYPE_SPI) ? "VGIC SPI" : "UNKNOWN", +		  __entry->type, __entry->vcpu_idx, __entry->irq_num, __entry->level) +); + +TRACE_EVENT(kvm_mmio_emulate, +	TP_PROTO(unsigned long vcpu_pc, unsigned long instr, +		 unsigned long cpsr), +	TP_ARGS(vcpu_pc, instr, cpsr), + +	TP_STRUCT__entry( +		__field(	unsigned long,	vcpu_pc		) +		__field(	unsigned long,	instr		) +		__field(	unsigned long,	cpsr		) +	), + +	TP_fast_assign( +		__entry->vcpu_pc		= vcpu_pc; +		__entry->instr			= instr; +		__entry->cpsr			= cpsr; +	), + +	TP_printk("Emulate MMIO at: 0x%08lx (instr: %08lx, cpsr: %08lx)", +		  __entry->vcpu_pc, __entry->instr, __entry->cpsr) +); + +/* Architecturally implementation defined CP15 register access */ +TRACE_EVENT(kvm_emulate_cp15_imp, +	TP_PROTO(unsigned long Op1, unsigned long Rt1, unsigned long CRn, +		 unsigned long CRm, unsigned long Op2, bool is_write), +	TP_ARGS(Op1, Rt1, CRn, CRm, Op2, is_write), + +	TP_STRUCT__entry( +		__field(	unsigned int,	Op1		) +		__field(	unsigned int,	Rt1		) +		__field(	unsigned int,	CRn		) +		__field(	unsigned int,	CRm		) +		__field(	unsigned int,	Op2		) +		__field(	bool,		is_write	) +	), + +	TP_fast_assign( +		__entry->is_write		= is_write; +		__entry->Op1			= Op1; +		__entry->Rt1			= Rt1; +		__entry->CRn			= CRn; +		__entry->CRm			= CRm; +		__entry->Op2			= Op2; +	), + +	TP_printk("Implementation defined CP15: %s\tp15, %u, r%u, c%u, c%u, %u", +			(__entry->is_write) ? "mcr" : "mrc", +			__entry->Op1, __entry->Rt1, __entry->CRn, +			__entry->CRm, __entry->Op2) +); + +TRACE_EVENT(kvm_wfi, +	TP_PROTO(unsigned long vcpu_pc), +	TP_ARGS(vcpu_pc), + +	TP_STRUCT__entry( +		__field(	unsigned long,	vcpu_pc		) +	), + +	TP_fast_assign( +		__entry->vcpu_pc		= vcpu_pc; +	), + +	TP_printk("guest executed wfi at: 0x%08lx", __entry->vcpu_pc) +); + +TRACE_EVENT(kvm_unmap_hva, +	TP_PROTO(unsigned long hva), +	TP_ARGS(hva), + +	TP_STRUCT__entry( +		__field(	unsigned long,	hva		) +	), + +	TP_fast_assign( +		__entry->hva		= hva; +	), + +	TP_printk("mmu notifier unmap hva: %#08lx", __entry->hva) +); + +TRACE_EVENT(kvm_unmap_hva_range, +	TP_PROTO(unsigned long start, unsigned long end), +	TP_ARGS(start, end), + +	TP_STRUCT__entry( +		__field(	unsigned long,	start		) +		__field(	unsigned long,	end		) +	), + +	TP_fast_assign( +		__entry->start		= start; +		__entry->end		= end; +	), + +	TP_printk("mmu notifier unmap range: %#08lx -- %#08lx", +		  __entry->start, __entry->end) +); + +TRACE_EVENT(kvm_set_spte_hva, +	TP_PROTO(unsigned long hva), +	TP_ARGS(hva), + +	TP_STRUCT__entry( +		__field(	unsigned long,	hva		) +	), + +	TP_fast_assign( +		__entry->hva		= hva; +	), + +	TP_printk("mmu notifier set pte hva: %#08lx", __entry->hva) +); + +TRACE_EVENT(kvm_hvc, +	TP_PROTO(unsigned long vcpu_pc, unsigned long r0, unsigned long imm), +	TP_ARGS(vcpu_pc, r0, imm), + +	TP_STRUCT__entry( +		__field(	unsigned long,	vcpu_pc		) +		__field(	unsigned long,	r0		) +		__field(	unsigned long,	imm		) +	), + +	TP_fast_assign( +		__entry->vcpu_pc		= vcpu_pc; +		__entry->r0		= r0; +		__entry->imm		= imm; +	), + +	TP_printk("HVC at 0x%08lx (r0: 0x%08lx, imm: 0x%lx", +		  __entry->vcpu_pc, __entry->r0, __entry->imm) +); + +#endif /* _TRACE_KVM_H */ + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH arch/arm/kvm +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE trace + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/arch/arm/mach-davinci/cpuidle.c 
b/arch/arm/mach-davinci/cpuidle.c index 9107691adbd..5ac9e9384b1 100644 --- a/arch/arm/mach-davinci/cpuidle.c +++ b/arch/arm/mach-davinci/cpuidle.c @@ -25,35 +25,44 @@  #define DAVINCI_CPUIDLE_MAX_STATES	2 -struct davinci_ops { -	void (*enter) (u32 flags); -	void (*exit) (u32 flags); -	u32 flags; -}; +static DEFINE_PER_CPU(struct cpuidle_device, davinci_cpuidle_device); +static void __iomem *ddr2_reg_base; +static bool ddr2_pdown; + +static void davinci_save_ddr_power(int enter, bool pdown) +{ +	u32 val; + +	val = __raw_readl(ddr2_reg_base + DDR2_SDRCR_OFFSET); + +	if (enter) { +		if (pdown) +			val |= DDR2_SRPD_BIT; +		else +			val &= ~DDR2_SRPD_BIT; +		val |= DDR2_LPMODEN_BIT; +	} else { +		val &= ~(DDR2_SRPD_BIT | DDR2_LPMODEN_BIT); +	} + +	__raw_writel(val, ddr2_reg_base + DDR2_SDRCR_OFFSET); +}  /* Actual code that puts the SoC in different idle states */  static int davinci_enter_idle(struct cpuidle_device *dev,  				struct cpuidle_driver *drv,  						int index)  { -	struct cpuidle_state_usage *state_usage = &dev->states_usage[index]; -	struct davinci_ops *ops = cpuidle_get_statedata(state_usage); - -	if (ops && ops->enter) -		ops->enter(ops->flags); +	davinci_save_ddr_power(1, ddr2_pdown);  	index = cpuidle_wrap_enter(dev,	drv, index,  				arm_cpuidle_simple_enter); -	if (ops && ops->exit) -		ops->exit(ops->flags); +	davinci_save_ddr_power(0, ddr2_pdown);  	return index;  } -/* fields in davinci_ops.flags */ -#define DAVINCI_CPUIDLE_FLAGS_DDR2_PWDN	BIT(0) -  static struct cpuidle_driver davinci_idle_driver = {  	.name			= "cpuidle-davinci",  	.owner			= THIS_MODULE, @@ -70,45 +79,6 @@ static struct cpuidle_driver davinci_idle_driver = {  	.state_count = DAVINCI_CPUIDLE_MAX_STATES,  }; -static DEFINE_PER_CPU(struct cpuidle_device, davinci_cpuidle_device); -static void __iomem *ddr2_reg_base; - -static void davinci_save_ddr_power(int enter, bool pdown) -{ -	u32 val; - -	val = __raw_readl(ddr2_reg_base + DDR2_SDRCR_OFFSET); - -	if (enter) { -		if (pdown) -			val |= DDR2_SRPD_BIT; -		else -			val &= ~DDR2_SRPD_BIT; -		val |= DDR2_LPMODEN_BIT; -	} else { -		val &= ~(DDR2_SRPD_BIT | DDR2_LPMODEN_BIT); -	} - -	__raw_writel(val, ddr2_reg_base + DDR2_SDRCR_OFFSET); -} - -static void davinci_c2state_enter(u32 flags) -{ -	davinci_save_ddr_power(1, !!(flags & DAVINCI_CPUIDLE_FLAGS_DDR2_PWDN)); -} - -static void davinci_c2state_exit(u32 flags) -{ -	davinci_save_ddr_power(0, !!(flags & DAVINCI_CPUIDLE_FLAGS_DDR2_PWDN)); -} - -static struct davinci_ops davinci_states[DAVINCI_CPUIDLE_MAX_STATES] = { -	[1] = { -		.enter	= davinci_c2state_enter, -		.exit	= davinci_c2state_exit, -	}, -}; -  static int __init davinci_cpuidle_probe(struct platform_device *pdev)  {  	int ret; @@ -124,11 +94,7 @@ static int __init davinci_cpuidle_probe(struct platform_device *pdev)  	ddr2_reg_base = pdata->ddr2_ctlr_base; -	if (pdata->ddr2_pdown) -		davinci_states[1].flags |= DAVINCI_CPUIDLE_FLAGS_DDR2_PWDN; -	cpuidle_set_statedata(&device->states_usage[1], &davinci_states[1]); - -	device->state_count = DAVINCI_CPUIDLE_MAX_STATES; +	ddr2_pdown = pdata->ddr2_pdown;  	ret = cpuidle_register_driver(&davinci_idle_driver);  	if (ret) { diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig index e103c290bc9..85afb031b67 100644 --- a/arch/arm/mach-exynos/Kconfig +++ b/arch/arm/mach-exynos/Kconfig @@ -414,7 +414,7 @@ config MACH_EXYNOS4_DT  	select CPU_EXYNOS4210  	select HAVE_SAMSUNG_KEYPAD if INPUT_KEYBOARD  	select PINCTRL -	select PINCTRL_EXYNOS4 +	select PINCTRL_EXYNOS  	select USE_OF  	help  	  Machine 
support for Samsung Exynos4 machine with device tree enabled. diff --git a/arch/arm/mach-exynos/include/mach/cpufreq.h b/arch/arm/mach-exynos/include/mach/cpufreq.h index 7517c3f417a..b5d39dd03b2 100644 --- a/arch/arm/mach-exynos/include/mach/cpufreq.h +++ b/arch/arm/mach-exynos/include/mach/cpufreq.h @@ -18,12 +18,25 @@ enum cpufreq_level_index {  	L20,  }; +#define APLL_FREQ(f, a0, a1, a2, a3, a4, a5, a6, a7, b0, b1, b2, m, p, s) \ +	{ \ +		.freq = (f) * 1000, \ +		.clk_div_cpu0 = ((a0) | (a1) << 4 | (a2) << 8 | (a3) << 12 | \ +			(a4) << 16 | (a5) << 20 | (a6) << 24 | (a7) << 28), \ +		.clk_div_cpu1 = (b0 << 0 | b1 << 4 | b2 << 8), \ +		.mps = ((m) << 16 | (p) << 8 | (s)), \ +	} + +struct apll_freq { +	unsigned int freq; +	u32 clk_div_cpu0; +	u32 clk_div_cpu1; +	u32 mps; +}; +  struct exynos_dvfs_info {  	unsigned long	mpll_freq_khz;  	unsigned int	pll_safe_idx; -	unsigned int	pm_lock_idx; -	unsigned int	max_support_idx; -	unsigned int	min_support_idx;  	struct clk	*cpu_clk;  	unsigned int	*volt_table;  	struct cpufreq_frequency_table	*freq_table; diff --git a/arch/arm/mach-exynos/mach-exynos5-dt.c b/arch/arm/mach-exynos/mach-exynos5-dt.c index e99d3d8f2bc..ea9e3020972 100644 --- a/arch/arm/mach-exynos/mach-exynos5-dt.c +++ b/arch/arm/mach-exynos/mach-exynos5-dt.c @@ -104,6 +104,12 @@ static const struct of_dev_auxdata exynos5250_auxdata_lookup[] __initconst = {  	OF_DEV_AUXDATA("samsung,mfc-v6", 0x11000000, "s5p-mfc-v6", NULL),  	OF_DEV_AUXDATA("samsung,exynos5250-tmu", 0x10060000,  				"exynos-tmu", NULL), +	OF_DEV_AUXDATA("samsung,i2s-v5", 0x03830000, +				"samsung-i2s.0", NULL), +	OF_DEV_AUXDATA("samsung,i2s-v5", 0x12D60000, +				"samsung-i2s.1", NULL), +	OF_DEV_AUXDATA("samsung,i2s-v5", 0x12D70000, +				"samsung-i2s.2", NULL),  	{},  }; diff --git a/arch/arm/mach-highbank/Kconfig b/arch/arm/mach-highbank/Kconfig index 551c97e87a7..44b12f9c158 100644 --- a/arch/arm/mach-highbank/Kconfig +++ b/arch/arm/mach-highbank/Kconfig @@ -1,5 +1,7 @@  config ARCH_HIGHBANK  	bool "Calxeda ECX-1000/2000 (Highbank/Midway)" if ARCH_MULTI_V7 +	select ARCH_HAS_CPUFREQ +	select ARCH_HAS_OPP  	select ARCH_WANT_OPTIONAL_GPIOLIB  	select ARM_AMBA  	select ARM_GIC @@ -11,5 +13,7 @@ config ARCH_HIGHBANK  	select GENERIC_CLOCKEVENTS  	select HAVE_ARM_SCU  	select HAVE_SMP +	select MAILBOX +	select PL320_MBOX  	select SPARSE_IRQ  	select USE_OF diff --git a/arch/arm/mach-highbank/core.h b/arch/arm/mach-highbank/core.h index 80235b46cb5..3f65206a9b9 100644 --- a/arch/arm/mach-highbank/core.h +++ b/arch/arm/mach-highbank/core.h @@ -2,7 +2,6 @@  #define __HIGHBANK_CORE_H  extern void highbank_set_cpu_jump(int cpu, void *jump_addr); -extern void highbank_clocks_init(void);  extern void highbank_restart(char, const char *);  extern void __iomem *scu_base_addr; diff --git a/arch/arm/mach-highbank/highbank.c b/arch/arm/mach-highbank/highbank.c index e6c06128293..65656ff0eb3 100644 --- a/arch/arm/mach-highbank/highbank.c +++ b/arch/arm/mach-highbank/highbank.c @@ -25,6 +25,7 @@  #include <linux/of_address.h>  #include <linux/smp.h>  #include <linux/amba/bus.h> +#include <linux/clk-provider.h>  #include <asm/arch_timer.h>  #include <asm/cacheflush.h> @@ -117,7 +118,7 @@ static void __init highbank_timer_init(void)  	WARN_ON(!timer_base);  	irq = irq_of_parse_and_map(np, 0); -	highbank_clocks_init(); +	of_clk_init(NULL);  	lookup.clk = of_clk_get(np, 0);  	clkdev_add(&lookup); diff --git a/arch/arm/mach-omap2/board-2430sdp.c b/arch/arm/mach-omap2/board-2430sdp.c index 4815ea6f8f5..1337f2c51f9 100644 --- 
a/arch/arm/mach-omap2/board-2430sdp.c +++ b/arch/arm/mach-omap2/board-2430sdp.c @@ -27,6 +27,7 @@  #include <linux/clk.h>  #include <linux/io.h>  #include <linux/gpio.h> +#include <linux/usb/phy.h>  #include <asm/mach-types.h>  #include <asm/mach/arch.h> @@ -263,6 +264,7 @@ static void __init omap_2430sdp_init(void)  	omap_hsmmc_init(mmc);  	omap_mux_init_signal("usb0hs_stp", OMAP_PULL_ENA | OMAP_PULL_UP); +	usb_bind_phy("musb-hdrc.0.auto", 0, "twl4030_usb");  	usb_musb_init(NULL);  	board_smc91x_init(); diff --git a/arch/arm/mach-omap2/board-3430sdp.c b/arch/arm/mach-omap2/board-3430sdp.c index bb73afc9ac1..8a2e242910e 100644 --- a/arch/arm/mach-omap2/board-3430sdp.c +++ b/arch/arm/mach-omap2/board-3430sdp.c @@ -25,6 +25,7 @@  #include <linux/gpio.h>  #include <linux/mmc/host.h>  #include <linux/platform_data/spi-omap2-mcspi.h> +#include <linux/usb/phy.h>  #include <asm/mach-types.h>  #include <asm/mach/arch.h> @@ -579,6 +580,7 @@ static void __init omap_3430sdp_init(void)  	omap_ads7846_init(1, gpio_pendown, 310, NULL);  	omap_serial_init();  	omap_sdrc_init(hyb18m512160af6_sdrc_params, NULL); +	usb_bind_phy("musb-hdrc.0.auto", 0, "twl4030_usb");  	usb_musb_init(NULL);  	board_smc91x_init();  	board_flash_init(sdp_flash_partitions, chip_sel_3430, 0); diff --git a/arch/arm/mach-omap2/board-4430sdp.c b/arch/arm/mach-omap2/board-4430sdp.c index 1cc6696594f..8e8efccf762 100644 --- a/arch/arm/mach-omap2/board-4430sdp.c +++ b/arch/arm/mach-omap2/board-4430sdp.c @@ -28,6 +28,7 @@  #include <linux/leds_pwm.h>  #include <linux/platform_data/omap4-keypad.h>  #include <linux/usb/musb.h> +#include <linux/usb/phy.h>  #include <asm/hardware/gic.h>  #include <asm/mach-types.h> @@ -696,6 +697,7 @@ static void __init omap_4430sdp_init(void)  	omap4_sdp4430_wifi_init();  	omap4_twl6030_hsmmc_init(mmc); +	usb_bind_phy("musb-hdrc.0.auto", 0, "omap-usb2.1.auto");  	usb_musb_init(&musb_board_data);  	status = omap_ethernet_init(); diff --git a/arch/arm/mach-omap2/board-cm-t35.c b/arch/arm/mach-omap2/board-cm-t35.c index b3102c2f4a3..f1172f2f1a7 100644 --- a/arch/arm/mach-omap2/board-cm-t35.c +++ b/arch/arm/mach-omap2/board-cm-t35.c @@ -30,6 +30,7 @@  #include <linux/regulator/fixed.h>  #include <linux/regulator/machine.h>  #include <linux/mmc/host.h> +#include <linux/usb/phy.h>  #include <linux/spi/spi.h>  #include <linux/spi/tdo24m.h> @@ -724,6 +725,7 @@ static void __init cm_t3x_common_init(void)  	cm_t35_init_display();  	omap_twl4030_audio_init("cm-t3x"); +	usb_bind_phy("musb-hdrc.0.auto", 0, "twl4030_usb");  	usb_musb_init(NULL);  	cm_t35_init_usbh();  	cm_t35_init_camera(); diff --git a/arch/arm/mach-omap2/board-devkit8000.c b/arch/arm/mach-omap2/board-devkit8000.c index 12865af25d3..77cade52b02 100644 --- a/arch/arm/mach-omap2/board-devkit8000.c +++ b/arch/arm/mach-omap2/board-devkit8000.c @@ -29,6 +29,7 @@  #include <linux/mtd/partitions.h>  #include <linux/mtd/nand.h>  #include <linux/mmc/host.h> +#include <linux/usb/phy.h>  #include <linux/regulator/machine.h>  #include <linux/i2c/twl.h> @@ -622,6 +623,7 @@ static void __init devkit8000_init(void)  	omap_ads7846_init(2, OMAP3_DEVKIT_TS_GPIO, 0, NULL); +	usb_bind_phy("musb-hdrc.0.auto", 0, "twl4030_usb");  	usb_musb_init(NULL);  	usbhs_init(&usbhs_bdata);  	board_nand_init(devkit8000_nand_partitions, diff --git a/arch/arm/mach-omap2/board-igep0020.c b/arch/arm/mach-omap2/board-igep0020.c index 0f24cb84ba5..15e58815a13 100644 --- a/arch/arm/mach-omap2/board-igep0020.c +++ b/arch/arm/mach-omap2/board-igep0020.c @@ -18,6 +18,7 @@  #include 
<linux/gpio.h>  #include <linux/interrupt.h>  #include <linux/input.h> +#include <linux/usb/phy.h>  #include <linux/regulator/machine.h>  #include <linux/regulator/fixed.h> @@ -625,6 +626,7 @@ static void __init igep_init(void)  	omap_serial_init();  	omap_sdrc_init(m65kxxxxam_sdrc_params,  				  m65kxxxxam_sdrc_params); +	usb_bind_phy("musb-hdrc.0.auto", 0, "twl4030_usb");  	usb_musb_init(NULL);  	igep_flash_init(); diff --git a/arch/arm/mach-omap2/board-ldp.c b/arch/arm/mach-omap2/board-ldp.c index 0869f4f3d3e..3b5510a433f 100644 --- a/arch/arm/mach-omap2/board-ldp.c +++ b/arch/arm/mach-omap2/board-ldp.c @@ -28,6 +28,7 @@  #include <linux/io.h>  #include <linux/smsc911x.h>  #include <linux/mmc/host.h> +#include <linux/usb/phy.h>  #include <linux/platform_data/spi-omap2-mcspi.h>  #include <asm/mach-types.h> @@ -418,6 +419,7 @@ static void __init omap_ldp_init(void)  	omap_ads7846_init(1, 54, 310, NULL);  	omap_serial_init();  	omap_sdrc_init(NULL, NULL); +	usb_bind_phy("musb-hdrc.0.auto", 0, "twl4030_usb");  	usb_musb_init(NULL);  	board_nand_init(ldp_nand_partitions, ARRAY_SIZE(ldp_nand_partitions),  			ZOOM_NAND_CS, 0, nand_default_timings); diff --git a/arch/arm/mach-omap2/board-omap3beagle.c b/arch/arm/mach-omap2/board-omap3beagle.c index 22c483d5dfa..4616f9269d0 100644 --- a/arch/arm/mach-omap2/board-omap3beagle.c +++ b/arch/arm/mach-omap2/board-omap3beagle.c @@ -30,6 +30,7 @@  #include <linux/mtd/partitions.h>  #include <linux/mtd/nand.h>  #include <linux/mmc/host.h> +#include <linux/usb/phy.h>  #include <linux/regulator/machine.h>  #include <linux/i2c/twl.h> @@ -519,6 +520,7 @@ static void __init omap3_beagle_init(void)  	omap_sdrc_init(mt46h32m32lf6_sdrc_params,  				  mt46h32m32lf6_sdrc_params); +	usb_bind_phy("musb-hdrc.0.auto", 0, "twl4030_usb");  	usb_musb_init(NULL);  	usbhs_init(&usbhs_bdata);  	board_nand_init(omap3beagle_nand_partitions, diff --git a/arch/arm/mach-omap2/board-omap3evm.c b/arch/arm/mach-omap2/board-omap3evm.c index 3985f35aee0..20238950345 100644 --- a/arch/arm/mach-omap2/board-omap3evm.c +++ b/arch/arm/mach-omap2/board-omap3evm.c @@ -41,6 +41,7 @@  #include <linux/regulator/machine.h>  #include <linux/mmc/host.h>  #include <linux/export.h> +#include <linux/usb/phy.h>  #include <asm/mach-types.h>  #include <asm/mach/arch.h> @@ -309,7 +310,7 @@ static struct omap2_hsmmc_info mmc[] = {  		.gpio_wp	= 63,  		.deferred	= true,  	}, -#ifdef CONFIG_WL12XX_PLATFORM_DATA +#ifdef CONFIG_WILINK_PLATFORM_DATA  	{  		.name		= "wl1271",  		.mmc		= 2, @@ -450,7 +451,7 @@ static struct regulator_init_data omap3evm_vio = {  	.consumer_supplies	= omap3evm_vio_supply,  }; -#ifdef CONFIG_WL12XX_PLATFORM_DATA +#ifdef CONFIG_WILINK_PLATFORM_DATA  #define OMAP3EVM_WLAN_PMENA_GPIO	(150)  #define OMAP3EVM_WLAN_IRQ_GPIO		(149) @@ -563,7 +564,7 @@ static struct omap_board_mux omap35x_board_mux[] __initdata = {  				OMAP_PIN_OFF_NONE),  	OMAP3_MUX(GPMC_WAIT2, OMAP_MUX_MODE4 | OMAP_PIN_INPUT_PULLUP |  				OMAP_PIN_OFF_NONE), -#ifdef CONFIG_WL12XX_PLATFORM_DATA +#ifdef CONFIG_WILINK_PLATFORM_DATA  	/* WLAN IRQ - GPIO 149 */  	OMAP3_MUX(UART1_RTS, OMAP_MUX_MODE4 | OMAP_PIN_INPUT), @@ -601,7 +602,7 @@ static struct omap_board_mux omap36x_board_mux[] __initdata = {  	OMAP3_MUX(SYS_BOOT4, OMAP_MUX_MODE3 | OMAP_PIN_OFF_NONE),  	OMAP3_MUX(SYS_BOOT5, OMAP_MUX_MODE3 | OMAP_PIN_OFF_NONE),  	OMAP3_MUX(SYS_BOOT6, OMAP_MUX_MODE3 | OMAP_PIN_OFF_NONE), -#ifdef CONFIG_WL12XX_PLATFORM_DATA +#ifdef CONFIG_WILINK_PLATFORM_DATA  	/* WLAN IRQ - GPIO 149 */  	OMAP3_MUX(UART1_RTS, OMAP_MUX_MODE4 | 
OMAP_PIN_INPUT), @@ -637,7 +638,7 @@ static struct gpio omap3_evm_ehci_gpios[] __initdata = {  static void __init omap3_evm_wl12xx_init(void)  { -#ifdef CONFIG_WL12XX_PLATFORM_DATA +#ifdef CONFIG_WILINK_PLATFORM_DATA  	int ret;  	/* WL12xx WLAN Init */ @@ -734,6 +735,7 @@ static void __init omap3_evm_init(void)  		omap_mux_init_gpio(135, OMAP_PIN_OUTPUT);  		usbhs_bdata.reset_gpio_port[1] = 135;  	} +	usb_bind_phy("musb-hdrc.0.auto", 0, "twl4030_usb");  	usb_musb_init(&musb_board_data);  	usbhs_init(&usbhs_bdata);  	board_nand_init(omap3evm_nand_partitions, diff --git a/arch/arm/mach-omap2/board-omap3logic.c b/arch/arm/mach-omap2/board-omap3logic.c index 2a065ba6eb5..9409eb897e2 100644 --- a/arch/arm/mach-omap2/board-omap3logic.c +++ b/arch/arm/mach-omap2/board-omap3logic.c @@ -29,6 +29,7 @@  #include <linux/i2c/twl.h>  #include <linux/mmc/host.h> +#include <linux/usb/phy.h>  #include <asm/mach-types.h>  #include <asm/mach/arch.h> @@ -215,6 +216,7 @@ static void __init omap3logic_init(void)  	board_mmc_init();  	board_smsc911x_init(); +	usb_bind_phy("musb-hdrc.0.auto", 0, "twl4030_usb");  	usb_musb_init(NULL);  	/* Ensure SDRC pins are mux'd for self-refresh */ diff --git a/arch/arm/mach-omap2/board-omap3pandora.c b/arch/arm/mach-omap2/board-omap3pandora.c index a53a6683c1b..1ac3e81969e 100644 --- a/arch/arm/mach-omap2/board-omap3pandora.c +++ b/arch/arm/mach-omap2/board-omap3pandora.c @@ -35,6 +35,7 @@  #include <linux/mmc/host.h>  #include <linux/mmc/card.h>  #include <linux/regulator/fixed.h> +#include <linux/usb/phy.h>  #include <linux/platform_data/spi-omap2-mcspi.h>  #include <asm/mach-types.h> @@ -601,6 +602,7 @@ static void __init omap3pandora_init(void)  			ARRAY_SIZE(omap3pandora_spi_board_info));  	omap_ads7846_init(1, OMAP3_PANDORA_TS_GPIO, 0, NULL);  	usbhs_init(&usbhs_bdata); +	usb_bind_phy("musb-hdrc.0.auto", 0, "twl4030_usb");  	usb_musb_init(NULL);  	gpmc_nand_init(&pandora_nand_data, NULL); diff --git a/arch/arm/mach-omap2/board-omap3stalker.c b/arch/arm/mach-omap2/board-omap3stalker.c index 53a6cbcf974..63cb204e081 100644 --- a/arch/arm/mach-omap2/board-omap3stalker.c +++ b/arch/arm/mach-omap2/board-omap3stalker.c @@ -33,6 +33,7 @@  #include <linux/interrupt.h>  #include <linux/smsc911x.h>  #include <linux/i2c/at24.h> +#include <linux/usb/phy.h>  #include <asm/mach-types.h>  #include <asm/mach/arch.h> @@ -404,6 +405,7 @@ static void __init omap3_stalker_init(void)  	omap_serial_init();  	omap_sdrc_init(mt46h32m32lf6_sdrc_params, NULL); +	usb_bind_phy("musb-hdrc.0.auto", 0, "twl4030_usb");  	usb_musb_init(NULL);  	usbhs_init(&usbhs_bdata);  	omap_ads7846_init(1, OMAP3_STALKER_TS_GPIO, 310, NULL); diff --git a/arch/arm/mach-omap2/board-omap3touchbook.c b/arch/arm/mach-omap2/board-omap3touchbook.c index 263cb9cfbf3..6b22ce3581d 100644 --- a/arch/arm/mach-omap2/board-omap3touchbook.c +++ b/arch/arm/mach-omap2/board-omap3touchbook.c @@ -28,6 +28,7 @@  #include <linux/mtd/partitions.h>  #include <linux/mtd/nand.h>  #include <linux/mmc/host.h> +#include <linux/usb/phy.h>  #include <linux/platform_data/spi-omap2-mcspi.h>  #include <linux/spi/spi.h> @@ -365,6 +366,7 @@ static void __init omap3_touchbook_init(void)  	/* Touchscreen and accelerometer */  	omap_ads7846_init(4, OMAP3_TS_GPIO, 310, &ads7846_pdata); +	usb_bind_phy("musb-hdrc.0.auto", 0, "twl4030_usb");  	usb_musb_init(NULL);  	usbhs_init(&usbhs_bdata);  	board_nand_init(omap3touchbook_nand_partitions, diff --git a/arch/arm/mach-omap2/board-omap4panda.c b/arch/arm/mach-omap2/board-omap4panda.c index 
769c1feee1c..40184cc494f 100644 --- a/arch/arm/mach-omap2/board-omap4panda.c +++ b/arch/arm/mach-omap2/board-omap4panda.c @@ -30,6 +30,7 @@  #include <linux/regulator/fixed.h>  #include <linux/ti_wilink_st.h>  #include <linux/usb/musb.h> +#include <linux/usb/phy.h>  #include <linux/wl12xx.h>  #include <linux/platform_data/omap-abe-twl6040.h> @@ -447,6 +448,7 @@ static void __init omap4_panda_init(void)  	omap_sdrc_init(NULL, NULL);  	omap4_twl6030_hsmmc_init(mmc);  	omap4_ehci_init(); +	usb_bind_phy("musb-hdrc.0.auto", 0, "omap-usb2.1.auto");  	usb_musb_init(&musb_board_data);  	omap4_panda_display_init();  } diff --git a/arch/arm/mach-omap2/board-overo.c b/arch/arm/mach-omap2/board-overo.c index c8fde3e5644..7e43ff3f704 100644 --- a/arch/arm/mach-omap2/board-overo.c +++ b/arch/arm/mach-omap2/board-overo.c @@ -36,6 +36,7 @@  #include <linux/mtd/nand.h>  #include <linux/mtd/partitions.h>  #include <linux/mmc/host.h> +#include <linux/usb/phy.h>  #include <linux/platform_data/mtd-nand-omap2.h>  #include <linux/platform_data/spi-omap2-mcspi.h> @@ -499,6 +500,7 @@ static void __init overo_init(void)  				  mt46h32m32lf6_sdrc_params);  	board_nand_init(overo_nand_partitions,  			ARRAY_SIZE(overo_nand_partitions), NAND_CS, 0, NULL); +	usb_bind_phy("musb-hdrc.0.auto", 0, "twl4030_usb");  	usb_musb_init(NULL);  	usbhs_init(&usbhs_bdata);  	overo_spi_init(); diff --git a/arch/arm/mach-omap2/board-rm680.c b/arch/arm/mach-omap2/board-rm680.c index 0c777b75e48..f8a272c253f 100644 --- a/arch/arm/mach-omap2/board-rm680.c +++ b/arch/arm/mach-omap2/board-rm680.c @@ -18,6 +18,7 @@  #include <linux/regulator/machine.h>  #include <linux/regulator/consumer.h>  #include <linux/platform_data/mtd-onenand-omap2.h> +#include <linux/usb/phy.h>  #include <asm/mach/arch.h>  #include <asm/mach-types.h> @@ -134,6 +135,7 @@ static void __init rm680_init(void)  	sdrc_params = nokia_get_sdram_timings();  	omap_sdrc_init(sdrc_params, sdrc_params); +	usb_bind_phy("musb-hdrc.0.auto", 0, "twl4030_usb");  	usb_musb_init(NULL);  	rm680_peripherals_init();  } diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c index cf07e289b4e..f3d075baebb 100644 --- a/arch/arm/mach-omap2/board-rx51-peripherals.c +++ b/arch/arm/mach-omap2/board-rx51-peripherals.c @@ -42,7 +42,7 @@  #include <media/si4713.h>  #include <linux/leds-lp5523.h> -#include <../drivers/staging/iio/light/tsl2563.h> +#include <linux/platform_data/tsl2563.h>  #include <linux/lis3lv02d.h>  #if defined(CONFIG_IR_RX51) || defined(CONFIG_IR_RX51_MODULE) diff --git a/arch/arm/mach-omap2/board-zoom-peripherals.c b/arch/arm/mach-omap2/board-zoom-peripherals.c index 26e07addc9d..dc5498b1b3a 100644 --- a/arch/arm/mach-omap2/board-zoom-peripherals.c +++ b/arch/arm/mach-omap2/board-zoom-peripherals.c @@ -20,6 +20,7 @@  #include <linux/wl12xx.h>  #include <linux/mmc/host.h>  #include <linux/platform_data/gpio-omap.h> +#include <linux/usb/phy.h>  #include <asm/mach-types.h>  #include <asm/mach/arch.h> @@ -298,6 +299,7 @@ void __init zoom_peripherals_init(void)  	omap_hsmmc_init(mmc);  	omap_i2c_init();  	platform_device_register(&omap_vwlan_device); +	usb_bind_phy("musb-hdrc.0.auto", 0, "twl4030_usb");  	usb_musb_init(NULL);  	enable_board_wakeup_source();  	omap_serial_init(); diff --git a/arch/arm/mach-omap2/devices.c b/arch/arm/mach-omap2/devices.c index 626f3ea3142..b6cc233214d 100644 --- a/arch/arm/mach-omap2/devices.c +++ b/arch/arm/mach-omap2/devices.c @@ -20,6 +20,7 @@  #include <linux/pinctrl/machine.h>  #include 
<linux/platform_data/omap4-keypad.h>  #include <linux/platform_data/omap_ocp2scp.h> +#include <linux/usb/omap_control_usb.h>  #include <asm/mach-types.h>  #include <asm/mach/map.h> @@ -254,6 +255,49 @@ static inline void omap_init_camera(void)  #endif  } +#if IS_ENABLED(CONFIG_OMAP_CONTROL_USB) +static struct omap_control_usb_platform_data omap4_control_usb_pdata = { +	.type = 1, +}; + +struct resource omap4_control_usb_res[] = { +	{ +		.name	= "control_dev_conf", +		.start	= 0x4a002300, +		.end	= 0x4a002303, +		.flags	= IORESOURCE_MEM, +	}, +	{ +		.name	= "otghs_control", +		.start	= 0x4a00233c, +		.end	= 0x4a00233f, +		.flags	= IORESOURCE_MEM, +	}, +}; + +static struct platform_device omap4_control_usb = { +	.name = "omap-control-usb", +	.id = -1, +	.dev = { +		.platform_data = &omap4_control_usb_pdata, +	}, +	.num_resources = 2, +	.resource = omap4_control_usb_res, +}; + +static inline void __init omap_init_control_usb(void) +{ +	if (!cpu_is_omap44xx()) +		return; + +	if (platform_device_register(&omap4_control_usb)) +		pr_err("Error registering omap_control_usb device\n"); +} + +#else +static inline void omap_init_control_usb(void) { } +#endif /* CONFIG_OMAP_CONTROL_USB */ +  int __init omap4_keyboard_init(struct omap4_keypad_platform_data  			*sdp4430_keypad_data, struct omap_board_data *bdata)  { @@ -721,6 +765,7 @@ static int __init omap2_init_devices(void)  	omap_init_mbox();  	/* If dtb is there, the devices will be created dynamically */  	if (!of_have_populated_dt()) { +		omap_init_control_usb();  		omap_init_dmic();  		omap_init_mcpdm();  		omap_init_mcspi(); diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c index 8033cb747c8..64bac53da0e 100644 --- a/arch/arm/mach-omap2/gpmc.c +++ b/arch/arm/mach-omap2/gpmc.c @@ -1134,11 +1134,9 @@ static int gpmc_probe(struct platform_device *pdev)  	phys_base = res->start;  	mem_size = resource_size(res); -	gpmc_base = devm_request_and_ioremap(&pdev->dev, res); -	if (!gpmc_base) { -		dev_err(&pdev->dev, "error: request memory / ioremap\n"); -		return -EADDRNOTAVAIL; -	} +	gpmc_base = devm_ioremap_resource(&pdev->dev, res); +	if (IS_ERR(gpmc_base)) +		return PTR_ERR(gpmc_base);  	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);  	if (res == NULL) diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c index 793f54ac7d1..624a7e84a68 100644 --- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c @@ -2702,13 +2702,6 @@ static struct resource omap44xx_usb_phy_and_pll_addrs[] = {  		.end		= 0x4a0ae000,  		.flags		= IORESOURCE_MEM,  	}, -	{ -		/* XXX: Remove this once control module driver is in place */ -		.name		= "ctrl_dev", -		.start		= 0x4a002300, -		.end		= 0x4a002303, -		.flags		= IORESOURCE_MEM, -	},  	{ }  }; @@ -6156,12 +6149,6 @@ static struct omap_hwmod_addr_space omap44xx_usb_otg_hs_addrs[] = {  		.pa_end		= 0x4a0ab7ff,  		.flags		= ADDR_TYPE_RT  	}, -	{ -		/* XXX: Remove this once control module driver is in place */ -		.pa_start	= 0x4a00233c, -		.pa_end		= 0x4a00233f, -		.flags		= ADDR_TYPE_RT -	},  	{ }  }; diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c index 7be3622cfc8..2d93d8b2383 100644 --- a/arch/arm/mach-omap2/pm34xx.c +++ b/arch/arm/mach-omap2/pm34xx.c @@ -351,12 +351,10 @@ static void omap3_pm_idle(void)  	if (omap_irq_pending())  		goto out; -	trace_power_start(POWER_CSTATE, 1, smp_processor_id());  	trace_cpu_idle(1, smp_processor_id());  	omap_sram_idle(); -	
trace_power_end(smp_processor_id());  	trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());  out: diff --git a/arch/arm/mach-omap2/usb-musb.c b/arch/arm/mach-omap2/usb-musb.c index 7b33b375fe7..9d27e3f8a09 100644 --- a/arch/arm/mach-omap2/usb-musb.c +++ b/arch/arm/mach-omap2/usb-musb.c @@ -85,6 +85,9 @@ void __init usb_musb_init(struct omap_musb_board_data *musb_board_data)  	musb_plat.mode = board_data->mode;  	musb_plat.extvbus = board_data->extvbus; +	if (cpu_is_omap44xx()) +		musb_plat.has_mailbox = true; +  	if (soc_is_am35xx()) {  		oh_name = "am35x_otg_hs";  		name = "musb-am35x"; diff --git a/arch/arm/mach-pxa/pxa27x.c b/arch/arm/mach-pxa/pxa27x.c index 616cb87b617..69985b06c0d 100644 --- a/arch/arm/mach-pxa/pxa27x.c +++ b/arch/arm/mach-pxa/pxa27x.c @@ -53,17 +53,25 @@ static unsigned long ac97_reset_config[] = {  	GPIO95_AC97_nRESET,  }; -void pxa27x_assert_ac97reset(int reset_gpio, int on) +void pxa27x_configure_ac97reset(int reset_gpio, bool to_gpio)  { +	/* +	 * This helper function is used to work around a bug in the pxa27x's +	 * ac97 controller during a warm reset.  The configuration of the +	 * reset_gpio is changed as follows: +	 * to_gpio == true: configured to generic output gpio and driven high +	 * to_gpio == false: configured to ac97 controller alt fn AC97_nRESET +	 */ +  	if (reset_gpio == 113) -		pxa2xx_mfp_config(on ? &ac97_reset_config[0] : -				       &ac97_reset_config[1], 1); +		pxa2xx_mfp_config(to_gpio ? &ac97_reset_config[0] : +				  &ac97_reset_config[1], 1);  	if (reset_gpio == 95) -		pxa2xx_mfp_config(on ? &ac97_reset_config[2] : -				       &ac97_reset_config[3], 1); +		pxa2xx_mfp_config(to_gpio ? &ac97_reset_config[2] : +				  &ac97_reset_config[3], 1);  } -EXPORT_SYMBOL_GPL(pxa27x_assert_ac97reset); +EXPORT_SYMBOL_GPL(pxa27x_configure_ac97reset);  /* Crystal clock: 13MHz */  #define BASE_CLK	13000000 diff --git a/arch/arm/mach-realview/include/mach/irqs-eb.h b/arch/arm/mach-realview/include/mach/irqs-eb.h index d6b5073692d..44754230fdc 100644 --- a/arch/arm/mach-realview/include/mach/irqs-eb.h +++ b/arch/arm/mach-realview/include/mach/irqs-eb.h @@ -115,7 +115,7 @@  /*   * Only define NR_IRQS if less than NR_IRQS_EB   */ -#define NR_IRQS_EB		(IRQ_EB_GIC_START + 96) +#define NR_IRQS_EB		(IRQ_EB_GIC_START + 128)  #if defined(CONFIG_MACH_REALVIEW_EB) \  	&& (!defined(NR_IRQS) || (NR_IRQS < NR_IRQS_EB)) diff --git a/arch/arm/mach-s3c24xx/Kconfig b/arch/arm/mach-s3c24xx/Kconfig index d1e80d0fd67..7079a70b1ab 100644 --- a/arch/arm/mach-s3c24xx/Kconfig +++ b/arch/arm/mach-s3c24xx/Kconfig @@ -293,8 +293,8 @@ config MACH_JIVE  	  Say Y here if you are using the Logitech Jive.  
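The pxa27x hunk above renames pxa27x_assert_ac97reset() to pxa27x_configure_ac97reset() and documents its two modes (to_gpio true/false). For context, a caller working around the warm-reset controller bug would briefly reclaim the reset pin as a GPIO around the reset pulse. The sketch below is an illustrative assumption of that usage, not code from this commit; the real caller lives in sound/arm/pxa2xx-ac97-lib.c and its exact sequence and timing may differ.

/* Illustrative sketch only, assuming the helper from the hunk above. */
#include <linux/delay.h>

static void example_pxa27x_ac97_warm_reset(int reset_gpio)
{
	/*
	 * Reclaim the pin as a generic output GPIO, driven high, so the
	 * codec keeps seeing nRESET deasserted while the controller is
	 * warm-reset behind its back.
	 */
	pxa27x_configure_ac97reset(reset_gpio, true);

	/* ... trigger the controller warm reset here (GCR warm-reset bit) ... */
	udelay(500);

	/* Hand the pin back to the controller as its AC97_nRESET alt fn. */
	pxa27x_configure_ac97reset(reset_gpio, false);
}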
config MACH_JIVE_SHOW_BOOTLOADER -	bool "Allow access to bootloader partitions in MTD (EXPERIMENTAL)" -	depends on MACH_JIVE && EXPERIMENTAL +	bool "Allow access to bootloader partitions in MTD" +	depends on MACH_JIVE  config MACH_S3C2413  	bool diff --git a/arch/arm/mach-sa1100/assabet.c b/arch/arm/mach-sa1100/assabet.c index 9a23739f702..442497363db 100644 --- a/arch/arm/mach-sa1100/assabet.c +++ b/arch/arm/mach-sa1100/assabet.c @@ -16,6 +16,7 @@  #include <linux/ioport.h>  #include <linux/platform_data/sa11x0-serial.h>  #include <linux/serial_core.h> +#include <linux/platform_device.h>  #include <linux/mfd/ucb1x00.h>  #include <linux/mtd/mtd.h>  #include <linux/mtd/partitions.h> diff --git a/arch/arm/mach-shmobile/board-ap4evb.c b/arch/arm/mach-shmobile/board-ap4evb.c index 99ef190d090..08294fa9e0d 100644 --- a/arch/arm/mach-shmobile/board-ap4evb.c +++ b/arch/arm/mach-shmobile/board-ap4evb.c @@ -657,14 +657,8 @@ static struct platform_device lcdc_device = {  /* FSI */  #define IRQ_FSI		evt2irq(0x1840)  static struct sh_fsi_platform_info fsi_info = { -	.port_a = { -		.flags		= SH_FSI_BRS_INV, -	},  	.port_b = { -		.flags		= SH_FSI_BRS_INV | -				  SH_FSI_BRM_INV | -				  SH_FSI_LRS_INV | -				  SH_FSI_CLK_CPG | +		.flags		= SH_FSI_CLK_CPG |  				  SH_FSI_FMT_SPDIF,  	},  }; @@ -692,21 +686,21 @@ static struct platform_device fsi_device = {  	},  }; -static struct asoc_simple_dai_init_info fsi2_ak4643_init_info = { -	.fmt		= SND_SOC_DAIFMT_LEFT_J, -	.codec_daifmt	= SND_SOC_DAIFMT_CBM_CFM, -	.cpu_daifmt	= SND_SOC_DAIFMT_CBS_CFS, -	.sysclk		= 11289600, -}; -  static struct asoc_simple_card_info fsi2_ak4643_info = {  	.name		= "AK4643",  	.card		= "FSI2A-AK4643", -	.cpu_dai	= "fsia-dai",  	.codec		= "ak4642-codec.0-0013",  	.platform	= "sh_fsi2", -	.codec_dai	= "ak4642-hifi", -	.init		= &fsi2_ak4643_init_info, +	.daifmt		= SND_SOC_DAIFMT_LEFT_J, +	.cpu_dai = { +		.name	= "fsia-dai", +		.fmt	= SND_SOC_DAIFMT_CBS_CFS, +	}, +	.codec_dai = { +		.name	= "ak4642-hifi", +		.fmt	= SND_SOC_DAIFMT_CBM_CFM, +		.sysclk	= 11289600, +	},  };  static struct platform_device fsi_ak4643_device = { @@ -815,18 +809,18 @@ static struct platform_device lcdc1_device = {  	},  }; -static struct asoc_simple_dai_init_info fsi2_hdmi_init_info = { -	.cpu_daifmt	= SND_SOC_DAIFMT_CBM_CFM, -}; -  static struct asoc_simple_card_info fsi2_hdmi_info = {  	.name		= "HDMI",  	.card		= "FSI2B-HDMI", -	.cpu_dai	= "fsib-dai",  	.codec		= "sh-mobile-hdmi",  	.platform	= "sh_fsi2", -	.codec_dai	= "sh_mobile_hdmi-hifi", -	.init		= &fsi2_hdmi_init_info, +	.cpu_dai = { +		.name	= "fsib-dai", +		.fmt	= SND_SOC_DAIFMT_CBM_CFM | SND_SOC_DAIFMT_IB_NF, +	}, +	.codec_dai = { +		.name	= "sh_mobile_hdmi-hifi", +	},  };  static struct platform_device fsi_hdmi_device = { diff --git a/arch/arm/mach-shmobile/board-armadillo800eva.c b/arch/arm/mach-shmobile/board-armadillo800eva.c index 5353adf6b82..0679ca6bf1f 100644 --- a/arch/arm/mach-shmobile/board-armadillo800eva.c +++ b/arch/arm/mach-shmobile/board-armadillo800eva.c @@ -806,21 +806,21 @@ static struct platform_device fsi_device = {  };  /* FSI-WM8978 */ -static struct asoc_simple_dai_init_info fsi_wm8978_init_info = { -	.fmt		= SND_SOC_DAIFMT_I2S, -	.codec_daifmt	= SND_SOC_DAIFMT_CBM_CFM | SND_SOC_DAIFMT_NB_NF, -	.cpu_daifmt	= SND_SOC_DAIFMT_CBS_CFS, -	.sysclk		= 12288000, -}; -  static struct asoc_simple_card_info fsi_wm8978_info = {  	.name		= "wm8978",  	.card		= "FSI2A-WM8978", -	.cpu_dai	= "fsia-dai",  	.codec		= "wm8978.0-001a",  	.platform	= "sh_fsi2", -	.codec_dai	= "wm8978-hifi", -	
.init		= &fsi_wm8978_init_info, +	.daifmt		= SND_SOC_DAIFMT_I2S, +	.cpu_dai = { +		.name	= "fsia-dai", +		.fmt	= SND_SOC_DAIFMT_CBS_CFS | SND_SOC_DAIFMT_IB_NF, +	}, +	.codec_dai = { +		.name	= "wm8978-hifi", +		.fmt	= SND_SOC_DAIFMT_CBM_CFM | SND_SOC_DAIFMT_NB_NF, +		.sysclk	= 12288000, +	},  };  static struct platform_device fsi_wm8978_device = { @@ -832,18 +832,18 @@ static struct platform_device fsi_wm8978_device = {  };  /* FSI-HDMI */ -static struct asoc_simple_dai_init_info fsi2_hdmi_init_info = { -	.cpu_daifmt	= SND_SOC_DAIFMT_CBM_CFM, -}; -  static struct asoc_simple_card_info fsi2_hdmi_info = {  	.name		= "HDMI",  	.card		= "FSI2B-HDMI", -	.cpu_dai	= "fsib-dai",  	.codec		= "sh-mobile-hdmi",  	.platform	= "sh_fsi2", -	.codec_dai	= "sh_mobile_hdmi-hifi", -	.init		= &fsi2_hdmi_init_info, +	.cpu_dai = { +		.name	= "fsib-dai", +		.fmt	= SND_SOC_DAIFMT_CBM_CFM, +	}, +	.codec_dai = { +		.name = "sh_mobile_hdmi-hifi", +	},  };  static struct platform_device fsi_hdmi_device = { diff --git a/arch/arm/mach-shmobile/board-kzm9g.c b/arch/arm/mach-shmobile/board-kzm9g.c index c02448d6847..f41b71e8df3 100644 --- a/arch/arm/mach-shmobile/board-kzm9g.c +++ b/arch/arm/mach-shmobile/board-kzm9g.c @@ -525,21 +525,21 @@ static struct platform_device fsi_device = {  	},  }; -static struct asoc_simple_dai_init_info fsi2_ak4648_init_info = { -	.fmt		= SND_SOC_DAIFMT_LEFT_J, -	.codec_daifmt	= SND_SOC_DAIFMT_CBM_CFM, -	.cpu_daifmt	= SND_SOC_DAIFMT_CBS_CFS, -	.sysclk		= 11289600, -}; -  static struct asoc_simple_card_info fsi2_ak4648_info = {  	.name		= "AK4648",  	.card		= "FSI2A-AK4648", -	.cpu_dai	= "fsia-dai",  	.codec		= "ak4642-codec.0-0012",  	.platform	= "sh_fsi2", -	.codec_dai	= "ak4642-hifi", -	.init		= &fsi2_ak4648_init_info, +	.daifmt		= SND_SOC_DAIFMT_LEFT_J, +	.cpu_dai = { +		.name	= "fsia-dai", +		.fmt	= SND_SOC_DAIFMT_CBS_CFS, +	}, +	.codec_dai = { +		.name	= "ak4642-hifi", +		.fmt	= SND_SOC_DAIFMT_CBM_CFM, +		.sysclk	= 11289600, +	},  };  static struct platform_device fsi_ak4648_device = { diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c index 2fed62f6604..3fd716dae40 100644 --- a/arch/arm/mach-shmobile/board-mackerel.c +++ b/arch/arm/mach-shmobile/board-mackerel.c @@ -502,18 +502,18 @@ static struct platform_device hdmi_lcdc_device = {  	},  }; -static struct asoc_simple_dai_init_info fsi2_hdmi_init_info = { -	.cpu_daifmt	= SND_SOC_DAIFMT_CBM_CFM, -}; -  static struct asoc_simple_card_info fsi2_hdmi_info = {  	.name		= "HDMI",  	.card		= "FSI2B-HDMI", -	.cpu_dai	= "fsib-dai",  	.codec		= "sh-mobile-hdmi",  	.platform	= "sh_fsi2", -	.codec_dai	= "sh_mobile_hdmi-hifi", -	.init		= &fsi2_hdmi_init_info, +	.cpu_dai = { +		.name	= "fsib-dai", +		.fmt	= SND_SOC_DAIFMT_CBM_CFM | SND_SOC_DAIFMT_IB_NF, +	}, +	.codec_dai = { +		.name	= "sh_mobile_hdmi-hifi", +	},  };  static struct platform_device fsi_hdmi_device = { @@ -858,16 +858,12 @@ static struct platform_device leds_device = {  #define IRQ_FSI evt2irq(0x1840)  static struct sh_fsi_platform_info fsi_info = {  	.port_a = { -		.flags = SH_FSI_BRS_INV,  		.tx_id = SHDMA_SLAVE_FSIA_TX,  		.rx_id = SHDMA_SLAVE_FSIA_RX,  	},  	.port_b = { -		.flags = SH_FSI_BRS_INV	| -			SH_FSI_BRM_INV	| -			SH_FSI_LRS_INV	| -			SH_FSI_CLK_CPG	| -			SH_FSI_FMT_SPDIF, +		.flags = SH_FSI_CLK_CPG	| +			 SH_FSI_FMT_SPDIF,  	}  }; @@ -896,21 +892,21 @@ static struct platform_device fsi_device = {  	},  }; -static struct asoc_simple_dai_init_info fsi2_ak4643_init_info = { -	.fmt		= SND_SOC_DAIFMT_LEFT_J, -	.codec_daifmt	= 
SND_SOC_DAIFMT_CBM_CFM, -	.cpu_daifmt	= SND_SOC_DAIFMT_CBS_CFS, -	.sysclk		= 11289600, -}; -  static struct asoc_simple_card_info fsi2_ak4643_info = {  	.name		= "AK4643",  	.card		= "FSI2A-AK4643", -	.cpu_dai	= "fsia-dai",  	.codec		= "ak4642-codec.0-0013",  	.platform	= "sh_fsi2", -	.codec_dai	= "ak4642-hifi", -	.init		= &fsi2_ak4643_init_info, +	.daifmt		= SND_SOC_DAIFMT_LEFT_J, +	.cpu_dai = { +		.name	= "fsia-dai", +		.fmt	= SND_SOC_DAIFMT_CBS_CFS, +	}, +	.codec_dai = { +		.name	= "ak4642-hifi", +		.fmt	= SND_SOC_DAIFMT_CBM_CFM, +		.sysclk	= 11289600, +	},  };  static struct platform_device fsi_ak4643_device = { diff --git a/arch/arm/mach-sunxi/Kconfig b/arch/arm/mach-sunxi/Kconfig index 3fdd0085e30..8709a39bd34 100644 --- a/arch/arm/mach-sunxi/Kconfig +++ b/arch/arm/mach-sunxi/Kconfig @@ -7,3 +7,4 @@ config ARCH_SUNXI  	select PINCTRL  	select SPARSE_IRQ  	select SUNXI_TIMER +	select PINCTRL_SUNXI
\ No newline at end of file diff --git a/arch/arm/mach-tegra/cpu-tegra.c b/arch/arm/mach-tegra/cpu-tegra.c index a74d3c7d2e2..a36a03d3c9a 100644 --- a/arch/arm/mach-tegra/cpu-tegra.c +++ b/arch/arm/mach-tegra/cpu-tegra.c @@ -243,8 +243,7 @@ static int tegra_cpu_init(struct cpufreq_policy *policy)  	/* FIXME: what's the actual transition time? */  	policy->cpuinfo.transition_latency = 300 * 1000; -	policy->shared_type = CPUFREQ_SHARED_TYPE_ALL; -	cpumask_copy(policy->related_cpus, cpu_possible_mask); +	cpumask_copy(policy->cpus, cpu_possible_mask);  	if (policy->cpu == 0)  		register_pm_notifier(&tegra_cpu_pm_notifier); diff --git a/arch/arm/mach-tegra/tegra2_emc.c b/arch/arm/mach-tegra/tegra2_emc.c index e18aa2f83eb..ce7ce42a1ac 100644 --- a/arch/arm/mach-tegra/tegra2_emc.c +++ b/arch/arm/mach-tegra/tegra2_emc.c @@ -312,11 +312,9 @@ static int tegra_emc_probe(struct platform_device *pdev)  		return -ENOMEM;  	} -	emc_regbase = devm_request_and_ioremap(&pdev->dev, res); -	if (!emc_regbase) { -		dev_err(&pdev->dev, "failed to remap registers\n"); -		return -ENOMEM; -	} +	emc_regbase = devm_ioremap_resource(&pdev->dev, res); +	if (IS_ERR(emc_regbase)) +		return PTR_ERR(emc_regbase);  	pdata = pdev->dev.platform_data; diff --git a/arch/arm/mach-ux500/Kconfig b/arch/arm/mach-ux500/Kconfig index 5dea90636d9..3e5bbd0e5b2 100644 --- a/arch/arm/mach-ux500/Kconfig +++ b/arch/arm/mach-ux500/Kconfig @@ -11,6 +11,7 @@ config UX500_SOC_COMMON  	select COMMON_CLK  	select PINCTRL  	select PINCTRL_NOMADIK +	select PINCTRL_ABX500  	select PL310_ERRATA_753970 if CACHE_PL310  config UX500_SOC_DB8500 @@ -18,6 +19,11 @@ config UX500_SOC_DB8500  	select CPU_FREQ_TABLE if CPU_FREQ  	select MFD_DB8500_PRCMU  	select PINCTRL_DB8500 +	select PINCTRL_DB8540 +	select PINCTRL_AB8500 +	select PINCTRL_AB8505 +	select PINCTRL_AB9540 +	select PINCTRL_AB8540  	select REGULATOR  	select REGULATOR_DB8500_PRCMU diff --git a/arch/arm/mach-ux500/board-mop500.c b/arch/arm/mach-ux500/board-mop500.c index d453522edb0..b8781caa54b 100644 --- a/arch/arm/mach-ux500/board-mop500.c +++ b/arch/arm/mach-ux500/board-mop500.c @@ -90,26 +90,8 @@ static struct platform_device snowball_gpio_en_3v3_regulator_dev = {         },  }; -static struct ab8500_gpio_platform_data ab8500_gpio_pdata = { +static struct abx500_gpio_platform_data ab8500_gpio_pdata = {  	.gpio_base		= MOP500_AB8500_PIN_GPIO(1), -	.irq_base		= MOP500_AB8500_VIR_GPIO_IRQ_BASE, -	/* config_reg is the initial configuration of ab8500 pins. -	 * The pins can be configured as GPIO or alt functions based -	 * on value present in GpioSel1 to GpioSel6 and AlternatFunction -	 * register. This is the array of 7 configuration settings. -	 * One has to compile time decide these settings. 
Below is the -	 * explanation of these setting -	 * GpioSel1 = 0x00 => Pins GPIO1 to GPIO8 are not used as GPIO -	 * GpioSel2 = 0x1E => Pins GPIO10 to GPIO13 are configured as GPIO -	 * GpioSel3 = 0x80 => Pin GPIO24 is configured as GPIO -	 * GpioSel4 = 0x01 => Pin GPIo25 is configured as GPIO -	 * GpioSel5 = 0x7A => Pins GPIO34, GPIO36 to GPIO39 are conf as GPIO -	 * GpioSel6 = 0x00 => Pins GPIO41 & GPIo42 are not configured as GPIO -	 * AlternaFunction = 0x00 => If Pins GPIO10 to 13 are not configured -	 * as GPIO then this register selectes the alternate fucntions -	 */ -	.config_reg		= {0x00, 0x1E, 0x80, 0x01, -					0x7A, 0x00, 0x00},  };  /* ab8500-codec */ diff --git a/arch/arm/mach-ux500/cpu-db8500.c b/arch/arm/mach-ux500/cpu-db8500.c index 5b286e06474..b80ad9610e9 100644 --- a/arch/arm/mach-ux500/cpu-db8500.c +++ b/arch/arm/mach-ux500/cpu-db8500.c @@ -285,7 +285,7 @@ static struct of_dev_auxdata u8500_auxdata_lookup[] __initdata = {  	OF_DEV_AUXDATA("st,nomadik-i2c", 0x80110000, "nmk-i2c.3", NULL),  	OF_DEV_AUXDATA("st,nomadik-i2c", 0x8012a000, "nmk-i2c.4", NULL),  	/* Requires device name bindings. */ -	OF_DEV_AUXDATA("stericsson,nmk_pinctrl", U8500_PRCMU_BASE, +	OF_DEV_AUXDATA("stericsson,nmk-pinctrl", U8500_PRCMU_BASE,  		"pinctrl-db8500", NULL),  	/* Requires clock name and DMA bindings. */  	OF_DEV_AUXDATA("stericsson,ux500-msp-i2s", 0x80123000, diff --git a/arch/arm/mach-ux500/include/mach/irqs-board-mop500.h b/arch/arm/mach-ux500/include/mach/irqs-board-mop500.h index 7d34c52798b..d526dd8e87d 100644 --- a/arch/arm/mach-ux500/include/mach/irqs-board-mop500.h +++ b/arch/arm/mach-ux500/include/mach/irqs-board-mop500.h @@ -38,15 +38,7 @@  #define MOP500_STMPE1601_IRQ_END	\  	MOP500_STMPE1601_IRQ(STMPE_NR_INTERNAL_IRQS) -/* AB8500 virtual gpio IRQ */ -#define AB8500_VIR_GPIO_NR_IRQS			16 - -#define MOP500_AB8500_VIR_GPIO_IRQ_BASE		\ -	MOP500_STMPE1601_IRQ_END -#define MOP500_AB8500_VIR_GPIO_IRQ_END		\ -	(MOP500_AB8500_VIR_GPIO_IRQ_BASE + AB8500_VIR_GPIO_NR_IRQS) - -#define MOP500_NR_IRQS		MOP500_AB8500_VIR_GPIO_IRQ_END +#define MOP500_NR_IRQS		MOP500_STMPE1601_IRQ_END  #define MOP500_IRQ_END		MOP500_NR_IRQS diff --git a/arch/arm/mach-versatile/core.c b/arch/arm/mach-versatile/core.c index 60c092cfdae..43478c299cc 100644 --- a/arch/arm/mach-versatile/core.c +++ b/arch/arm/mach-versatile/core.c @@ -36,6 +36,7 @@  #include <linux/gfp.h>  #include <linux/clkdev.h>  #include <linux/mtd/physmap.h> +#include <linux/bitops.h>  #include <asm/irq.h>  #include <asm/hardware/arm_timer.h> @@ -65,16 +66,28 @@  #define VA_VIC_BASE		__io_address(VERSATILE_VIC_BASE)  #define VA_SIC_BASE		__io_address(VERSATILE_SIC_BASE) +/* These PIC IRQs are valid in each configuration */ +#define PIC_VALID_ALL	BIT(SIC_INT_KMI0) | BIT(SIC_INT_KMI1) | \ +			BIT(SIC_INT_SCI3) | BIT(SIC_INT_UART3) | \ +			BIT(SIC_INT_CLCD) | BIT(SIC_INT_TOUCH) | \ +			BIT(SIC_INT_KEYPAD) | BIT(SIC_INT_DoC) | \ +			BIT(SIC_INT_USB) | BIT(SIC_INT_PCI0) | \ +			BIT(SIC_INT_PCI1) | BIT(SIC_INT_PCI2) | \ +			BIT(SIC_INT_PCI3)  #if 1  #define IRQ_MMCI0A	IRQ_VICSOURCE22  #define IRQ_AACI	IRQ_VICSOURCE24  #define IRQ_ETH		IRQ_VICSOURCE25  #define PIC_MASK	0xFFD00000 +#define PIC_VALID	PIC_VALID_ALL  #else  #define IRQ_MMCI0A	IRQ_SIC_MMCI0A  #define IRQ_AACI	IRQ_SIC_AACI  #define IRQ_ETH		IRQ_SIC_ETH  #define PIC_MASK	0 +#define PIC_VALID	PIC_VALID_ALL | BIT(SIC_INT_MMCI0A) | \ +			BIT(SIC_INT_MMCI1A) | BIT(SIC_INT_AACI) | \ +			BIT(SIC_INT_ETH)  #endif  /* Lookup table for finding a DT node that represents the vic instance */ @@ -102,7 
+115,7 @@ void __init versatile_init_irq(void)  					      VERSATILE_SIC_BASE);  	fpga_irq_init(VA_SIC_BASE, "SIC", IRQ_SIC_START, -		IRQ_VICSOURCE31, ~PIC_MASK, np); +		IRQ_VICSOURCE31, PIC_VALID, np);  	/*  	 * Interrupts on secondary controller from 0 to 8 are routed to diff --git a/arch/arm/mach-versatile/pci.c b/arch/arm/mach-versatile/pci.c index 2f84f4094f1..e92e5e0705b 100644 --- a/arch/arm/mach-versatile/pci.c +++ b/arch/arm/mach-versatile/pci.c @@ -23,6 +23,7 @@  #include <linux/io.h>  #include <mach/hardware.h> +#include <mach/irqs.h>  #include <asm/irq.h>  #include <asm/mach/pci.h> @@ -327,12 +328,12 @@ static int __init versatile_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)  	int irq;  	/* slot,  pin,	irq -	 *  24     1     27 -	 *  25     1     28 -	 *  26     1     29 -	 *  27     1     30 +	 *  24     1     IRQ_SIC_PCI0 +	 *  25     1     IRQ_SIC_PCI1 +	 *  26     1     IRQ_SIC_PCI2 +	 *  27     1     IRQ_SIC_PCI3  	 */ -	irq = 27 + ((slot - 24 + pin - 1) & 3); +	irq = IRQ_SIC_PCI0 + ((slot - 24 + pin - 1) & 3);  	return irq;  } diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index 3fd629d5a51..025d1732873 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig @@ -629,8 +629,9 @@ config ARM_THUMBEE  	  make use of it. Say N for code that can run on CPUs without ThumbEE.  config ARM_VIRT_EXT -	bool "Native support for the ARM Virtualization Extensions" -	depends on MMU && CPU_V7 +	bool +	depends on MMU +	default y if CPU_V7  	help  	  Enable the kernel to make use of the ARM Virtualization  	  Extensions to install hypervisors without run-time firmware @@ -640,11 +641,6 @@ config ARM_VIRT_EXT  	  use of this feature.  Refer to Documentation/arm/Booting for  	  details. -	  It is safe to enable this option even if the kernel may not be -	  booted in HYP mode, may not have support for the -	  virtualization extensions, or may be booted with a -	  non-compliant bootloader. -  config SWP_EMULATE  	bool "Emulate SWP/SWPB instructions"  	depends on !CPU_USE_DOMAINS && CPU_V7 diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile index 8a9c4cb50a9..4e333fa2756 100644 --- a/arch/arm/mm/Makefile +++ b/arch/arm/mm/Makefile @@ -6,7 +6,7 @@ obj-y				:= dma-mapping.o extable.o fault.o init.o \  				   iomap.o  obj-$(CONFIG_MMU)		+= fault-armv.o flush.o idmap.o ioremap.o \ -				   mmap.o pgd.o mmu.o vmregion.o +				   mmap.o pgd.o mmu.o  ifneq ($(CONFIG_MMU),y)  obj-y				+= nommu.o diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c index bc4a5e9ebb7..7a0511191f6 100644 --- a/arch/arm/mm/context.c +++ b/arch/arm/mm/context.c @@ -34,6 +34,9 @@   * The ASID is used to tag entries in the CPU caches and TLBs.   * The context ID is used by debuggers and trace logic, and   * should be unique within all running processes. + * + * In big-endian operation, the two 32-bit words are swapped if accessed by + * non-64-bit operations.   
*/  #define ASID_FIRST_VERSION	(1ULL << ASID_BITS)  #define NUM_USER_ASIDS		(ASID_FIRST_VERSION - 1) diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 076c26d4386..dda3904dc64 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -640,7 +640,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,  	if (is_coherent || nommu())  		addr = __alloc_simple_buffer(dev, size, gfp, &page); -	else if (gfp & GFP_ATOMIC) +	else if (!(gfp & __GFP_WAIT))  		addr = __alloc_from_pool(size, &page);  	else if (!IS_ENABLED(CONFIG_CMA))  		addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller); diff --git a/arch/arm/mm/idmap.c b/arch/arm/mm/idmap.c index 99db769307e..2dffc010cc4 100644 --- a/arch/arm/mm/idmap.c +++ b/arch/arm/mm/idmap.c @@ -1,4 +1,6 @@ +#include <linux/module.h>  #include <linux/kernel.h> +#include <linux/slab.h>  #include <asm/cputype.h>  #include <asm/idmap.h> @@ -6,6 +8,7 @@  #include <asm/pgtable.h>  #include <asm/sections.h>  #include <asm/system_info.h> +#include <asm/virt.h>  pgd_t *idmap_pgd; @@ -59,11 +62,17 @@ static void idmap_add_pud(pgd_t *pgd, unsigned long addr, unsigned long end,  	} while (pud++, addr = next, addr != end);  } -static void identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long end) +static void identity_mapping_add(pgd_t *pgd, const char *text_start, +				 const char *text_end, unsigned long prot)  { -	unsigned long prot, next; +	unsigned long addr, end; +	unsigned long next; + +	addr = virt_to_phys(text_start); +	end = virt_to_phys(text_end); + +	prot |= PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AF; -	prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AF;  	if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())  		prot |= PMD_BIT4; @@ -74,28 +83,52 @@ static void identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long e  	} while (pgd++, addr = next, addr != end);  } +#if defined(CONFIG_ARM_VIRT_EXT) && defined(CONFIG_ARM_LPAE) +pgd_t *hyp_pgd; + +extern char  __hyp_idmap_text_start[], __hyp_idmap_text_end[]; + +static int __init init_static_idmap_hyp(void) +{ +	hyp_pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL); +	if (!hyp_pgd) +		return -ENOMEM; + +	pr_info("Setting up static HYP identity map for 0x%p - 0x%p\n", +		__hyp_idmap_text_start, __hyp_idmap_text_end); +	identity_mapping_add(hyp_pgd, __hyp_idmap_text_start, +			     __hyp_idmap_text_end, PMD_SECT_AP1); + +	return 0; +} +#else +static int __init init_static_idmap_hyp(void) +{ +	return 0; +} +#endif +  extern char  __idmap_text_start[], __idmap_text_end[];  static int __init init_static_idmap(void)  { -	phys_addr_t idmap_start, idmap_end; +	int ret;  	idmap_pgd = pgd_alloc(&init_mm);  	if (!idmap_pgd)  		return -ENOMEM; -	/* Add an identity mapping for the physical address of the section. 
*/ -	idmap_start = virt_to_phys((void *)__idmap_text_start); -	idmap_end = virt_to_phys((void *)__idmap_text_end); +	pr_info("Setting up static identity map for 0x%p - 0x%p\n", +		__idmap_text_start, __idmap_text_end); +	identity_mapping_add(idmap_pgd, __idmap_text_start, +			     __idmap_text_end, 0); -	pr_info("Setting up static identity map for 0x%llx - 0x%llx\n", -		(long long)idmap_start, (long long)idmap_end); -	identity_mapping_add(idmap_pgd, idmap_start, idmap_end); +	ret = init_static_idmap_hyp();  	/* Flush L1 for the hardware to see this page table content */  	flush_cache_louis(); -	return 0; +	return ret;  }  early_initcall(init_static_idmap); diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c index 88fd86cf3d9..04d9006eab1 100644 --- a/arch/arm/mm/ioremap.c +++ b/arch/arm/mm/ioremap.c @@ -39,6 +39,70 @@  #include <asm/mach/pci.h>  #include "mm.h" + +LIST_HEAD(static_vmlist); + +static struct static_vm *find_static_vm_paddr(phys_addr_t paddr, +			size_t size, unsigned int mtype) +{ +	struct static_vm *svm; +	struct vm_struct *vm; + +	list_for_each_entry(svm, &static_vmlist, list) { +		vm = &svm->vm; +		if (!(vm->flags & VM_ARM_STATIC_MAPPING)) +			continue; +		if ((vm->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype)) +			continue; + +		if (vm->phys_addr > paddr || +			paddr + size - 1 > vm->phys_addr + vm->size - 1) +			continue; + +		return svm; +	} + +	return NULL; +} + +struct static_vm *find_static_vm_vaddr(void *vaddr) +{ +	struct static_vm *svm; +	struct vm_struct *vm; + +	list_for_each_entry(svm, &static_vmlist, list) { +		vm = &svm->vm; + +		/* static_vmlist is in ascending order */ +		if (vm->addr > vaddr) +			break; + +		if (vm->addr <= vaddr && vm->addr + vm->size > vaddr) +			return svm; +	} + +	return NULL; +} + +void __init add_static_vm_early(struct static_vm *svm) +{ +	struct static_vm *curr_svm; +	struct vm_struct *vm; +	void *vaddr; + +	vm = &svm->vm; +	vm_area_add_early(vm); +	vaddr = vm->addr; + +	list_for_each_entry(curr_svm, &static_vmlist, list) { +		vm = &curr_svm->vm; + +		if (vm->addr > vaddr) +			break; +	} +	list_add_tail(&svm->list, &curr_svm->list); +} +  int ioremap_page(unsigned long virt, unsigned long phys,  		 const struct mem_type *mtype)  { @@ -197,13 +261,14 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,  	const struct mem_type *type;  	int err;  	unsigned long addr; - 	struct vm_struct * area; +	struct vm_struct *area; +	phys_addr_t paddr = __pfn_to_phys(pfn);  #ifndef CONFIG_ARM_LPAE  	/*  	 * High mappings must be supersection aligned  	 */ -	if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK)) +	if (pfn >= 0x100000 && (paddr & ~SUPERSECTION_MASK))  		return NULL;  #endif @@ -219,24 +284,16 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,  	/*  	 * Try to reuse one of the static mapping whenever possible.  	 
*/ -	read_lock(&vmlist_lock); -	for (area = vmlist; area; area = area->next) { -		if (!size || (sizeof(phys_addr_t) == 4 && pfn >= 0x100000)) -			break; -		if (!(area->flags & VM_ARM_STATIC_MAPPING)) -			continue; -		if ((area->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype)) -			continue; -		if (__phys_to_pfn(area->phys_addr) > pfn || -		    __pfn_to_phys(pfn) + size-1 > area->phys_addr + area->size-1) -			continue; -		/* we can drop the lock here as we know *area is static */ -		read_unlock(&vmlist_lock); -		addr = (unsigned long)area->addr; -		addr += __pfn_to_phys(pfn) - area->phys_addr; -		return (void __iomem *) (offset + addr); +	if (size && !(sizeof(phys_addr_t) == 4 && pfn >= 0x100000)) { +		struct static_vm *svm; + +		svm = find_static_vm_paddr(paddr, size, mtype); +		if (svm) { +			addr = (unsigned long)svm->vm.addr; +			addr += paddr - svm->vm.phys_addr; +			return (void __iomem *) (offset + addr); +		}  	} -	read_unlock(&vmlist_lock);  	/*  	 * Don't allow RAM to be mapped - this causes problems with ARMv6+ @@ -248,21 +305,21 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,   	if (!area)   		return NULL;   	addr = (unsigned long)area->addr; -	area->phys_addr = __pfn_to_phys(pfn); +	area->phys_addr = paddr;  #if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)  	if (DOMAIN_IO == 0 &&  	    (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||  	       cpu_is_xsc3()) && pfn >= 0x100000 && -	       !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) { +	       !((paddr | size | addr) & ~SUPERSECTION_MASK)) {  		area->flags |= VM_ARM_SECTION_MAPPING;  		err = remap_area_supersections(addr, pfn, size, type); -	} else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) { +	} else if (!((paddr | size | addr) & ~PMD_MASK)) {  		area->flags |= VM_ARM_SECTION_MAPPING;  		err = remap_area_sections(addr, pfn, size, type);  	} else  #endif -		err = ioremap_page_range(addr, addr + size, __pfn_to_phys(pfn), +		err = ioremap_page_range(addr, addr + size, paddr,  					 __pgprot(type->prot_pte));  	if (err) { @@ -346,34 +403,28 @@ __arm_ioremap_exec(unsigned long phys_addr, size_t size, bool cached)  void __iounmap(volatile void __iomem *io_addr)  {  	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr); -	struct vm_struct *vm; +	struct static_vm *svm; + +	/* If this is a static mapping, we must leave it alone */ +	svm = find_static_vm_vaddr(addr); +	if (svm) +		return; -	read_lock(&vmlist_lock); -	for (vm = vmlist; vm; vm = vm->next) { -		if (vm->addr > addr) -			break; -		if (!(vm->flags & VM_IOREMAP)) -			continue; -		/* If this is a static mapping we must leave it alone */ -		if ((vm->flags & VM_ARM_STATIC_MAPPING) && -		    (vm->addr <= addr) && (vm->addr + vm->size > addr)) { -			read_unlock(&vmlist_lock); -			return; -		}  #if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE) +	{ +		struct vm_struct *vm; + +		vm = find_vm_area(addr); +  		/*  		 * If this is a section based mapping we need to handle it  		 * specially as the VM subsystem does not know how to handle  		 * such a beast.  		 
*/ -		if ((vm->addr == addr) && -		    (vm->flags & VM_ARM_SECTION_MAPPING)) { +		if (vm && (vm->flags & VM_ARM_SECTION_MAPPING))  			unmap_area_sections((unsigned long)vm->addr, vm->size); -			break; -		} -#endif  	} -	read_unlock(&vmlist_lock); +#endif  	vunmap(addr);  } diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h index a8ee92da354..d5a4e9ad8f0 100644 --- a/arch/arm/mm/mm.h +++ b/arch/arm/mm/mm.h @@ -1,4 +1,6 @@  #ifdef CONFIG_MMU +#include <linux/list.h> +#include <linux/vmalloc.h>  /* the upper-most page table pointer */  extern pmd_t *top_pmd; @@ -65,6 +67,16 @@ extern void __flush_dcache_page(struct address_space *mapping, struct page *page  /* consistent regions used by dma_alloc_attrs() */  #define VM_ARM_DMA_CONSISTENT	0x20000000 + +struct static_vm { +	struct vm_struct vm; +	struct list_head list; +}; + +extern struct list_head static_vmlist; +extern struct static_vm *find_static_vm_vaddr(void *vaddr); +extern __init void add_static_vm_early(struct static_vm *svm); +  #endif  #ifdef CONFIG_ZONE_DMA diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index ce328c7f5c9..e95a996ab78 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -57,6 +57,9 @@ static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;  static unsigned int ecc_mask __initdata = 0;  pgprot_t pgprot_user;  pgprot_t pgprot_kernel; +pgprot_t pgprot_hyp_device; +pgprot_t pgprot_s2; +pgprot_t pgprot_s2_device;  EXPORT_SYMBOL(pgprot_user);  EXPORT_SYMBOL(pgprot_kernel); @@ -66,34 +69,46 @@ struct cachepolicy {  	unsigned int	cr_mask;  	pmdval_t	pmd;  	pteval_t	pte; +	pteval_t	pte_s2;  }; +#ifdef CONFIG_ARM_LPAE +#define s2_policy(policy)	policy +#else +#define s2_policy(policy)	0 +#endif +  static struct cachepolicy cache_policies[] __initdata = {  	{  		.policy		= "uncached",  		.cr_mask	= CR_W|CR_C,  		.pmd		= PMD_SECT_UNCACHED,  		.pte		= L_PTE_MT_UNCACHED, +		.pte_s2		= s2_policy(L_PTE_S2_MT_UNCACHED),  	}, {  		.policy		= "buffered",  		.cr_mask	= CR_C,  		.pmd		= PMD_SECT_BUFFERED,  		.pte		= L_PTE_MT_BUFFERABLE, +		.pte_s2		= s2_policy(L_PTE_S2_MT_UNCACHED),  	}, {  		.policy		= "writethrough",  		.cr_mask	= 0,  		.pmd		= PMD_SECT_WT,  		.pte		= L_PTE_MT_WRITETHROUGH, +		.pte_s2		= s2_policy(L_PTE_S2_MT_WRITETHROUGH),  	}, {  		.policy		= "writeback",  		.cr_mask	= 0,  		.pmd		= PMD_SECT_WB,  		.pte		= L_PTE_MT_WRITEBACK, +		.pte_s2		= s2_policy(L_PTE_S2_MT_WRITEBACK),  	}, {  		.policy		= "writealloc",  		.cr_mask	= 0,  		.pmd		= PMD_SECT_WBWA,  		.pte		= L_PTE_MT_WRITEALLOC, +		.pte_s2		= s2_policy(L_PTE_S2_MT_WRITEBACK),  	}  }; @@ -310,6 +325,7 @@ static void __init build_mem_type_table(void)  	struct cachepolicy *cp;  	unsigned int cr = get_cr();  	pteval_t user_pgprot, kern_pgprot, vecs_pgprot; +	pteval_t hyp_device_pgprot, s2_pgprot, s2_device_pgprot;  	int cpu_arch = cpu_architecture();  	int i; @@ -421,6 +437,8 @@ static void __init build_mem_type_table(void)  	 */  	cp = &cache_policies[cachepolicy];  	vecs_pgprot = kern_pgprot = user_pgprot = cp->pte; +	s2_pgprot = cp->pte_s2; +	hyp_device_pgprot = s2_device_pgprot = mem_types[MT_DEVICE].prot_pte;  	/*  	 * ARMv6 and above have extended page tables. 
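For context on the static_vm machinery added in mm/ioremap.c above: entries reach static_vmlist at boot through iotable_init() (converted below to call add_static_vm_early()), which is why a later ioremap() of the same physical range is satisfied by find_static_vm_paddr(), and why __iounmap() refuses to tear such mappings down. A minimal, hypothetical board-file sketch in C; the virtual address, physical base, and all names are illustrative, not taken from this series:

#include <linux/kernel.h>
#include <linux/sizes.h>
#include <asm/mach/map.h>
#include <asm/memory.h>

/* Hypothetical board code: this fixed device mapping is registered at
 * map_io time and ends up on static_vmlist via add_static_vm_early(). */
static struct map_desc example_io_desc[] __initdata = {
	{
		.virtual	= 0xf8000000,			/* example fixed virtual address */
		.pfn		= __phys_to_pfn(0x10000000),	/* example peripheral base */
		.length		= SZ_1M,
		.type		= MT_DEVICE,
	},
};

static void __init example_map_io(void)
{
	iotable_init(example_io_desc, ARRAY_SIZE(example_io_desc));
}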
@@ -444,6 +462,7 @@ static void __init build_mem_type_table(void)  			user_pgprot |= L_PTE_SHARED;  			kern_pgprot |= L_PTE_SHARED;  			vecs_pgprot |= L_PTE_SHARED; +			s2_pgprot |= L_PTE_SHARED;  			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;  			mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;  			mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S; @@ -498,6 +517,9 @@ static void __init build_mem_type_table(void)  	pgprot_user   = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);  	pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |  				 L_PTE_DIRTY | kern_pgprot); +	pgprot_s2  = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | s2_pgprot); +	pgprot_s2_device  = __pgprot(s2_device_pgprot); +	pgprot_hyp_device  = __pgprot(hyp_device_pgprot);  	mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;  	mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask; @@ -757,21 +779,24 @@ void __init iotable_init(struct map_desc *io_desc, int nr)  {  	struct map_desc *md;  	struct vm_struct *vm; +	struct static_vm *svm;  	if (!nr)  		return; -	vm = early_alloc_aligned(sizeof(*vm) * nr, __alignof__(*vm)); +	svm = early_alloc_aligned(sizeof(*svm) * nr, __alignof__(*svm));  	for (md = io_desc; nr; md++, nr--) {  		create_mapping(md); + +		vm = &svm->vm;  		vm->addr = (void *)(md->virtual & PAGE_MASK);  		vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));  		vm->phys_addr = __pfn_to_phys(md->pfn);  		vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;  		vm->flags |= VM_ARM_MTYPE(md->type);  		vm->caller = iotable_init; -		vm_area_add_early(vm++); +		add_static_vm_early(svm++);  	}  } @@ -779,13 +804,16 @@ void __init vm_reserve_area_early(unsigned long addr, unsigned long size,  				  void *caller)  {  	struct vm_struct *vm; +	struct static_vm *svm; + +	svm = early_alloc_aligned(sizeof(*svm), __alignof__(*svm)); -	vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm)); +	vm = &svm->vm;  	vm->addr = (void *)addr;  	vm->size = size;  	vm->flags = VM_IOREMAP | VM_ARM_EMPTY_MAPPING;  	vm->caller = caller; -	vm_area_add_early(vm); +	add_static_vm_early(svm);  }  #ifndef CONFIG_ARM_LPAE @@ -810,14 +838,13 @@ static void __init pmd_empty_section_gap(unsigned long addr)  static void __init fill_pmd_gaps(void)  { +	struct static_vm *svm;  	struct vm_struct *vm;  	unsigned long addr, next = 0;  	pmd_t *pmd; -	/* we're still single threaded hence no lock needed here */ -	for (vm = vmlist; vm; vm = vm->next) { -		if (!(vm->flags & (VM_ARM_STATIC_MAPPING | VM_ARM_EMPTY_MAPPING))) -			continue; +	list_for_each_entry(svm, &static_vmlist, list) { +		vm = &svm->vm;  		addr = (unsigned long)vm->addr;  		if (addr < next)  			continue; @@ -857,19 +884,12 @@ static void __init fill_pmd_gaps(void)  #if defined(CONFIG_PCI) && !defined(CONFIG_NEED_MACH_IO_H)  static void __init pci_reserve_io(void)  { -	struct vm_struct *vm; -	unsigned long addr; +	struct static_vm *svm; -	/* we're still single threaded hence no lock needed here */ -	for (vm = vmlist; vm; vm = vm->next) { -		if (!(vm->flags & VM_ARM_STATIC_MAPPING)) -			continue; -		addr = (unsigned long)vm->addr; -		addr &= ~(SZ_2M - 1); -		if (addr == PCI_IO_VIRT_BASE) -			return; +	svm = find_static_vm_vaddr((void *)PCI_IO_VIRT_BASE); +	if (svm) +		return; -	}  	vm_reserve_area_early(PCI_IO_VIRT_BASE, SZ_2M, pci_reserve_io);  }  #else diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S index eb6aa73bc8b..f9a0aa725ea 100644 --- a/arch/arm/mm/proc-macros.S +++ b/arch/arm/mm/proc-macros.S @@ -38,9 +38,14 @@  /*   * mmid - get context id from mm pointer 
(mm->context.id) + * Note: this field is 64-bit, so in big-endian the two words are swapped too.   */  	.macro	mmid, rd, rn +#ifdef __ARMEB__ +	ldr	\rd, [\rn, #MM_CONTEXT_ID + 4 ] +#else  	ldr	\rd, [\rn, #MM_CONTEXT_ID] +#endif  	.endm  /* diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S index 09c5233f4df..bcaaa8de932 100644 --- a/arch/arm/mm/proc-v6.S +++ b/arch/arm/mm/proc-v6.S @@ -101,7 +101,7 @@ ENTRY(cpu_v6_dcache_clean_area)  ENTRY(cpu_v6_switch_mm)  #ifdef CONFIG_MMU  	mov	r2, #0 -	ldr	r1, [r1, #MM_CONTEXT_ID]	@ get mm->context.id +	mmid	r1, r1				@ get mm->context.id  	ALT_SMP(orr	r0, r0, #TTB_FLAGS_SMP)  	ALT_UP(orr	r0, r0, #TTB_FLAGS_UP)  	mcr	p15, 0, r2, c7, c5, 6		@ flush BTAC/BTB diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S index 6d98c13ab82..78f520bc0e9 100644 --- a/arch/arm/mm/proc-v7-2level.S +++ b/arch/arm/mm/proc-v7-2level.S @@ -40,7 +40,7 @@  ENTRY(cpu_v7_switch_mm)  #ifdef CONFIG_MMU  	mov	r2, #0 -	ldr	r1, [r1, #MM_CONTEXT_ID]	@ get mm->context.id +	mmid	r1, r1				@ get mm->context.id  	ALT_SMP(orr	r0, r0, #TTB_FLAGS_SMP)  	ALT_UP(orr	r0, r0, #TTB_FLAGS_UP)  #ifdef CONFIG_ARM_ERRATA_430973 diff --git a/arch/arm/mm/proc-v7-3level.S b/arch/arm/mm/proc-v7-3level.S index 7b56386f949..50bf1dafc9e 100644 --- a/arch/arm/mm/proc-v7-3level.S +++ b/arch/arm/mm/proc-v7-3level.S @@ -47,7 +47,7 @@   */  ENTRY(cpu_v7_switch_mm)  #ifdef CONFIG_MMU -	ldr	r1, [r1, #MM_CONTEXT_ID]	@ get mm->context.id +	mmid	r1, r1				@ get mm->context.id  	and	r3, r1, #0xff  	mov	r3, r3, lsl #(48 - 32)		@ ASID  	mcrr	p15, 0, r0, r3, c2		@ set TTB 0 diff --git a/arch/arm/mm/vmregion.c b/arch/arm/mm/vmregion.c deleted file mode 100644 index a631016e1f8..00000000000 --- a/arch/arm/mm/vmregion.c +++ /dev/null @@ -1,205 +0,0 @@ -#include <linux/fs.h> -#include <linux/spinlock.h> -#include <linux/list.h> -#include <linux/proc_fs.h> -#include <linux/seq_file.h> -#include <linux/slab.h> - -#include "vmregion.h" - -/* - * VM region handling support. - * - * This should become something generic, handling VM region allocations for - * vmalloc and similar (ioremap, module space, etc). - * - * I envisage vmalloc()'s supporting vm_struct becoming: - * - *  struct vm_struct { - *    struct vmregion	region; - *    unsigned long	flags; - *    struct page	**pages; - *    unsigned int	nr_pages; - *    unsigned long	phys_addr; - *  }; - * - * get_vm_area() would then call vmregion_alloc with an appropriate - * struct vmregion head (eg): - * - *  struct vmregion vmalloc_head = { - *	.vm_list	= LIST_HEAD_INIT(vmalloc_head.vm_list), - *	.vm_start	= VMALLOC_START, - *	.vm_end		= VMALLOC_END, - *  }; - * - * However, vmalloc_head.vm_start is variable (typically, it is dependent on - * the amount of RAM found at boot time.)  I would imagine that get_vm_area() - * would have to initialise this each time prior to calling vmregion_alloc(). 
- */ - -struct arm_vmregion * -arm_vmregion_alloc(struct arm_vmregion_head *head, size_t align, -		   size_t size, gfp_t gfp, const void *caller) -{ -	unsigned long start = head->vm_start, addr = head->vm_end; -	unsigned long flags; -	struct arm_vmregion *c, *new; - -	if (head->vm_end - head->vm_start < size) { -		printk(KERN_WARNING "%s: allocation too big (requested %#x)\n", -			__func__, size); -		goto out; -	} - -	new = kmalloc(sizeof(struct arm_vmregion), gfp); -	if (!new) -		goto out; - -	new->caller = caller; - -	spin_lock_irqsave(&head->vm_lock, flags); - -	addr = rounddown(addr - size, align); -	list_for_each_entry_reverse(c, &head->vm_list, vm_list) { -		if (addr >= c->vm_end) -			goto found; -		addr = rounddown(c->vm_start - size, align); -		if (addr < start) -			goto nospc; -	} - - found: -	/* -	 * Insert this entry after the one we found. -	 */ -	list_add(&new->vm_list, &c->vm_list); -	new->vm_start = addr; -	new->vm_end = addr + size; -	new->vm_active = 1; - -	spin_unlock_irqrestore(&head->vm_lock, flags); -	return new; - - nospc: -	spin_unlock_irqrestore(&head->vm_lock, flags); -	kfree(new); - out: -	return NULL; -} - -static struct arm_vmregion *__arm_vmregion_find(struct arm_vmregion_head *head, unsigned long addr) -{ -	struct arm_vmregion *c; - -	list_for_each_entry(c, &head->vm_list, vm_list) { -		if (c->vm_active && c->vm_start == addr) -			goto out; -	} -	c = NULL; - out: -	return c; -} - -struct arm_vmregion *arm_vmregion_find(struct arm_vmregion_head *head, unsigned long addr) -{ -	struct arm_vmregion *c; -	unsigned long flags; - -	spin_lock_irqsave(&head->vm_lock, flags); -	c = __arm_vmregion_find(head, addr); -	spin_unlock_irqrestore(&head->vm_lock, flags); -	return c; -} - -struct arm_vmregion *arm_vmregion_find_remove(struct arm_vmregion_head *head, unsigned long addr) -{ -	struct arm_vmregion *c; -	unsigned long flags; - -	spin_lock_irqsave(&head->vm_lock, flags); -	c = __arm_vmregion_find(head, addr); -	if (c) -		c->vm_active = 0; -	spin_unlock_irqrestore(&head->vm_lock, flags); -	return c; -} - -void arm_vmregion_free(struct arm_vmregion_head *head, struct arm_vmregion *c) -{ -	unsigned long flags; - -	spin_lock_irqsave(&head->vm_lock, flags); -	list_del(&c->vm_list); -	spin_unlock_irqrestore(&head->vm_lock, flags); - -	kfree(c); -} - -#ifdef CONFIG_PROC_FS -static int arm_vmregion_show(struct seq_file *m, void *p) -{ -	struct arm_vmregion *c = list_entry(p, struct arm_vmregion, vm_list); - -	seq_printf(m, "0x%08lx-0x%08lx %7lu", c->vm_start, c->vm_end, -		c->vm_end - c->vm_start); -	if (c->caller) -		seq_printf(m, " %pS", (void *)c->caller); -	seq_putc(m, '\n'); -	return 0; -} - -static void *arm_vmregion_start(struct seq_file *m, loff_t *pos) -{ -	struct arm_vmregion_head *h = m->private; -	spin_lock_irq(&h->vm_lock); -	return seq_list_start(&h->vm_list, *pos); -} - -static void *arm_vmregion_next(struct seq_file *m, void *p, loff_t *pos) -{ -	struct arm_vmregion_head *h = m->private; -	return seq_list_next(p, &h->vm_list, pos); -} - -static void arm_vmregion_stop(struct seq_file *m, void *p) -{ -	struct arm_vmregion_head *h = m->private; -	spin_unlock_irq(&h->vm_lock); -} - -static const struct seq_operations arm_vmregion_ops = { -	.start	= arm_vmregion_start, -	.stop	= arm_vmregion_stop, -	.next	= arm_vmregion_next, -	.show	= arm_vmregion_show, -}; - -static int arm_vmregion_open(struct inode *inode, struct file *file) -{ -	struct arm_vmregion_head *h = PDE(inode)->data; -	int ret = seq_open(file, &arm_vmregion_ops); -	if (!ret) { -		struct seq_file *m = 
file->private_data; -		m->private = h; -	} -	return ret; -} - -static const struct file_operations arm_vmregion_fops = { -	.open	= arm_vmregion_open, -	.read	= seq_read, -	.llseek	= seq_lseek, -	.release = seq_release, -}; - -int arm_vmregion_create_proc(const char *path, struct arm_vmregion_head *h) -{ -	proc_create_data(path, S_IRUSR, NULL, &arm_vmregion_fops, h); -	return 0; -} -#else -int arm_vmregion_create_proc(const char *path, struct arm_vmregion_head *h) -{ -	return 0; -} -#endif diff --git a/arch/arm/mm/vmregion.h b/arch/arm/mm/vmregion.h deleted file mode 100644 index 0f5a5f2a2c7..00000000000 --- a/arch/arm/mm/vmregion.h +++ /dev/null @@ -1,31 +0,0 @@ -#ifndef VMREGION_H -#define VMREGION_H - -#include <linux/spinlock.h> -#include <linux/list.h> - -struct page; - -struct arm_vmregion_head { -	spinlock_t		vm_lock; -	struct list_head	vm_list; -	unsigned long		vm_start; -	unsigned long		vm_end; -}; - -struct arm_vmregion { -	struct list_head	vm_list; -	unsigned long		vm_start; -	unsigned long		vm_end; -	int			vm_active; -	const void		*caller; -}; - -struct arm_vmregion *arm_vmregion_alloc(struct arm_vmregion_head *, size_t, size_t, gfp_t, const void *); -struct arm_vmregion *arm_vmregion_find(struct arm_vmregion_head *, unsigned long); -struct arm_vmregion *arm_vmregion_find_remove(struct arm_vmregion_head *, unsigned long); -void arm_vmregion_free(struct arm_vmregion_head *, struct arm_vmregion *); - -int arm_vmregion_create_proc(const char *, struct arm_vmregion_head *); - -#endif diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c index a34f1e21411..6828ef6ce80 100644 --- a/arch/arm/net/bpf_jit_32.c +++ b/arch/arm/net/bpf_jit_32.c @@ -341,10 +341,17 @@ static void emit_load_be16(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)  static inline void emit_swap16(u8 r_dst, u8 r_src, struct jit_ctx *ctx)  { -	emit(ARM_LSL_R(ARM_R1, r_src, 8), ctx); -	emit(ARM_ORR_S(r_dst, ARM_R1, r_src, SRTYPE_LSL, 8), ctx); -	emit(ARM_LSL_I(r_dst, r_dst, 8), ctx); -	emit(ARM_LSL_R(r_dst, r_dst, 8), ctx); +	/* r_dst = (r_src << 8) | (r_src >> 8) */ +	emit(ARM_LSL_I(ARM_R1, r_src, 8), ctx); +	emit(ARM_ORR_S(r_dst, ARM_R1, r_src, SRTYPE_LSR, 8), ctx); + +	/* +	 * we need to mask out the bits set in r_dst[23:16] due to +	 * the first shift instruction. +	 * +	 * note that 0x8ff is the encoded immediate 0x00ff0000. 
+	 */ +	emit(ARM_BIC_I(r_dst, r_dst, 0x8ff), ctx);  }  #else  /* ARMv6+ */ diff --git a/arch/arm/plat-omap/dmtimer.c b/arch/arm/plat-omap/dmtimer.c index 7b433f3bddc..a0daa2fb5de 100644 --- a/arch/arm/plat-omap/dmtimer.c +++ b/arch/arm/plat-omap/dmtimer.c @@ -808,11 +808,9 @@ static int omap_dm_timer_probe(struct platform_device *pdev)  		return  -ENOMEM;  	} -	timer->io_base = devm_request_and_ioremap(dev, mem); -	if (!timer->io_base) { -		dev_err(dev, "%s: region already claimed.\n", __func__); -		return -ENOMEM; -	} +	timer->io_base = devm_ioremap_resource(dev, mem); +	if (IS_ERR(timer->io_base)) +		return PTR_ERR(timer->io_base);  	if (dev->of_node) {  		if (of_find_property(dev->of_node, "ti,timer-alwon", NULL)) diff --git a/arch/arm/plat-samsung/adc.c b/arch/arm/plat-samsung/adc.c index 2d676ab50f7..ca07cb1b155 100644 --- a/arch/arm/plat-samsung/adc.c +++ b/arch/arm/plat-samsung/adc.c @@ -386,11 +386,9 @@ static int s3c_adc_probe(struct platform_device *pdev)  		return -ENXIO;  	} -	adc->regs = devm_request_and_ioremap(dev, regs); -	if (!adc->regs) { -		dev_err(dev, "failed to map registers\n"); -		return -ENXIO; -	} +	adc->regs = devm_ioremap_resource(dev, regs); +	if (IS_ERR(adc->regs)) +		return PTR_ERR(adc->regs);  	ret = regulator_enable(adc->vdd);  	if (ret) diff --git a/arch/arm/plat-samsung/dma-ops.c b/arch/arm/plat-samsung/dma-ops.c index d088afa034e..71d58ddea9c 100644 --- a/arch/arm/plat-samsung/dma-ops.c +++ b/arch/arm/plat-samsung/dma-ops.c @@ -19,7 +19,8 @@  #include <mach/dma.h>  static unsigned samsung_dmadev_request(enum dma_ch dma_ch, -				struct samsung_dma_req *param) +				struct samsung_dma_req *param, +				struct device *dev, char *ch_name)  {  	dma_cap_mask_t mask;  	void *filter_param; @@ -33,7 +34,12 @@ static unsigned samsung_dmadev_request(enum dma_ch dma_ch,  	 */  	filter_param = (dma_ch == DMACH_DT_PROP) ?  		
(void *)param->dt_dmach_prop : (void *)dma_ch; -	return (unsigned)dma_request_channel(mask, pl330_filter, filter_param); + +	if (dev->of_node) +		return (unsigned)dma_request_slave_channel(dev, ch_name); +	else +		return (unsigned)dma_request_channel(mask, pl330_filter, +							filter_param);  }  static int samsung_dmadev_release(unsigned ch, void *param) diff --git a/arch/arm/plat-samsung/include/plat/adc.h b/arch/arm/plat-samsung/include/plat/adc.h index b258a08de59..2fc89315553 100644 --- a/arch/arm/plat-samsung/include/plat/adc.h +++ b/arch/arm/plat-samsung/include/plat/adc.h @@ -15,6 +15,7 @@  #define __ASM_PLAT_ADC_H __FILE__  struct s3c_adc_client; +struct platform_device;  extern int s3c_adc_start(struct s3c_adc_client *client,  			 unsigned int channel, unsigned int nr_samples); diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h index f5144cdd300..114178268b7 100644 --- a/arch/arm/plat-samsung/include/plat/dma-ops.h +++ b/arch/arm/plat-samsung/include/plat/dma-ops.h @@ -39,7 +39,8 @@ struct samsung_dma_config {  };  struct samsung_dma_ops { -	unsigned (*request)(enum dma_ch ch, struct samsung_dma_req *param); +	unsigned (*request)(enum dma_ch ch, struct samsung_dma_req *param, +				struct device *dev, char *ch_name);  	int (*release)(unsigned ch, void *param);  	int (*config)(unsigned ch, struct samsung_dma_config *param);  	int (*prepare)(unsigned ch, struct samsung_dma_prep *param); diff --git a/arch/arm/plat-samsung/s3c-dma-ops.c b/arch/arm/plat-samsung/s3c-dma-ops.c index f99448c48d3..0cc40aea3f5 100644 --- a/arch/arm/plat-samsung/s3c-dma-ops.c +++ b/arch/arm/plat-samsung/s3c-dma-ops.c @@ -36,7 +36,8 @@ static void s3c_dma_cb(struct s3c2410_dma_chan *channel, void *param,  }  static unsigned s3c_dma_request(enum dma_ch dma_ch, -					struct samsung_dma_req *param) +				struct samsung_dma_req *param, +				struct device *dev, char *ch_name)  {  	struct cb_data *data;
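A closing illustration of the new samsung_dma_ops request signature: callers now pass the requesting device and a channel name, so DT-enabled platforms resolve the channel through dma_request_slave_channel() while legacy platforms keep the filter-based path. A hypothetical caller-side sketch in C; the surrounding driver context and the "tx" channel name are illustrative, not taken from this series:

#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <plat/dma-ops.h>

/* Hypothetical driver code: request a slave channel through the ops table.
 * With dev->of_node set, "tx" is resolved via dma_request_slave_channel();
 * otherwise the request falls back to dma_request_channel() with the
 * pl330 filter (in which case param->dt_dmach_prop would need to be set
 * for DMACH_DT_PROP). */
static unsigned example_request_tx(struct platform_device *pdev)
{
	struct samsung_dma_ops *ops = samsung_dma_get_ops();
	struct samsung_dma_req req = {
		.cap = DMA_SLAVE,	/* capability the dmadev backend sets in its mask */
	};

	return ops->request(DMACH_DT_PROP, &req, &pdev->dev, "tx");
}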