Merge branch 'android-3.10' into goldfish-3.10
5bf7393 net: tcp: check for SOCK_DEAD again in tcp_nuke_addr
5ab7fa9 BACKPORT: mm: /proc/pid/smaps: show proportional swap share of the mapping
214c1ad Merge branch 'android-3.10-zram' into android-3.10
2203f3f UPSTREAM: zram: fix possible use after free in zcomp_create()
...
13e0ef9 dm verity: move dm-verity.c to dm-verity-target.c
e7f44b0 dm verity: separate function for parsing opt args
bbdba4c dm verity: clean up duplicate hashing code
82cdd95 dm verity: port upstream changes to 3.10
Change-Id: I8b9885ea65639292787211e4f8599c29b1c10253
diff --git a/Documentation/DMA-API-HOWTO.txt b/Documentation/DMA-API-HOWTO.txt
index 14129f1..5e98303 100644
--- a/Documentation/DMA-API-HOWTO.txt
+++ b/Documentation/DMA-API-HOWTO.txt
@@ -101,14 +101,23 @@
because this shows that you did think about these issues wrt. your
device.
-The query is performed via a call to dma_set_mask():
+The query is performed via a call to dma_set_mask_and_coherent():
- int dma_set_mask(struct device *dev, u64 mask);
+ int dma_set_mask_and_coherent(struct device *dev, u64 mask);
-The query for consistent allocations is performed via a call to
-dma_set_coherent_mask():
+which will query the mask for both streaming and coherent APIs together.
+If you have some special requirements, then the following two separate
+queries can be used instead:
- int dma_set_coherent_mask(struct device *dev, u64 mask);
+ The query for streaming mappings is performed via a call to
+ dma_set_mask():
+
+ int dma_set_mask(struct device *dev, u64 mask);
+
+ The query for consistent allocations is performed via a call
+ to dma_set_coherent_mask():
+
+ int dma_set_coherent_mask(struct device *dev, u64 mask);
Here, dev is a pointer to the device struct of your device, and mask
is a bit mask describing which bits of an address your device
@@ -137,7 +146,7 @@
The standard 32-bit addressing device would do something like this:
- if (dma_set_mask(dev, DMA_BIT_MASK(32))) {
+ if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
printk(KERN_WARNING
"mydev: No suitable DMA available.\n");
goto ignore_this_device;
@@ -171,22 +180,20 @@
int using_dac, consistent_using_dac;
- if (!dma_set_mask(dev, DMA_BIT_MASK(64))) {
+ if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) {
using_dac = 1;
consistent_using_dac = 1;
- dma_set_coherent_mask(dev, DMA_BIT_MASK(64));
- } else if (!dma_set_mask(dev, DMA_BIT_MASK(32))) {
+ } else if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
using_dac = 0;
consistent_using_dac = 0;
- dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
} else {
printk(KERN_WARNING
"mydev: No suitable DMA available.\n");
goto ignore_this_device;
}
-dma_set_coherent_mask() will always be able to set the same or a
-smaller mask as dma_set_mask(). However for the rare case that a
+The coherent mask will always accept the same or a smaller mask than
+the streaming mask. However, for the rare case that a
device driver only uses consistent allocations, one would have to
check the return value from dma_set_coherent_mask().
@@ -199,9 +206,9 @@
goto ignore_this_device;
}
-When dma_set_mask() is successful, and returns zero, the kernel saves
-away this mask you have provided. The kernel will use this
-information later when you make DMA mappings.
+When dma_set_mask() or dma_set_mask_and_coherent() is successful, and
+returns zero, the kernel saves away this mask you have provided. The
+kernel will use this information later when you make DMA mappings.
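+
+For example, a minimal probe path using the combined helper could look
+like this (an illustrative sketch only; the device name, error handling
+and warning text are placeholders):
+
+	static int mydev_probe(struct platform_device *pdev)
+	{
+		int err;
+
+		/* Try 64-bit DMA first, then fall back to 32-bit. */
+		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+		if (err)
+			err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+		if (err) {
+			dev_warn(&pdev->dev, "no suitable DMA available\n");
+			return err;
+		}
+		/* The saved masks are consulted by later dma_map_*() calls. */
+		return 0;
+	}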
There is a case which we are aware of at this time, which is worth
mentioning in this documentation. If your device supports multiple
diff --git a/Documentation/DMA-API.txt b/Documentation/DMA-API.txt
index 78a6c56..e865279 100644
--- a/Documentation/DMA-API.txt
+++ b/Documentation/DMA-API.txt
@@ -142,6 +142,14 @@
driver writers.
int
+dma_set_mask_and_coherent(struct device *dev, u64 mask)
+
+Checks to see if the mask is possible and updates the device
+streaming and coherent DMA mask parameters if it is.
+
+Returns: 0 if successful and a negative error if not.
+
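+Semantically, the combined helper behaves like the two single-mask
+calls chained together; a sketch of that behaviour (close to, but not
+guaranteed to match, the in-tree inline helper):
+
+	static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
+	{
+		int rc = dma_set_mask(dev, mask);
+		if (rc == 0)
+			dma_set_coherent_mask(dev, mask);
+		return rc;
+	}
+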
+int
dma_set_mask(struct device *dev, u64 mask)
Checks to see if the mask is possible and updates the device
diff --git a/Documentation/arm64/booting.txt b/Documentation/arm64/booting.txt
index 9c4d388..5273c4d 100644
--- a/Documentation/arm64/booting.txt
+++ b/Documentation/arm64/booting.txt
@@ -68,13 +68,23 @@
Requirement: MANDATORY
-The decompressed kernel image contains a 32-byte header as follows:
+The decompressed kernel image contains a 64-byte header as follows:
- u32 magic = 0x14000008; /* branch to stext, little-endian */
- u32 res0 = 0; /* reserved */
+ u32 code0; /* Executable code */
+ u32 code1; /* Executable code */
u64 text_offset; /* Image load offset */
+ u64 res0 = 0; /* reserved */
u64 res1 = 0; /* reserved */
u64 res2 = 0; /* reserved */
+ u64 res3 = 0; /* reserved */
+ u64 res4 = 0; /* reserved */
+ u32 magic = 0x644d5241; /* Magic number, little endian, "ARM\x64" */
+ u32 res5 = 0; /* reserved */
+
+
+Header notes:
+
+- code0/code1 are responsible for branching to stext.
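+
+For illustration, the 64-byte header corresponds to the following C
+struct (a sketch only; the kernel emits this layout from assembly in
+head.S rather than defining such a struct):
+
+	struct arm64_image_header {
+		u32 code0;		/* executable code (branches to stext) */
+		u32 code1;		/* executable code */
+		u64 text_offset;	/* image load offset */
+		u64 res0;		/* reserved */
+		u64 res1;		/* reserved */
+		u64 res2;		/* reserved */
+		u64 res3;		/* reserved */
+		u64 res4;		/* reserved */
+		u32 magic;		/* 0x644d5241, "ARM\x64", little endian */
+		u32 res5;		/* reserved */
+	};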
The image must be placed at the specified offset (currently 0x80000)
from the start of the system RAM and called there. The start of the
diff --git a/Documentation/arm64/memory.txt b/Documentation/arm64/memory.txt
index 5f583af..c6941f8 100644
--- a/Documentation/arm64/memory.txt
+++ b/Documentation/arm64/memory.txt
@@ -21,7 +21,7 @@
TTBR0.
-AArch64 Linux memory layout:
+AArch64 Linux memory layout with 4KB pages:
Start End Size Use
-----------------------------------------------------------------------
@@ -35,17 +35,46 @@
ffffffbe00000000 ffffffbffbbfffff ~8GB [guard, future vmemmap]
-ffffffbffbc00000 ffffffbffbdfffff 2MB earlyprintk device
+ffffffbffa000000 ffffffbffaffffff 16MB PCI I/O space
-ffffffbffbe00000 ffffffbffbe0ffff 64KB PCI I/O space
+ffffffbffb000000 ffffffbffbbfffff 12MB [guard]
-ffffffbbffff0000 ffffffbcffffffff ~2MB [guard]
+ffffffbffbc00000 ffffffbffbdfffff 2MB fixed mappings
+
+ffffffbffbe00000 ffffffbffbffffff 2MB [guard]
ffffffbffc000000 ffffffbfffffffff 64MB modules
ffffffc000000000 ffffffffffffffff 256GB kernel logical memory map
+AArch64 Linux memory layout with 64KB pages:
+
+Start End Size Use
+-----------------------------------------------------------------------
+0000000000000000 000003ffffffffff 4TB user
+
+fffffc0000000000 fffffdfbfffeffff ~2TB vmalloc
+
+fffffdfbffff0000 fffffdfbffffffff 64KB [guard page]
+
+fffffdfc00000000 fffffdfdffffffff 8GB vmemmap
+
+fffffdfe00000000 fffffdfffbbfffff ~8GB [guard, future vmemmap]
+
+fffffdfffa000000 fffffdfffaffffff 16MB PCI I/O space
+
+fffffdfffb000000 fffffdfffbbfffff 12MB [guard]
+
+fffffdfffbc00000 fffffdfffbdfffff 2MB fixed mappings
+
+fffffdfffbe00000 fffffdfffbffffff 2MB [guard]
+
+fffffdfffc000000 fffffdffffffffff 64MB modules
+
+fffffe0000000000 ffffffffffffffff 2TB kernel logical memory map
+
+
Translation table lookup with 4KB pages:
+--------+--------+--------+--------+--------+--------+--------+--------+
diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c
index 4a17736..7991e08 100644
--- a/arch/arc/mm/init.c
+++ b/arch/arc/mm/init.c
@@ -157,9 +157,8 @@
#endif
#ifdef CONFIG_OF_FLATTREE
-void __init early_init_dt_setup_initrd_arch(unsigned long start,
- unsigned long end)
+void __init early_init_dt_setup_initrd_arch(u64 start, u64 end)
{
- pr_err("%s(%lx, %lx)\n", __func__, start, end);
+ pr_err("%s(%llx, %llx)\n", __func__, start, end);
}
#endif /* CONFIG_OF_FLATTREE */
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 56ae8fa..bbb5fce 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -964,6 +964,8 @@
source "arch/arm/mach-integrator/Kconfig"
+source "arch/arm/mach-lionhead/Kconfig"
+
source "arch/arm/mach-iop32x/Kconfig"
source "arch/arm/mach-iop33x/Kconfig"
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 314c7be..27ae207 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -21,6 +21,7 @@
OBJCOPYFLAGS :=-O binary -R .comment -S
GZFLAGS :=-9
#KBUILD_CFLAGS +=-pipe
+KBUILD_CFLAGS += -fno-pic
# Never generate .eh_frame
KBUILD_CFLAGS += $(call cc-option,-fno-dwarf2-cfi-asm)
@@ -155,6 +156,7 @@
machine-$(CONFIG_ARCH_IXP4XX) += ixp4xx
machine-$(CONFIG_ARCH_KIRKWOOD) += kirkwood
machine-$(CONFIG_ARCH_KS8695) += ks8695
+machine-$(CONFIG_ARCH_LIONHEAD) += lionhead
machine-$(CONFIG_ARCH_LPC32XX) += lpc32xx
machine-$(CONFIG_ARCH_MMP) += mmp
machine-$(CONFIG_ARCH_MSM) += msm
diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
index b83cc50..f48b9a1 100644
--- a/arch/arm/boot/dts/Makefile
+++ b/arch/arm/boot/dts/Makefile
@@ -62,6 +62,7 @@
ecx-2000.dtb
dtb-$(CONFIG_ARCH_INTEGRATOR) += integratorap.dtb \
integratorcp.dtb
+dtb-$(CONFIG_ARCH_LIONHEAD) += lionhead-a15.dtb
dtb-$(CONFIG_ARCH_LPC32XX) += ea3250.dtb phy3250.dtb
dtb-$(CONFIG_ARCH_KIRKWOOD) += kirkwood-cloudbox.dtb \
kirkwood-dns320.dtb \
diff --git a/arch/arm/boot/dts/lionhead-a15.dts b/arch/arm/boot/dts/lionhead-a15.dts
new file mode 100644
index 0000000..6174393
--- /dev/null
+++ b/arch/arm/boot/dts/lionhead-a15.dts
@@ -0,0 +1,271 @@
+/*
+ * ARM Cortex-A15 "lionhead" virtual platform
+ *
+ * based on CoreTile Express A15x2 (version with Test Chip 1)
+ * Cortex-A15 MPCore (V2P-CA15)
+ */
+
+/dts-v1/;
+
+/ {
+ model = "lionhead-a15";
+ arm,vexpress,site = <0xf>;
+ compatible = "generic,lionhead,a15", "generic,lionhead";
+ interrupt-parent = <&gic>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ chosen { };
+
+ aliases {
+ serial0 = &v2m_serial0;
+ serial1 = &v2m_serial1;
+ serial2 = &v2m_serial2;
+ serial3 = &v2m_serial3;
+ i2c0 = &v2m_i2c_pcie;
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu@0 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a15";
+ reg = <0>;
+ };
+
+ cpu@1 {
+ device_type = "cpu";
+ compatible = "arm,cortex-a15";
+ reg = <1>;
+ };
+ };
+
+ memory@80000000 {
+ device_type = "memory";
+ reg = <0 0x80000000 0 0x40000000>;
+ };
+
+ memory-controller@2b0a0000 {
+ compatible = "arm,pl341", "arm,primecell";
+ reg = <0 0x2b0a0000 0 0x1000>;
+ clocks = <&oscclk7>;
+ clock-names = "apb_pclk";
+ };
+
+ wdt@2b060000 {
+ compatible = "arm,sp805", "arm,primecell";
+ status = "disabled";
+ reg = <0 0x2b060000 0 0x1000>;
+ interrupts = <0 98 4>;
+ clocks = <&oscclk7>;
+ clock-names = "apb_pclk";
+ };
+
+ gic: interrupt-controller@2c001000 {
+ compatible = "arm,cortex-a15-gic", "arm,cortex-a9-gic";
+ #interrupt-cells = <3>;
+ #address-cells = <0>;
+ interrupt-controller;
+ reg = <0 0x2c001000 0 0x1000>,
+ <0 0x2c002000 0 0x1000>,
+ <0 0x2c004000 0 0x2000>,
+ <0 0x2c006000 0 0x2000>;
+ interrupts = <1 9 0xf04>;
+ };
+
+ memory-controller@7ffd0000 {
+ compatible = "arm,pl354", "arm,primecell";
+ reg = <0 0x7ffd0000 0 0x1000>;
+ interrupts = <0 86 4>,
+ <0 87 4>;
+ clocks = <&oscclk7>;
+ clock-names = "apb_pclk";
+ };
+
+ dma@7ffb0000 {
+ compatible = "arm,pl330", "arm,primecell";
+ reg = <0 0x7ffb0000 0 0x1000>;
+ interrupts = <0 92 4>,
+ <0 88 4>,
+ <0 89 4>,
+ <0 90 4>,
+ <0 91 4>;
+ clocks = <&oscclk7>;
+ clock-names = "apb_pclk";
+ };
+
+ timer {
+ compatible = "arm,armv7-timer";
+ interrupts = <1 13 0xf08>,
+ <1 14 0xf08>,
+ <1 11 0xf08>,
+ <1 10 0xf08>;
+ };
+
+ pmu {
+ compatible = "arm,cortex-a15-pmu";
+ interrupts = <0 68 4>,
+ <0 69 4>;
+ };
+
+ dcc {
+ compatible = "arm,vexpress,config-bus";
+ arm,vexpress,config-bridge = <&v2m_sysreg>;
+
+ osc@0 {
+ /* CPU PLL reference clock */
+ compatible = "arm,vexpress-osc";
+ arm,vexpress-sysreg,func = <1 0>;
+ freq-range = <50000000 60000000>;
+ #clock-cells = <0>;
+ clock-output-names = "oscclk0";
+ };
+
+ osc@4 {
+ /* Multiplexed AXI master clock */
+ compatible = "arm,vexpress-osc";
+ arm,vexpress-sysreg,func = <1 4>;
+ freq-range = <20000000 40000000>;
+ #clock-cells = <0>;
+ clock-output-names = "oscclk4";
+ };
+
+ oscclk5: osc@5 {
+ /* HDLCD PLL reference clock */
+ compatible = "arm,vexpress-osc";
+ arm,vexpress-sysreg,func = <1 5>;
+ freq-range = <23750000 165000000>;
+ #clock-cells = <0>;
+ clock-output-names = "oscclk5";
+ };
+
+ smbclk: osc@6 {
+ /* SMB clock */
+ compatible = "arm,vexpress-osc";
+ arm,vexpress-sysreg,func = <1 6>;
+ freq-range = <20000000 50000000>;
+ #clock-cells = <0>;
+ clock-output-names = "oscclk6";
+ };
+
+ oscclk7: osc@7 {
+ /* SYS PLL reference clock */
+ compatible = "arm,vexpress-osc";
+ arm,vexpress-sysreg,func = <1 7>;
+ freq-range = <20000000 60000000>;
+ #clock-cells = <0>;
+ clock-output-names = "oscclk7";
+ };
+
+ osc@8 {
+ /* DDR2 PLL reference clock */
+ compatible = "arm,vexpress-osc";
+ arm,vexpress-sysreg,func = <1 8>;
+ freq-range = <40000000 40000000>;
+ #clock-cells = <0>;
+ clock-output-names = "oscclk8";
+ };
+
+ volt@0 {
+ /* CPU core voltage */
+ compatible = "arm,vexpress-volt";
+ arm,vexpress-sysreg,func = <2 0>;
+ regulator-name = "Cores";
+ regulator-min-microvolt = <800000>;
+ regulator-max-microvolt = <1050000>;
+ regulator-always-on;
+ label = "Cores";
+ };
+
+ amp@0 {
+ /* Total current for the two cores */
+ compatible = "arm,vexpress-amp";
+ arm,vexpress-sysreg,func = <3 0>;
+ label = "Cores";
+ };
+
+ temp@0 {
+ /* DCC internal temperature */
+ compatible = "arm,vexpress-temp";
+ arm,vexpress-sysreg,func = <4 0>;
+ label = "DCC";
+ };
+
+ power@0 {
+ /* Total power */
+ compatible = "arm,vexpress-power";
+ arm,vexpress-sysreg,func = <12 0>;
+ label = "Cores";
+ };
+
+ energy@0 {
+ /* Total energy */
+ compatible = "arm,vexpress-energy";
+ arm,vexpress-sysreg,func = <13 0>;
+ label = "Cores";
+ };
+ };
+
+ smb {
+ compatible = "simple-bus";
+
+ #address-cells = <2>;
+ #size-cells = <1>;
+ ranges = <0 0 0 0x08000000 0x04000000>,
+ <1 0 0 0x14000000 0x04000000>,
+ <2 0 0 0x18000000 0x04000000>,
+ <3 0 0 0x1c000000 0x04000000>,
+ <4 0 0 0x0c000000 0x04000000>,
+ <5 0 0 0x10000000 0x04000000>;
+
+ #interrupt-cells = <1>;
+ interrupt-map-mask = <0 0 63>;
+ interrupt-map = <0 0 0 &gic 0 0 4>,
+ <0 0 1 &gic 0 1 4>,
+ <0 0 2 &gic 0 2 4>,
+ <0 0 3 &gic 0 3 4>,
+ <0 0 4 &gic 0 4 4>,
+ <0 0 5 &gic 0 5 4>,
+ <0 0 6 &gic 0 6 4>,
+ <0 0 7 &gic 0 7 4>,
+ <0 0 8 &gic 0 8 4>,
+ <0 0 9 &gic 0 9 4>,
+ <0 0 10 &gic 0 10 4>,
+ <0 0 11 &gic 0 11 4>,
+ <0 0 12 &gic 0 12 4>,
+ <0 0 13 &gic 0 13 4>,
+ <0 0 14 &gic 0 14 4>,
+ <0 0 15 &gic 0 15 4>,
+ <0 0 16 &gic 0 16 4>,
+ <0 0 17 &gic 0 17 4>,
+ <0 0 18 &gic 0 18 4>,
+ <0 0 19 &gic 0 19 4>,
+ <0 0 20 &gic 0 20 4>,
+ <0 0 21 &gic 0 21 4>,
+ <0 0 22 &gic 0 22 4>,
+ <0 0 23 &gic 0 23 4>,
+ <0 0 24 &gic 0 24 4>,
+ <0 0 25 &gic 0 25 4>,
+ <0 0 26 &gic 0 26 4>,
+ <0 0 27 &gic 0 27 4>,
+ <0 0 28 &gic 0 28 4>,
+ <0 0 29 &gic 0 29 4>,
+ <0 0 30 &gic 0 30 4>,
+ <0 0 31 &gic 0 31 4>,
+ <0 0 32 &gic 0 32 4>,
+ <0 0 33 &gic 0 33 4>,
+ <0 0 34 &gic 0 34 4>,
+ <0 0 35 &gic 0 35 4>,
+ <0 0 36 &gic 0 36 4>,
+ <0 0 37 &gic 0 37 4>,
+ <0 0 38 &gic 0 38 4>,
+ <0 0 39 &gic 0 39 4>,
+ <0 0 40 &gic 0 40 4>,
+ <0 0 41 &gic 0 41 4>,
+ <0 0 42 &gic 0 42 4>;
+
+ /include/ "lionhead-motherboard.dtsi"
+ };
+};
diff --git a/arch/arm/boot/dts/lionhead-motherboard.dtsi b/arch/arm/boot/dts/lionhead-motherboard.dtsi
new file mode 100644
index 0000000..a1d1730
--- /dev/null
+++ b/arch/arm/boot/dts/lionhead-motherboard.dtsi
@@ -0,0 +1,291 @@
+/*
+ * ARM "lionhead" virtual platform motherboard
+ *
+ * based on ARM Ltd. Versatile Express
+ *
+ * Motherboard Express uATX
+ * V2M-P1
+ *
+ * HBI-0190D
+ *
+ * RS1 memory map ("ARM Cortex-A Series memory map" in the board's
+ * Technical Reference Manual)
+ */
+
+ motherboard {
+ model = "lionhead";
+ arm,vexpress,site = <0>;
+ compatible = "generic,lionhead,motherboard", "simple-bus";
+ #address-cells = <2>; /* SMB chipselect number and offset */
+ #size-cells = <1>;
+ #interrupt-cells = <1>;
+ ranges;
+
+ flash@0,00000000 {
+ compatible = "arm,vexpress-flash", "cfi-flash";
+ reg = <0 0x00000000 0x04000000>,
+ <4 0x00000000 0x04000000>;
+ bank-width = <4>;
+ };
+
+ psram@1,00000000 {
+ compatible = "arm,vexpress-psram", "mtd-ram";
+ reg = <1 0x00000000 0x02000000>;
+ bank-width = <4>;
+ };
+
+ ethernet@2,02000000 {
+ compatible = "smsc,lan9118", "smsc,lan9115";
+ reg = <2 0x02000000 0x10000>;
+ interrupts = <15>;
+ phy-mode = "mii";
+ reg-io-width = <4>;
+ smsc,irq-active-high;
+ smsc,irq-push-pull;
+ vdd33a-supply = <&v2m_fixed_3v3>;
+ vddvario-supply = <&v2m_fixed_3v3>;
+ };
+
+ usb@2,03000000 {
+ compatible = "nxp,usb-isp1761";
+ reg = <2 0x03000000 0x20000>;
+ interrupts = <16>;
+ port1-otg;
+ };
+
+ iofpga@3,00000000 {
+ compatible = "arm,amba-bus", "simple-bus";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ ranges = <0 3 0 0x200000>;
+
+ v2m_sysreg: sysreg@010000 {
+ compatible = "arm,vexpress-sysreg";
+ reg = <0x010000 0x1000>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ };
+
+ v2m_sysctl: sysctl@020000 {
+ compatible = "arm,sp810", "arm,primecell";
+ reg = <0x020000 0x1000>;
+ clocks = <&v2m_refclk32khz>, <&v2m_refclk1mhz>, <&smbclk>;
+ clock-names = "refclk", "timclk", "apb_pclk";
+ #clock-cells = <1>;
+ clock-output-names = "timerclken0", "timerclken1", "timerclken2", "timerclken3";
+ };
+
+ /* PCI-E I2C bus */
+ v2m_i2c_pcie: i2c@030000 {
+ compatible = "arm,versatile-i2c";
+ reg = <0x030000 0x1000>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ pcie-switch@60 {
+ compatible = "idt,89hpes32h8";
+ reg = <0x60>;
+ };
+ };
+
+ audio@040000 {
+ compatible = "generic,goldfish-audio";
+ reg = <0x040000 0x1000>;
+ interrupts = <11>;
+ };
+
+ goldfish_battery@050000 {
+ compatible = "generic,goldfish-battery";
+ reg = <0x050000 0x1000>;
+ interrupts = <9>;
+ };
+
+ kmi@060000 {
+ compatible = "arm,pl050", "arm,primecell";
+ reg = <0x060000 0x1000>;
+ interrupts = <12>;
+ clocks = <&v2m_clk24mhz>, <&smbclk>;
+ clock-names = "KMIREFCLK", "apb_pclk";
+ };
+
+ kmi@070000 {
+ compatible = "arm,pl050", "arm,primecell";
+ reg = <0x070000 0x1000>;
+ interrupts = <13>;
+ clocks = <&v2m_clk24mhz>, <&smbclk>;
+ clock-names = "KMIREFCLK", "apb_pclk";
+ };
+
+ v2m_serial0: uart@090000 {
+ compatible = "arm,pl011", "arm,primecell";
+ reg = <0x090000 0x1000>;
+ interrupts = <5>;
+ clocks = <&v2m_oscclk2>, <&smbclk>;
+ clock-names = "uartclk", "apb_pclk";
+ };
+
+ v2m_serial1: uart@0a0000 {
+ compatible = "arm,pl011", "arm,primecell";
+ reg = <0x0a0000 0x1000>;
+ interrupts = <6>;
+ clocks = <&v2m_oscclk2>, <&smbclk>;
+ clock-names = "uartclk", "apb_pclk";
+ };
+
+ v2m_serial2: uart@0b0000 {
+ compatible = "arm,pl011", "arm,primecell";
+ reg = <0x0b0000 0x1000>;
+ interrupts = <7>;
+ clocks = <&v2m_oscclk2>, <&smbclk>;
+ clock-names = "uartclk", "apb_pclk";
+ };
+
+ v2m_serial3: uart@0c0000 {
+ compatible = "arm,pl011", "arm,primecell";
+ reg = <0x0c0000 0x1000>;
+ interrupts = <8>;
+ clocks = <&v2m_oscclk2>, <&smbclk>;
+ clock-names = "uartclk", "apb_pclk";
+ };
+
+ wdt@0f0000 {
+ compatible = "arm,sp805", "arm,primecell";
+ reg = <0x0f0000 0x1000>;
+ interrupts = <0>;
+ clocks = <&v2m_refclk32khz>, <&smbclk>;
+ clock-names = "wdogclk", "apb_pclk";
+ };
+
+ v2m_timer01: timer@110000 {
+ compatible = "arm,sp804", "arm,primecell";
+ reg = <0x110000 0x1000>;
+ interrupts = <2>;
+ clocks = <&v2m_sysctl 0>, <&v2m_sysctl 1>, <&smbclk>;
+ clock-names = "timclken1", "timclken2", "apb_pclk";
+ };
+
+ v2m_timer23: timer@120000 {
+ compatible = "arm,sp804", "arm,primecell";
+ reg = <0x120000 0x1000>;
+ interrupts = <3>;
+ clocks = <&v2m_sysctl 2>, <&v2m_sysctl 3>, <&smbclk>;
+ clock-names = "timclken1", "timclken2", "apb_pclk";
+ };
+
+ goldfish_events@160000 {
+ compatible = "generic,goldfish-events-keypad";
+ reg = <0x160000 0x1000>;
+ interrupts = <10>;
+ };
+
+ rtc@170000 {
+ compatible = "arm,pl031", "arm,primecell";
+ reg = <0x170000 0x1000>;
+ interrupts = <4>;
+ clocks = <&smbclk>;
+ clock-names = "apb_pclk";
+ };
+
+ compact-flash@1a0000 {
+ compatible = "arm,vexpress-cf", "ata-generic";
+ reg = <0x1a0000 0x100
+ 0x1a0100 0xf00>;
+ reg-shift = <2>;
+ };
+
+ fb@1f0000 {
+ compatible = "generic,goldfish-fb";
+ reg = <0x1f0000 0x1000>;
+ interrupts = <14>;
+ };
+ };
+
+ v2m_fixed_3v3: fixedregulator@0 {
+ compatible = "regulator-fixed";
+ regulator-name = "3V3";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ v2m_clk24mhz: clk24mhz {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <24000000>;
+ clock-output-names = "v2m:clk24mhz";
+ };
+
+ v2m_refclk1mhz: refclk1mhz {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <1000000>;
+ clock-output-names = "v2m:refclk1mhz";
+ };
+
+ v2m_refclk32khz: refclk32khz {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <32768>;
+ clock-output-names = "v2m:refclk32khz";
+ };
+
+ mcc {
+ compatible = "arm,vexpress,config-bus";
+ arm,vexpress,config-bridge = <&v2m_sysreg>;
+
+ osc@0 {
+ /* MCC static memory clock */
+ compatible = "arm,vexpress-osc";
+ arm,vexpress-sysreg,func = <1 0>;
+ freq-range = <25000000 60000000>;
+ #clock-cells = <0>;
+ clock-output-names = "v2m:oscclk0";
+ };
+
+ v2m_oscclk2: osc@2 {
+ /* IO FPGA peripheral clock */
+ compatible = "arm,vexpress-osc";
+ arm,vexpress-sysreg,func = <1 2>;
+ freq-range = <24000000 24000000>;
+ #clock-cells = <0>;
+ clock-output-names = "v2m:oscclk2";
+ };
+
+ volt@0 {
+ /* Logic level voltage */
+ compatible = "arm,vexpress-volt";
+ arm,vexpress-sysreg,func = <2 0>;
+ regulator-name = "VIO";
+ regulator-always-on;
+ label = "VIO";
+ };
+
+ temp@0 {
+ /* MCC internal operating temperature */
+ compatible = "arm,vexpress-temp";
+ arm,vexpress-sysreg,func = <4 0>;
+ label = "MCC";
+ };
+
+ reset@0 {
+ compatible = "arm,vexpress-reset";
+ arm,vexpress-sysreg,func = <5 0>;
+ };
+
+ muxfpga@0 {
+ compatible = "arm,vexpress-muxfpga";
+ arm,vexpress-sysreg,func = <7 0>;
+ };
+
+ shutdown@0 {
+ compatible = "arm,vexpress-shutdown";
+ arm,vexpress-sysreg,func = <8 0>;
+ };
+
+ reboot@0 {
+ compatible = "arm,vexpress-reboot";
+ arm,vexpress-sysreg,func = <9 0>;
+ };
+ };
+ };
diff --git a/arch/arm/configs/lionhead_defconfig b/arch/arm/configs/lionhead_defconfig
new file mode 100644
index 0000000..36b660c
--- /dev/null
+++ b/arch/arm/configs/lionhead_defconfig
@@ -0,0 +1,145 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SYSVIPC=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_CGROUPS=y
+CONFIG_CPUSETS=y
+# CONFIG_UTS_NS is not set
+# CONFIG_IPC_NS is not set
+# CONFIG_PID_NS is not set
+# CONFIG_NET_NS is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_ARCH_LIONHEAD=y
+CONFIG_ARM_ERRATA_720789=y
+CONFIG_PL310_ERRATA_753970=y
+CONFIG_ARM_PSCI=y
+CONFIG_AEABI=y
+# CONFIG_OABI_COMPAT is not set
+CONFIG_ARM_FLUSH_CONSOLE_ON_RESTART=y
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_CMDLINE="console=ttyAMA0"
+CONFIG_VFP=y
+CONFIG_NEON=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_SUSPEND_TIME=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+# CONFIG_INET_LRO is not set
+# CONFIG_IPV6 is not set
+# CONFIG_WIRELESS is not set
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_VIRTIO_BLK=y
+CONFIG_UID_STAT=y
+# CONFIG_SCSI_PROC_FS is not set
+CONFIG_BLK_DEV_SD=y
+CONFIG_SCSI_MULTI_LUN=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_VIRTIO=y
+CONFIG_ATA=y
+CONFIG_NETDEVICES=y
+CONFIG_MII=y
+CONFIG_VIRTIO_NET=y
+# CONFIG_ETHERNET is not set
+CONFIG_PHYLIB=y
+# CONFIG_WLAN is not set
+CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_GOLDFISH_EVENTS=y
+# CONFIG_SERIO_SERPORT is not set
+CONFIG_SERIO_AMBAKMI=y
+CONFIG_LEGACY_PTY_COUNT=16
+# CONFIG_DEVMEM is not set
+CONFIG_SERIAL_AMBA_PL011=y
+CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
+# CONFIG_HW_RANDOM is not set
+CONFIG_SPI=y
+CONFIG_SPI_PL022=y
+CONFIG_GPIO_GENERIC_PLATFORM=y
+CONFIG_GPIO_PL061=y
+CONFIG_BATTERY_GOLDFISH=y
+# CONFIG_HWMON is not set
+CONFIG_DRM=y
+CONFIG_FB=y
+CONFIG_FB_MODE_HELPERS=y
+CONFIG_FB_ARMCLCD=y
+CONFIG_FB_GOLDFISH=y
+CONFIG_SOUND=y
+CONFIG_SND=y
+# CONFIG_SND_SUPPORT_OLD_API is not set
+# CONFIG_SND_DRIVERS is not set
+CONFIG_SND_ARMAACI=y
+CONFIG_HID_DRAGONRISE=y
+CONFIG_HID_GYRATION=y
+CONFIG_HID_TWINHAN=y
+CONFIG_HID_NTRIG=y
+CONFIG_HID_PANTHERLORD=y
+CONFIG_HID_PETALYNX=y
+CONFIG_HID_SAMSUNG=y
+CONFIG_HID_SUNPLUS=y
+CONFIG_HID_GREENASIA=y
+CONFIG_HID_SMARTJOYPLUS=y
+CONFIG_HID_TOPSEED=y
+CONFIG_HID_THRUSTMASTER=y
+CONFIG_HID_ZEROPLUS=y
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_MON=y
+CONFIG_USB_ISP1760_HCD=y
+CONFIG_MMC=y
+CONFIG_MMC_UNSAFE_RESUME=y
+CONFIG_MMC_EMBEDDED_SDIO=y
+CONFIG_MMC_PARANOID_SD_INIT=y
+CONFIG_MMC_BLOCK_DEFERRED_RESUME=y
+CONFIG_MMC_ARMMMCI=y
+CONFIG_MMC_SDHCI=y
+CONFIG_SWITCH=y
+CONFIG_SWITCH_GPIO=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_PL031=y
+CONFIG_VIRTIO_MMIO=y
+CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y
+CONFIG_STAGING=y
+CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_ASHMEM=y
+CONFIG_ANDROID_LOGGER=y
+# CONFIG_ANDROID_TIMED_OUTPUT is not set
+CONFIG_ANDROID_INTF_ALARM_DEV=y
+CONFIG_SYNC=y
+CONFIG_GOLDFISH_AUDIO=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+# CONFIG_EXT3_FS_XATTR is not set
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+# CONFIG_NETWORK_FILESYSTEMS is not set
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_FS=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DETECT_HUNG_TASK=y
+# CONFIG_SCHED_DEBUG is not set
+CONFIG_DEBUG_INFO=y
+CONFIG_DYNAMIC_DEBUG=y
+# CONFIG_ARM_UNWIND is not set
+CONFIG_DEBUG_USER=y
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+# CONFIG_CRYPTO_HW is not set
+CONFIG_VIRTUALIZATION=y
diff --git a/arch/arm/configs/ranchu_defconfig b/arch/arm/configs/ranchu_defconfig
new file mode 100644
index 0000000..818e8ab
--- /dev/null
+++ b/arch/arm/configs/ranchu_defconfig
@@ -0,0 +1,191 @@
+CONFIG_EXPERIMENTAL=y
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SYSVIPC=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_CGROUPS=y
+CONFIG_CPUSETS=y
+# CONFIG_UTS_NS is not set
+# CONFIG_IPC_NS is not set
+# CONFIG_USER_NS is not set
+# CONFIG_PID_NS is not set
+# CONFIG_NET_NS is not set
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_PROFILING=y
+CONFIG_OPROFILE=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_ARCH_VEXPRESS=y
+CONFIG_ARCH_VEXPRESS_CA9X4=y
+# CONFIG_SWP_EMULATE is not set
+CONFIG_HAVE_ARM_ARCH_TIMER=y
+CONFIG_SMP=y
+CONFIG_VMSPLIT_3G=y
+CONFIG_HIGHMEM=y
+CONFIG_HOTPLUG_CPU=y
+CONFIG_AEABI=y
+CONFIG_ZBOOT_ROM_TEXT=0x0
+CONFIG_ZBOOT_ROM_BSS=0x0
+CONFIG_CMDLINE="root=/dev/nfs nfsroot=10.1.69.3:/work/nfsroot ip=dhcp console=ttyAMA0 mem=128M"
+CONFIG_VFP=y
+CONFIG_NEON=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+# CONFIG_INET_LRO is not set
+# CONFIG_WIRELESS is not set
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_MTD=y
+CONFIG_MTD_CONCAT=y
+CONFIG_MTD_PARTITIONS=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_ARM_INTEGRATOR=y
+CONFIG_MISC_DEVICES=y
+# CONFIG_SCSI_PROC_FS is not set
+CONFIG_BLK_DEV_SD=y
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_ATA=y
+# CONFIG_SATA_PMP is not set
+CONFIG_NETDEVICES=y
+CONFIG_NET_ETHERNET=y
+CONFIG_SMSC911X=y
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+# CONFIG_WLAN is not set
+CONFIG_INPUT_EVDEV=y
+# CONFIG_SERIO_SERPORT is not set
+CONFIG_SERIO_AMBAKMI=y
+CONFIG_SERIAL_AMBA_PL011=y
+CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
+CONFIG_LEGACY_PTY_COUNT=16
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+CONFIG_FB=y
+CONFIG_FB_ARMCLCD=y
+# CONFIG_FRAMEBUFFER_CONSOLE is not set
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_MIXER_OSS=y
+CONFIG_SND_PCM_OSS=y
+# CONFIG_SND_DRIVERS is not set
+CONFIG_SND_ARMAACI=y
+CONFIG_HID_DRAGONRISE=y
+CONFIG_HID_GYRATION=y
+CONFIG_HID_TWINHAN=y
+CONFIG_HID_NTRIG=y
+CONFIG_HID_PANTHERLORD=y
+CONFIG_HID_PETALYNX=y
+CONFIG_HID_SAMSUNG=y
+CONFIG_HID_SONY=y
+CONFIG_HID_SUNPLUS=y
+CONFIG_HID_GREENASIA=y
+CONFIG_HID_SMARTJOYPLUS=y
+CONFIG_HID_TOPSEED=y
+CONFIG_HID_THRUSTMASTER=y
+CONFIG_HID_ZEROPLUS=y
+CONFIG_USB=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+# CONFIG_USB_DEVICE_CLASS is not set
+CONFIG_USB_MON=y
+CONFIG_USB_ISP1760_HCD=y
+CONFIG_USB_STORAGE=y
+CONFIG_MMC=y
+CONFIG_MMC_ARMMMCI=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_PL031=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+# CONFIG_EXT3_FS_XATTR is not set
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_JFFS2_FS=y
+CONFIG_CRAMFS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+CONFIG_ROOT_NFS=y
+# CONFIG_RPCSEC_GSS_KRB5 is not set
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_FS=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DETECT_HUNG_TASK=y
+# CONFIG_SCHED_DEBUG is not set
+CONFIG_DEBUG_INFO=y
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+CONFIG_DEBUG_USER=y
+CONFIG_DEBUG_ERRORS=y
+# CONFIG_DEBUG_LL is not set
+# CONFIG_EARLY_PRINTK is not set
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+# CONFIG_CRYPTO_HW is not set
+CONFIG_LBDAF=y
+CONFIG_ARCH_VIRT=y
+# CONFIG_ARM_LPAE is not set
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_LOOP_MIN_COUNT=8
+CONFIG_TUN=y
+CONFIG_VIRTUALIZATION=y
+CONFIG_VIRTIO_BLK=y
+CONFIG_SCSI_VIRTIO=y
+CONFIG_VIRTIO_NET=y
+CONFIG_VIRTIO_CONSOLE=y
+CONFIG_VIRTIO=y
+CONFIG_VIRTIO_BALLOON=y
+CONFIG_VIRTIO_MMIO=y
+
+CONFIG_IPV6=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_NETFILTER=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_BRIDGE=y
+
+CONFIG_KEYBOARD_GOLDFISH_EVENTS=y
+
+CONFIG_BATTERY_GOLDFISH=y
+
+CONFIG_FB_GOLDFISH=y
+CONFIG_FB_SIMPLE=y
+
+CONFIG_STAGING=y
+CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_ASHMEM=y
+CONFIG_ANDROID_LOGGER=y
+CONFIG_ANDROID_TIMED_GPIO=y
+CONFIG_ANDROID_LOW_MEMORY_KILLER=y
+CONFIG_ANDROID_INTF_ALARM_DEV=y
+
+CONFIG_GOLDFISH=y
+CONFIG_GOLDFISH_PIPE=y
+
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_FUSE_FS=y
+
+CONFIG_AUDIT=y
+CONFIG_SECURITY=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_SECURITY_SELINUX=y
+
+CONFIG_PROC_DEVICETREE=y
diff --git a/arch/arm/include/asm/arch_timer.h b/arch/arm/include/asm/arch_timer.h
index 7c1bfc0..a60052b 100644
--- a/arch/arm/include/asm/arch_timer.h
+++ b/arch/arm/include/asm/arch_timer.h
@@ -80,15 +80,6 @@
return val;
}
-static inline u64 arch_counter_get_cntpct(void)
-{
- u64 cval;
-
- isb();
- asm volatile("mrrc p15, 0, %Q0, %R0, c14" : "=r" (cval));
- return cval;
-}
-
static inline u64 arch_counter_get_cntvct(void)
{
u64 cval;
@@ -98,17 +89,43 @@
return cval;
}
-static inline void __cpuinit arch_counter_set_user_access(void)
+static inline u32 arch_timer_get_cntkctl(void)
{
u32 cntkctl;
-
asm volatile("mrc p15, 0, %0, c14, c1, 0" : "=r" (cntkctl));
+ return cntkctl;
+}
- /* disable user access to everything */
- cntkctl &= ~((3 << 8) | (7 << 0));
-
+static inline void arch_timer_set_cntkctl(u32 cntkctl)
+{
asm volatile("mcr p15, 0, %0, c14, c1, 0" : : "r" (cntkctl));
}
+
+static inline void __cpuinit arch_counter_set_user_access(void)
+{
+ u32 cntkctl = arch_timer_get_cntkctl();
+
+ /* Disable user access to both physical/virtual counters/timers */
+ /* Also disable virtual event stream */
+ cntkctl &= ~(ARCH_TIMER_USR_PT_ACCESS_EN
+ | ARCH_TIMER_USR_VT_ACCESS_EN
+ | ARCH_TIMER_VIRT_EVT_EN
+ | ARCH_TIMER_USR_VCT_ACCESS_EN
+ | ARCH_TIMER_USR_PCT_ACCESS_EN);
+ arch_timer_set_cntkctl(cntkctl);
+}
+
+static inline void arch_timer_evtstrm_enable(int divider)
+{
+ u32 cntkctl = arch_timer_get_cntkctl();
+ cntkctl &= ~ARCH_TIMER_EVT_TRIGGER_MASK;
+ /* Set the divider and enable virtual event stream */
+ cntkctl |= (divider << ARCH_TIMER_EVT_TRIGGER_SHIFT)
+ | ARCH_TIMER_VIRT_EVT_EN;
+ arch_timer_set_cntkctl(cntkctl);
+ elf_hwcap |= HWCAP_EVTSTRM;
+}
+
#endif
#endif
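As an illustration of how the new hook is meant to be driven (an
assumption about the caller, not code in this hunk), the clocksource
driver can derive the divider from the counter frequency and a target
event-stream rate, capped to the 4-bit EVNTI field:

	/* Sketch: pick a power-of-two divider for a ~10 kHz event stream. */
	static void configure_evtstream(u32 timer_rate)
	{
		int div = timer_rate / 10000;	/* assumed ~10 kHz target */
		int pos = fls(div);		/* nearest power of two */

		if (pos > 1 && !(div & (1 << (pos - 2))))
			pos--;
		arch_timer_evtstrm_enable(min(pos, 15));
	}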
diff --git a/arch/arm/include/asm/dma-contiguous.h b/arch/arm/include/asm/dma-contiguous.h
index 3ed37b4..4f8e9e5 100644
--- a/arch/arm/include/asm/dma-contiguous.h
+++ b/arch/arm/include/asm/dma-contiguous.h
@@ -2,10 +2,9 @@
#define ASMARM_DMA_CONTIGUOUS_H
#ifdef __KERNEL__
-#ifdef CONFIG_CMA
+#ifdef CONFIG_DMA_CMA
#include <linux/types.h>
-#include <asm-generic/dma-contiguous.h>
void dma_contiguous_early_fixup(phys_addr_t base, unsigned long size);
diff --git a/arch/arm/include/uapi/asm/hwcap.h b/arch/arm/include/uapi/asm/hwcap.h
index 3688fd1..7dcc10d 100644
--- a/arch/arm/include/uapi/asm/hwcap.h
+++ b/arch/arm/include/uapi/asm/hwcap.h
@@ -25,6 +25,7 @@
#define HWCAP_IDIVT (1 << 18)
#define HWCAP_VFPD32 (1 << 19) /* set if VFP has 32 regs (not 16) */
#define HWCAP_IDIV (HWCAP_IDIVA | HWCAP_IDIVT)
-
+#define HWCAP_LPAE (1 << 20)
+#define HWCAP_EVTSTRM (1 << 21)
#endif /* _UAPI__ASMARM_HWCAP_H */
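Userspace sees these bits through the ELF auxiliary vector (and the
/proc/cpuinfo strings added in the setup.c hunk below). A small
illustrative check, using the standard glibc getauxval() API:

	#include <stdio.h>
	#include <sys/auxv.h>

	#define HWCAP_EVTSTRM	(1 << 21)	/* mirrors the kernel define */

	int main(void)
	{
		unsigned long hwcap = getauxval(AT_HWCAP);

		printf("evtstrm: %s\n", (hwcap & HWCAP_EVTSTRM) ? "yes" : "no");
		return 0;
	}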
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index b4b1d39..df81eaf 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -872,6 +872,9 @@
"vfpv4",
"idiva",
"idivt",
+ "vfpd32",
+ "lpae",
+ "evtstrm",
NULL
};
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index ef1703b..1d55afe 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -17,6 +17,7 @@
*/
#include <linux/cpu.h>
+#include <linux/cpu_pm.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
@@ -835,6 +836,33 @@
.notifier_call = hyp_init_cpu_notify,
};
+#ifdef CONFIG_CPU_PM
+static int hyp_init_cpu_pm_notifier(struct notifier_block *self,
+ unsigned long cmd,
+ void *v)
+{
+ if (cmd == CPU_PM_EXIT) {
+ cpu_init_hyp_mode(NULL);
+ return NOTIFY_OK;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block hyp_init_cpu_pm_nb = {
+ .notifier_call = hyp_init_cpu_pm_notifier,
+};
+
+static void __init hyp_cpu_pm_init(void)
+{
+ cpu_pm_register_notifier(&hyp_init_cpu_pm_nb);
+}
+#else
+static inline void hyp_cpu_pm_init(void)
+{
+}
+#endif
+
/**
* Inits Hyp-mode on all online CPUs
*/
@@ -995,6 +1023,8 @@
goto out_err;
}
+ hyp_cpu_pm_init();
+
kvm_coproc_table_init();
return 0;
out_err:
diff --git a/arch/arm/mach-lionhead/Kconfig b/arch/arm/mach-lionhead/Kconfig
new file mode 100644
index 0000000..af57d25
--- /dev/null
+++ b/arch/arm/mach-lionhead/Kconfig
@@ -0,0 +1,30 @@
+config ARCH_LIONHEAD
+ bool "Lionhead virtual platform" if ARCH_MULTI_V7
+ select ARCH_REQUIRE_GPIOLIB
+ select ARM_AMBA
+ select ARM_GIC
+ select ARM_TIMER_SP804
+ select CLKDEV_LOOKUP
+ select COMMON_CLK
+ select COMMON_CLK_VERSATILE
+ select CPU_V7
+ select GENERIC_CLOCKEVENTS
+ select GOLDFISH
+ select HAVE_ARM_SCU if SMP
+ select HAVE_ARM_TWD if LOCAL_TIMERS
+ select HAVE_CLK
+ select HAVE_PATA_PLATFORM
+ select HAVE_SMP
+ select ICST
+ select MIGHT_HAVE_CACHE_L2X0
+ select NO_IOPORT
+ select PLAT_VERSATILE
+ select POWER_RESET
+ select POWER_RESET_VEXPRESS
+ select POWER_SUPPLY
+ select REGULATOR_FIXED_VOLTAGE if REGULATOR
+ select VEXPRESS_CONFIG
+ help
+ This option enables support for the "lionhead" emulated ARM platform.
+ lionhead is based on the ARM vexpress platform, with select devices
+ replaced by equivalent goldfish virtual devices.
diff --git a/arch/arm/mach-lionhead/Makefile b/arch/arm/mach-lionhead/Makefile
new file mode 100644
index 0000000..02aa88e
--- /dev/null
+++ b/arch/arm/mach-lionhead/Makefile
@@ -0,0 +1,9 @@
+#
+# Makefile for the linux kernel.
+#
+ccflags-$(CONFIG_ARCH_MULTIPLATFORM) := -I$(srctree)/$(src)/include \
+ -I$(srctree)/arch/arm/plat-versatile/include
+
+obj-y := lionhead.o
+obj-$(CONFIG_SMP) += platsmp.o
+obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
diff --git a/arch/arm/mach-lionhead/core.h b/arch/arm/mach-lionhead/core.h
new file mode 100644
index 0000000..b4a9b5c
--- /dev/null
+++ b/arch/arm/mach-lionhead/core.h
@@ -0,0 +1,6 @@
+/* 2MB large area for motherboard's peripherals static mapping */
+#define LIONHEAD_PERIPH 0xf8000000
+
+extern struct smp_operations lionhead_smp_ops;
+
+extern void lionhead_cpu_die(unsigned int cpu);
diff --git a/arch/arm/mach-lionhead/hotplug.c b/arch/arm/mach-lionhead/hotplug.c
new file mode 100644
index 0000000..d82976b
--- /dev/null
+++ b/arch/arm/mach-lionhead/hotplug.c
@@ -0,0 +1,106 @@
+/*
+ * linux/arch/arm/mach-realview/hotplug.c
+ *
+ * Copyright (C) 2002 ARM Ltd.
+ * All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/smp.h>
+
+#include <asm/smp_plat.h>
+#include <asm/cp15.h>
+
+static inline void cpu_enter_lowpower(void)
+{
+ unsigned int v;
+
+ asm volatile(
+ "mcr p15, 0, %1, c7, c5, 0\n"
+ " mcr p15, 0, %1, c7, c10, 4\n"
+ /*
+ * Turn off coherency
+ */
+ " mrc p15, 0, %0, c1, c0, 1\n"
+ " bic %0, %0, %3\n"
+ " mcr p15, 0, %0, c1, c0, 1\n"
+ " mrc p15, 0, %0, c1, c0, 0\n"
+ " bic %0, %0, %2\n"
+ " mcr p15, 0, %0, c1, c0, 0\n"
+ : "=&r" (v)
+ : "r" (0), "Ir" (CR_C), "Ir" (0x40)
+ : "cc");
+}
+
+static inline void cpu_leave_lowpower(void)
+{
+ unsigned int v;
+
+ asm volatile(
+ "mrc p15, 0, %0, c1, c0, 0\n"
+ " orr %0, %0, %1\n"
+ " mcr p15, 0, %0, c1, c0, 0\n"
+ " mrc p15, 0, %0, c1, c0, 1\n"
+ " orr %0, %0, %2\n"
+ " mcr p15, 0, %0, c1, c0, 1\n"
+ : "=&r" (v)
+ : "Ir" (CR_C), "Ir" (0x40)
+ : "cc");
+}
+
+static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
+{
+ /*
+ * there is no power-control hardware on this platform, so all
+ * we can do is put the core into WFI; this is safe as the calling
+ * code will have already disabled interrupts
+ */
+ for (;;) {
+ wfi();
+
+ if (pen_release == cpu_logical_map(cpu)) {
+ /*
+ * OK, proper wakeup, we're done
+ */
+ break;
+ }
+
+ /*
+ * Getting here, means that we have come out of WFI without
+ * having been woken up - this shouldn't happen
+ *
+ * Just note it happening - when we're woken, we can report
+ * its occurrence.
+ */
+ (*spurious)++;
+ }
+}
+
+/*
+ * platform-specific code to shutdown a CPU
+ *
+ * Called with IRQs disabled
+ */
+void __ref lionhead_cpu_die(unsigned int cpu)
+{
+ int spurious = 0;
+
+ /*
+ * we're ready for shutdown now, so do it
+ */
+ cpu_enter_lowpower();
+ platform_do_lowpower(cpu, &spurious);
+
+ /*
+ * bring this CPU back into the world of cache
+ * coherency, and then restore interrupts
+ */
+ cpu_leave_lowpower();
+
+ if (spurious)
+ pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious);
+}
diff --git a/arch/arm/mach-lionhead/include/mach/hardware.h b/arch/arm/mach-lionhead/include/mach/hardware.h
new file mode 100644
index 0000000..40a8c17
--- /dev/null
+++ b/arch/arm/mach-lionhead/include/mach/hardware.h
@@ -0,0 +1 @@
+/* empty */
diff --git a/arch/arm/mach-lionhead/include/mach/irqs.h b/arch/arm/mach-lionhead/include/mach/irqs.h
new file mode 100644
index 0000000..f8f7f78
--- /dev/null
+++ b/arch/arm/mach-lionhead/include/mach/irqs.h
@@ -0,0 +1,6 @@
+#define IRQ_LOCALTIMER 29
+#define IRQ_LOCALWDOG 30
+
+#ifndef CONFIG_SPARSE_IRQ
+#define NR_IRQS 256
+#endif
diff --git a/arch/arm/mach-lionhead/lionhead.c b/arch/arm/mach-lionhead/lionhead.c
new file mode 100644
index 0000000..0732067
--- /dev/null
+++ b/arch/arm/mach-lionhead/lionhead.c
@@ -0,0 +1,84 @@
+/*
+ * Versatile Express V2M Motherboard Support
+ */
+#include <linux/clocksource.h>
+#include <linux/irqchip.h>
+#include <linux/of_address.h>
+#include <linux/of_fdt.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/fixed.h>
+#include <linux/regulator/machine.h>
+#include <linux/vexpress.h>
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+
+#include <asm/mach-types.h>
+#include <asm/sizes.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+#include <asm/mach/time.h>
+#include <asm/hardware/arm_timer.h>
+#include <asm/hardware/cache-l2x0.h>
+#include <asm/hardware/timer-sp.h>
+
+#include <plat/sched_clock.h>
+#include <plat/platsmp.h>
+
+#include "core.h"
+
+static struct map_desc lionhead_io_desc __initdata = {
+ .virtual = LIONHEAD_PERIPH,
+ .pfn = __phys_to_pfn(0x1c000000),
+ .length = SZ_2M,
+ .type = MT_DEVICE,
+};
+
+static void __init lionhead_map_io(void)
+{
+ iotable_init(&lionhead_io_desc, 1);
+}
+
+static void __init lionhead_init_early(void)
+{
+ vexpress_sysreg_of_early_init();
+}
+
+static void __init lionhead_timer_init(void)
+{
+ of_clk_init(NULL);
+
+ clocksource_of_init();
+
+ versatile_sched_clock_init(vexpress_get_24mhz_clock_base(),
+ 24000000);
+}
+
+static const struct of_device_id lionhead_dt_bus_match[] __initconst = {
+ { .compatible = "simple-bus", },
+ { .compatible = "arm,amba-bus", },
+ { .compatible = "arm,vexpress,config-bus", },
+ {}
+};
+
+static void __init lionhead_init(void)
+{
+ l2x0_of_init(0x00400000, 0xfe0fffff);
+ of_platform_populate(NULL, lionhead_dt_bus_match, NULL, NULL);
+}
+
+static const char * const lionhead_dt_match[] __initconst = {
+ "generic,lionhead",
+ NULL,
+};
+
+DT_MACHINE_START(VEXPRESS_DT, "lionhead virtual platform")
+ .dt_compat = lionhead_dt_match,
+ .smp = smp_ops(lionhead_smp_ops),
+ .map_io = lionhead_map_io,
+ .init_early = lionhead_init_early,
+ .init_irq = irqchip_init,
+ .init_time = lionhead_timer_init,
+ .init_machine = lionhead_init,
+MACHINE_END
diff --git a/arch/arm/mach-lionhead/platsmp.c b/arch/arm/mach-lionhead/platsmp.c
new file mode 100644
index 0000000..6654355
--- /dev/null
+++ b/arch/arm/mach-lionhead/platsmp.c
@@ -0,0 +1,127 @@
+/*
+ * linux/arch/arm/mach-vexpress/platsmp.c
+ *
+ * Copyright (C) 2002 ARM Ltd.
+ * All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/smp.h>
+#include <linux/io.h>
+#include <linux/of_fdt.h>
+#include <linux/vexpress.h>
+
+#include <asm/smp_scu.h>
+#include <asm/mach/map.h>
+
+#include <plat/platsmp.h>
+
+#include "core.h"
+
+#if defined(CONFIG_OF)
+
+static int __init lionhead_dt_cpus_num(unsigned long node, const char *uname,
+ int depth, void *data)
+{
+ static int prev_depth = -1;
+ static int nr_cpus = -1;
+
+ if (prev_depth > depth && nr_cpus > 0)
+ return nr_cpus;
+
+ if (nr_cpus < 0 && strcmp(uname, "cpus") == 0)
+ nr_cpus = 0;
+
+ if (nr_cpus >= 0) {
+ const char *device_type = of_get_flat_dt_prop(node,
+ "device_type", NULL);
+
+ if (device_type && strcmp(device_type, "cpu") == 0)
+ nr_cpus++;
+ }
+
+ prev_depth = depth;
+
+ return 0;
+}
+
+static void __init lionhead_dt_smp_init_cpus(void)
+{
+ int ncores = 0, i;
+
+ ncores = of_scan_flat_dt(lionhead_dt_cpus_num, NULL);
+
+ if (ncores < 2)
+ return;
+
+ if (ncores > nr_cpu_ids) {
+ pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
+ ncores, nr_cpu_ids);
+ ncores = nr_cpu_ids;
+ }
+
+ for (i = 0; i < ncores; ++i)
+ set_cpu_possible(i, true);
+}
+
+static void __init lionhead_dt_smp_prepare_cpus(unsigned int max_cpus)
+{
+ int i;
+
+ for (i = 0; i < max_cpus; i++)
+ set_cpu_present(i, true);
+}
+
+#else
+
+static void __init lionhead_dt_smp_init_cpus(void)
+{
+ WARN_ON(1);
+}
+
+static void __init lionhead_dt_smp_prepare_cpus(unsigned int max_cpus)
+{
+ WARN_ON(1);
+}
+
+#endif
+
+/*
+ * Initialise the CPU possible map early - this describes the CPUs
+ * which may be present or become present in the system.
+ */
+static void __init lionhead_smp_init_cpus(void)
+{
+ lionhead_dt_smp_init_cpus();
+}
+
+static void __init lionhead_smp_prepare_cpus(unsigned int max_cpus)
+{
+ /*
+ * Initialise the present map, which describes the set of CPUs
+ * actually populated at the present time.
+ */
+ lionhead_dt_smp_prepare_cpus(max_cpus);
+
+ /*
+ * Write the address of secondary startup into the
+ * system-wide flags register. The boot monitor waits
+ * until it receives a soft interrupt, and then the
+ * secondary CPU branches to this address.
+ */
+ vexpress_flags_set(virt_to_phys(versatile_secondary_startup));
+}
+
+struct smp_operations __initdata lionhead_smp_ops = {
+ .smp_init_cpus = lionhead_smp_init_cpus,
+ .smp_prepare_cpus = lionhead_smp_prepare_cpus,
+ .smp_secondary_init = versatile_secondary_init,
+ .smp_boot_secondary = versatile_boot_secondary,
+#ifdef CONFIG_HOTPLUG_CPU
+ .cpu_die = lionhead_cpu_die,
+#endif
+};
diff --git a/arch/arm/mach-virt/Kconfig b/arch/arm/mach-virt/Kconfig
index 8958f0d..eafb6be 100644
--- a/arch/arm/mach-virt/Kconfig
+++ b/arch/arm/mach-virt/Kconfig
@@ -8,3 +8,4 @@
select CPU_V7
select SPARSE_IRQ
select USE_OF
+ select GENERIC_CLOCKEVENTS
diff --git a/arch/arm/mach-virt/virt.c b/arch/arm/mach-virt/virt.c
index 061f283..4f5b259 100644
--- a/arch/arm/mach-virt/virt.c
+++ b/arch/arm/mach-virt/virt.c
@@ -22,16 +22,19 @@
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/smp.h>
+#include <linux/clk-provider.h>
#include <asm/mach/arch.h>
static void __init virt_init(void)
{
+ of_clk_init(NULL);
of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
}
static const char *virt_dt_match[] = {
"linux,dummy-virt",
+ "ranchu",
"xen,xenvm",
NULL
};
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index ef3e0f3..1fb40dc 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -358,7 +358,7 @@
if (!pages)
goto no_pages;
- if (IS_ENABLED(CONFIG_CMA))
+ if (IS_ENABLED(CONFIG_DMA_CMA))
ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page,
atomic_pool_init);
else
@@ -670,7 +670,7 @@
addr = __alloc_simple_buffer(dev, size, gfp, &page);
else if (!(gfp & __GFP_WAIT))
addr = __alloc_from_pool(size, &page);
- else if (!IS_ENABLED(CONFIG_CMA))
+ else if (!IS_ENABLED(CONFIG_DMA_CMA))
addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
else
addr = __alloc_from_contiguous(dev, size, prot, &page, caller);
@@ -759,7 +759,7 @@
__dma_free_buffer(page, size);
} else if (__free_from_pool(cpu_addr, size)) {
return;
- } else if (!IS_ENABLED(CONFIG_CMA)) {
+ } else if (!IS_ENABLED(CONFIG_DMA_CMA)) {
__dma_free_remap(cpu_addr, size);
__dma_free_buffer(page, size);
} else {
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 9a5cdc0..afeaef7 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -76,7 +76,7 @@
__tagtable(ATAG_INITRD2, parse_tag_initrd2);
#ifdef CONFIG_OF_FLATTREE
-void __init early_init_dt_setup_initrd_arch(unsigned long start, unsigned long end)
+void __init early_init_dt_setup_initrd_arch(u64 start, u64 end)
{
phys_initrd_start = start;
phys_initrd_size = end - start;
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 3296521..2bab6cd 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1,6 +1,8 @@
config ARM64
def_bool y
select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
+ select ARCH_USE_CMPXCHG_LOCKREF
+ select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
select ARCH_WANT_OPTIONAL_GPIOLIB
select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
select ARCH_WANT_FRAME_POINTERS
@@ -8,29 +10,42 @@
select ARM_ARCH_TIMER
select ARM_GIC
select AUDIT_ARCH_COMPAT_GENERIC
+ select BUILDTIME_EXTABLE_SORT
select CLONE_BACKWARDS
select COMMON_CLK
+ select CPU_PM if (SUSPEND || CPU_IDLE)
+ select DCACHE_WORD_ACCESS
select GENERIC_CLOCKEVENTS
select GENERIC_CLOCKEVENTS_BROADCAST if SMP
select GENERIC_CPU_AUTOPROBE
+ select GENERIC_EARLY_IOREMAP
select GENERIC_IOMAP
select GENERIC_IRQ_PROBE
select GENERIC_IRQ_SHOW
select GENERIC_SMP_IDLE_THREAD
+ select GENERIC_STRNCPY_FROM_USER
+ select GENERIC_STRNLEN_USER
select GENERIC_TIME_VSYSCALL
select HARDIRQS_SW_RESEND
select HAVE_ARCH_AUDITSYSCALL
+ select HAVE_ARCH_KGDB
+ select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_TRACEHOOK
select HAVE_DEBUG_BUGVERBOSE
select HAVE_DEBUG_KMEMLEAK
select HAVE_DMA_API_DEBUG
select HAVE_DMA_ATTRS
+ select HAVE_DMA_CONTIGUOUS
+ select HAVE_EFFICIENT_UNALIGNED_ACCESS
select HAVE_GENERIC_DMA_COHERENT
select HAVE_GENERIC_HARDIRQS
select HAVE_HW_BREAKPOINT if PERF_EVENTS
select HAVE_MEMBLOCK
+ select HAVE_PATA_PLATFORM
select HAVE_PERF_EVENTS
+ select HAVE_PERF_REGS
+ select HAVE_PERF_USER_STACK_DUMP
select IRQ_DOMAIN
select MODULES_USE_ELF_RELA
select NO_BOOTMEM
@@ -70,11 +85,7 @@
config TRACE_IRQFLAGS_SUPPORT
def_bool y
-config GENERIC_LOCKBREAK
- def_bool y
- depends on SMP && PREEMPT
-
-config RWSEM_GENERIC_SPINLOCK
+config RWSEM_XCHGADD_ALGORITHM
def_bool y
config GENERIC_HWEIGHT
@@ -86,7 +97,7 @@
config GENERIC_CALIBRATE_DELAY
def_bool y
-config ZONE_DMA32
+config ZONE_DMA
def_bool y
config ARCH_DMA_ADDR_T_64BIT
@@ -104,6 +115,9 @@
config IOMMU_HELPER
def_bool SWIOTLB
+config FIX_EARLYCON_MEM
+ def_bool y
+
source "init/Kconfig"
source "kernel/Kconfig.freezer"
@@ -120,6 +134,11 @@
This enables support for the ARMv8 software model (Versatile
Express).
+config ARCH_XGENE
+ bool "AppliedMicro X-Gene SOC Family"
+ help
+ This enables support for AppliedMicro X-Gene SOC Family
+
endmenu
menu "Bus support"
@@ -153,11 +172,35 @@
If you don't know what to do here, say N.
+config SCHED_MC
+ bool "Multi-core scheduler support"
+ depends on SMP
+ help
+ Multi-core scheduler support improves the CPU scheduler's decision
+ making when dealing with multi-core CPU chips at a cost of slightly
+ increased overhead in some places. If unsure say N here.
+
+config SCHED_SMT
+ bool "SMT scheduler support"
+ depends on SMP
+ help
+ Improves the CPU scheduler's decision making when dealing with
+ MultiThreading at a cost of slightly increased overhead in some
+ places. If unsure say N here.
+
config NR_CPUS
int "Maximum number of CPUs (2-32)"
range 2 32
depends on SMP
- default "4"
+ # These have to remain sorted largest to smallest
+ default "8"
+
+config HOTPLUG_CPU
+ bool "Support for hot-pluggable CPUs"
+ depends on SMP
+ help
+ Say Y here to experiment with turning CPUs off and on. CPUs
+ can be controlled through /sys/devices/system/cpu.
config SWP_EMULATE
bool "Emulate SWP/SWPB instructions"
@@ -214,6 +257,18 @@
Enable hardware performance counter support for perf events. If
disabled, perf events will use software events only.
+config SYS_SUPPORTS_HUGETLBFS
+ def_bool y
+
+config ARCH_WANT_GENERAL_HUGETLB
+ def_bool y
+
+config ARCH_WANT_HUGE_PMD_SHARE
+ def_bool y if !ARM64_64K_PAGES
+
+config HAVE_ARCH_TRANSPARENT_HUGEPAGE
+ def_bool y
+
config ARMV7_COMPAT
bool "Kernel support for ARMv7 applications"
depends on COMPAT
@@ -250,6 +305,10 @@
and the task is only allowed to execute a few safe syscalls
defined by each seccomp mode.
+config FORCE_MAX_ZONEORDER
+ int
+ default "14" if (ARM64_64K_PAGES && TRANSPARENT_HUGEPAGE)
+ default "11"
endmenu
@@ -331,6 +390,25 @@
endmenu
+menu "Power management options"
+
+source "kernel/power/Kconfig"
+
+source "drivers/cpufreq/Kconfig"
+config ARCH_SUSPEND_POSSIBLE
+ def_bool y
+
+config ARM64_CPU_SUSPEND
+ def_bool PM_SLEEP
+
+endmenu
+
+menu "CPU Power Management"
+
+source "drivers/cpuidle/Kconfig"
+
+endmenu
+
source "net/Kconfig"
source "drivers/Kconfig"
diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug
index 1a6bfe9..e1b0c46 100644
--- a/arch/arm64/Kconfig.debug
+++ b/arch/arm64/Kconfig.debug
@@ -13,6 +13,20 @@
Enables the display of the minimum amount of free stack which each
task has ever had available in the sysrq-T output.
+config STRICT_DEVMEM
+ bool "Filter access to /dev/mem"
+ depends on MMU
+ help
+ If this option is disabled, you allow userspace (root) access to all
+ of memory, including kernel and userspace memory. Accidental
+ access to this is obviously disastrous, but specific access can
+ be used by people debugging the kernel.
+
+ If this option is switched on, the /dev/mem file only allows
+ userspace access to memory mapped peripherals.
+
+ If in doubt, say Y.
+
config EARLY_PRINTK
bool "Early printk support"
default y
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 2b27721..2924cd5 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -70,6 +70,10 @@
Image.gz-dtb: vmlinux scripts dtbs
$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
+PHONY += vdso_install
+vdso_install:
+ $(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso $@
+
# We use MRPROPER_FILES and CLEAN_FILES now
archclean:
$(Q)$(MAKE) $(clean)=$(boot)
diff --git a/arch/arm64/boot/dts/Makefile b/arch/arm64/boot/dts/Makefile
index d58ea71..2f87014 100644
--- a/arch/arm64/boot/dts/Makefile
+++ b/arch/arm64/boot/dts/Makefile
@@ -1,4 +1,5 @@
dtb-$(CONFIG_ARCH_VEXPRESS) += rtsm_ve-aemv8a.dtb foundation-v8.dtb
+dtb-$(CONFIG_ARCH_XGENE) += apm-mustang.dtb
targets += dtbs
diff --git a/arch/arm64/boot/dts/apm-mustang.dts b/arch/arm64/boot/dts/apm-mustang.dts
new file mode 100644
index 0000000..1247ca1
--- /dev/null
+++ b/arch/arm64/boot/dts/apm-mustang.dts
@@ -0,0 +1,26 @@
+/*
+ * dts file for AppliedMicro (APM) Mustang Board
+ *
+ * Copyright (C) 2013, Applied Micro Circuits Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+/dts-v1/;
+
+/include/ "apm-storm.dtsi"
+
+/ {
+ model = "APM X-Gene Mustang board";
+ compatible = "apm,mustang", "apm,xgene-storm";
+
+ chosen { };
+
+ memory {
+ device_type = "memory";
+ reg = < 0x1 0x00000000 0x0 0x80000000 >; /* Updated by bootloader */
+ };
+};
diff --git a/arch/arm64/boot/dts/apm-storm.dtsi b/arch/arm64/boot/dts/apm-storm.dtsi
new file mode 100644
index 0000000..4917f3b
--- /dev/null
+++ b/arch/arm64/boot/dts/apm-storm.dtsi
@@ -0,0 +1,364 @@
+/*
+ * dts file for AppliedMicro (APM) X-Gene Storm SOC
+ *
+ * Copyright (C) 2013, Applied Micro Circuits Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+/ {
+ compatible = "apm,xgene-storm";
+ interrupt-parent = <&gic>;
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ cpus {
+ #address-cells = <2>;
+ #size-cells = <0>;
+
+ cpu@000 {
+ device_type = "cpu";
+ compatible = "apm,potenza", "arm,armv8";
+ reg = <0x0 0x000>;
+ enable-method = "spin-table";
+ cpu-release-addr = <0x1 0x0000fff8>;
+ };
+ cpu@001 {
+ device_type = "cpu";
+ compatible = "apm,potenza", "arm,armv8";
+ reg = <0x0 0x001>;
+ enable-method = "spin-table";
+ cpu-release-addr = <0x1 0x0000fff8>;
+ };
+ cpu@100 {
+ device_type = "cpu";
+ compatible = "apm,potenza", "arm,armv8";
+ reg = <0x0 0x100>;
+ enable-method = "spin-table";
+ cpu-release-addr = <0x1 0x0000fff8>;
+ };
+ cpu@101 {
+ device_type = "cpu";
+ compatible = "apm,potenza", "arm,armv8";
+ reg = <0x0 0x101>;
+ enable-method = "spin-table";
+ cpu-release-addr = <0x1 0x0000fff8>;
+ };
+ cpu@200 {
+ device_type = "cpu";
+ compatible = "apm,potenza", "arm,armv8";
+ reg = <0x0 0x200>;
+ enable-method = "spin-table";
+ cpu-release-addr = <0x1 0x0000fff8>;
+ };
+ cpu@201 {
+ device_type = "cpu";
+ compatible = "apm,potenza", "arm,armv8";
+ reg = <0x0 0x201>;
+ enable-method = "spin-table";
+ cpu-release-addr = <0x1 0x0000fff8>;
+ };
+ cpu@300 {
+ device_type = "cpu";
+ compatible = "apm,potenza", "arm,armv8";
+ reg = <0x0 0x300>;
+ enable-method = "spin-table";
+ cpu-release-addr = <0x1 0x0000fff8>;
+ };
+ cpu@301 {
+ device_type = "cpu";
+ compatible = "apm,potenza", "arm,armv8";
+ reg = <0x0 0x301>;
+ enable-method = "spin-table";
+ cpu-release-addr = <0x1 0x0000fff8>;
+ };
+ };
+
+ gic: interrupt-controller@78010000 {
+ compatible = "arm,cortex-a15-gic";
+ #interrupt-cells = <3>;
+ interrupt-controller;
+ reg = <0x0 0x78010000 0x0 0x1000>, /* GIC Dist */
+ <0x0 0x78020000 0x0 0x1000>, /* GIC CPU */
+ <0x0 0x78040000 0x0 0x2000>, /* GIC VCPU Control */
+ <0x0 0x78060000 0x0 0x2000>; /* GIC VCPU */
+ interrupts = <1 9 0xf04>; /* GIC Maintenance IRQ */
+ };
+
+ timer {
+ compatible = "arm,armv8-timer";
+ interrupts = <1 0 0xff01>, /* Secure Phys IRQ */
+ <1 13 0xff01>, /* Non-secure Phys IRQ */
+ <1 14 0xff01>, /* Virt IRQ */
+ <1 15 0xff01>; /* Hyp IRQ */
+ clock-frequency = <50000000>;
+ };
+
+ soc {
+ compatible = "simple-bus";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ clocks {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+ refclk: refclk {
+ compatible = "fixed-clock";
+ #clock-cells = <1>;
+ clock-frequency = <100000000>;
+ clock-output-names = "refclk";
+ };
+
+ pcppll: pcppll@17000100 {
+ compatible = "apm,xgene-pcppll-clock";
+ #clock-cells = <1>;
+ clocks = <&refclk 0>;
+ clock-names = "pcppll";
+ reg = <0x0 0x17000100 0x0 0x1000>;
+ clock-output-names = "pcppll";
+ type = <0>;
+ };
+
+ socpll: socpll@17000120 {
+ compatible = "apm,xgene-socpll-clock";
+ #clock-cells = <1>;
+ clocks = <&refclk 0>;
+ clock-names = "socpll";
+ reg = <0x0 0x17000120 0x0 0x1000>;
+ clock-output-names = "socpll";
+ type = <1>;
+ };
+
+ socplldiv2: socplldiv2 {
+ compatible = "fixed-factor-clock";
+ #clock-cells = <1>;
+ clocks = <&socpll 0>;
+ clock-names = "socplldiv2";
+ clock-mult = <1>;
+ clock-div = <2>;
+ clock-output-names = "socplldiv2";
+ };
+
+ qmlclk: qmlclk {
+ compatible = "apm,xgene-device-clock";
+ #clock-cells = <1>;
+ clocks = <&socplldiv2 0>;
+ clock-names = "qmlclk";
+ reg = <0x0 0x1703C000 0x0 0x1000>;
+ reg-names = "csr-reg";
+ clock-output-names = "qmlclk";
+ };
+
+ ethclk: ethclk {
+ compatible = "apm,xgene-device-clock";
+ #clock-cells = <1>;
+ clocks = <&socplldiv2 0>;
+ clock-names = "ethclk";
+ reg = <0x0 0x17000000 0x0 0x1000>;
+ reg-names = "div-reg";
+ divider-offset = <0x238>;
+ divider-width = <0x9>;
+ divider-shift = <0x0>;
+ clock-output-names = "ethclk";
+ };
+
+ eth8clk: eth8clk {
+ compatible = "apm,xgene-device-clock";
+ #clock-cells = <1>;
+ clocks = <ðclk 0>;
+ clock-names = "eth8clk";
+ reg = <0x0 0x1702C000 0x0 0x1000>;
+ reg-names = "csr-reg";
+ clock-output-names = "eth8clk";
+ };
+
+ sataphy1clk: sataphy1clk@1f21c000 {
+ compatible = "apm,xgene-device-clock";
+ #clock-cells = <1>;
+ clocks = <&socplldiv2 0>;
+ reg = <0x0 0x1f21c000 0x0 0x1000>;
+ reg-names = "csr-reg";
+ clock-output-names = "sataphy1clk";
+ status = "disabled";
+ csr-offset = <0x4>;
+ csr-mask = <0x00>;
+ enable-offset = <0x0>;
+ enable-mask = <0x06>;
+ };
+
+ sataphy2clk: sataphy2clk@1f22c000 {
+ compatible = "apm,xgene-device-clock";
+ #clock-cells = <1>;
+ clocks = <&socplldiv2 0>;
+ reg = <0x0 0x1f22c000 0x0 0x1000>;
+ reg-names = "csr-reg";
+ clock-output-names = "sataphy2clk";
+ status = "okay";
+ csr-offset = <0x4>;
+ csr-mask = <0x3a>;
+ enable-offset = <0x0>;
+ enable-mask = <0x06>;
+ };
+
+ sataphy3clk: sataphy3clk@1f23c000 {
+ compatible = "apm,xgene-device-clock";
+ #clock-cells = <1>;
+ clocks = <&socplldiv2 0>;
+ reg = <0x0 0x1f23c000 0x0 0x1000>;
+ reg-names = "csr-reg";
+ clock-output-names = "sataphy3clk";
+ status = "okay";
+ csr-offset = <0x4>;
+ csr-mask = <0x3a>;
+ enable-offset = <0x0>;
+ enable-mask = <0x06>;
+ };
+
+ sata01clk: sata01clk@1f21c000 {
+ compatible = "apm,xgene-device-clock";
+ #clock-cells = <1>;
+ clocks = <&socplldiv2 0>;
+ reg = <0x0 0x1f21c000 0x0 0x1000>;
+ reg-names = "csr-reg";
+ clock-output-names = "sata01clk";
+ csr-offset = <0x4>;
+ csr-mask = <0x05>;
+ enable-offset = <0x0>;
+ enable-mask = <0x39>;
+ };
+
+ sata23clk: sata23clk@1f22c000 {
+ compatible = "apm,xgene-device-clock";
+ #clock-cells = <1>;
+ clocks = <&socplldiv2 0>;
+ reg = <0x0 0x1f22c000 0x0 0x1000>;
+ reg-names = "csr-reg";
+ clock-output-names = "sata23clk";
+ csr-offset = <0x4>;
+ csr-mask = <0x05>;
+ enable-offset = <0x0>;
+ enable-mask = <0x39>;
+ };
+
+ sata45clk: sata45clk@1f23c000 {
+ compatible = "apm,xgene-device-clock";
+ #clock-cells = <1>;
+ clocks = <&socplldiv2 0>;
+ reg = <0x0 0x1f23c000 0x0 0x1000>;
+ reg-names = "csr-reg";
+ clock-output-names = "sata45clk";
+ csr-offset = <0x4>;
+ csr-mask = <0x05>;
+ enable-offset = <0x0>;
+ enable-mask = <0x39>;
+ };
+
+ rtcclk: rtcclk@17000000 {
+ compatible = "apm,xgene-device-clock";
+ #clock-cells = <1>;
+ clocks = <&socplldiv2 0>;
+ reg = <0x0 0x17000000 0x0 0x2000>;
+ reg-names = "csr-reg";
+ csr-offset = <0xc>;
+ csr-mask = <0x2>;
+ enable-offset = <0x10>;
+ enable-mask = <0x2>;
+ clock-output-names = "rtcclk";
+ };
+ };
+
+ serial0: serial@1c020000 {
+ device_type = "serial";
+ compatible = "ns16550";
+ reg = <0 0x1c020000 0x0 0x1000>;
+ reg-shift = <2>;
+ clock-frequency = <10000000>; /* Updated by bootloader */
+ interrupt-parent = <&gic>;
+ interrupts = <0x0 0x4c 0x4>;
+ };
+
+ phy1: phy@1f21a000 {
+ compatible = "apm,xgene-phy";
+ reg = <0x0 0x1f21a000 0x0 0x100>;
+ #phy-cells = <1>;
+ clocks = <&sataphy1clk 0>;
+ status = "disabled";
+ apm,tx-boost-gain = <30 30 30 30 30 30>;
+ apm,tx-eye-tuning = <2 10 10 2 10 10>;
+ };
+
+ phy2: phy@1f22a000 {
+ compatible = "apm,xgene-phy";
+ reg = <0x0 0x1f22a000 0x0 0x100>;
+ #phy-cells = <1>;
+ clocks = <&sataphy2clk 0>;
+ status = "okay";
+ apm,tx-boost-gain = <30 30 30 30 30 30>;
+ apm,tx-eye-tuning = <1 10 10 2 10 10>;
+ };
+
+ phy3: phy@1f23a000 {
+ compatible = "apm,xgene-phy";
+ reg = <0x0 0x1f23a000 0x0 0x100>;
+ #phy-cells = <1>;
+ clocks = <&sataphy3clk 0>;
+ status = "okay";
+ apm,tx-boost-gain = <31 31 31 31 31 31>;
+ apm,tx-eye-tuning = <2 10 10 2 10 10>;
+ };
+
+ sata1: sata@1a000000 {
+ compatible = "apm,xgene-ahci";
+ reg = <0x0 0x1a000000 0x0 0x1000>,
+ <0x0 0x1f210000 0x0 0x1000>,
+ <0x0 0x1f21d000 0x0 0x1000>,
+ <0x0 0x1f21e000 0x0 0x1000>,
+ <0x0 0x1f217000 0x0 0x1000>;
+ interrupts = <0x0 0x86 0x4>;
+ status = "disabled";
+ clocks = <&sata01clk 0>;
+ phys = <&phy1 0>;
+ phy-names = "sata-phy";
+ };
+
+ sata2: sata@1a400000 {
+ compatible = "apm,xgene-ahci";
+ reg = <0x0 0x1a400000 0x0 0x1000>,
+ <0x0 0x1f220000 0x0 0x1000>,
+ <0x0 0x1f22d000 0x0 0x1000>,
+ <0x0 0x1f22e000 0x0 0x1000>,
+ <0x0 0x1f227000 0x0 0x1000>;
+ interrupts = <0x0 0x87 0x4>;
+ status = "okay";
+ clocks = <&sata23clk 0>;
+ phys = <&phy2 0>;
+ phy-names = "sata-phy";
+ };
+
+ sata3: sata@1a800000 {
+ compatible = "apm,xgene-ahci";
+ reg = <0x0 0x1a800000 0x0 0x1000>,
+ <0x0 0x1f230000 0x0 0x1000>,
+ <0x0 0x1f23d000 0x0 0x1000>,
+ <0x0 0x1f23e000 0x0 0x1000>;
+ interrupts = <0x0 0x88 0x4>;
+ status = "okay";
+ clocks = <&sata45clk 0>;
+ phys = <&phy3 0>;
+ phy-names = "sata-phy";
+ };
+
+ rtc: rtc@10510000 {
+ compatible = "apm,xgene-rtc";
+ reg = <0x0 0x10510000 0x0 0x400>;
+ interrupts = <0x0 0x46 0x4>;
+ #clock-cells = <1>;
+ clocks = <&rtcclk 0>;
+ };
+ };
+};
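Editor's note: the sata nodes above pair a phys phandle with the "sata-phy" phy-names entry, and each controller references its device clock. A consumer driver binds to these roughly as follows -- a hedged sketch, not the actual xgene-ahci probe, with error handling trimmed:

    static const struct of_device_id xgene_ahci_of_match[] = {
            { .compatible = "apm,xgene-ahci" },
            { /* sentinel */ }
    };

    static int xgene_ahci_probe_sketch(struct platform_device *pdev)
    {
            struct clk *clk = devm_clk_get(&pdev->dev, NULL);
            struct phy *phy = devm_phy_get(&pdev->dev, "sata-phy");

            if (IS_ERR(clk) || IS_ERR(phy))
                    return -EPROBE_DEFER;
            clk_prepare_enable(clk);
            return phy_init(phy);
    }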
diff --git a/arch/arm64/boot/dts/foundation-v8.dts b/arch/arm64/boot/dts/foundation-v8.dts
index 84fcc50..519c4b2 100644
--- a/arch/arm64/boot/dts/foundation-v8.dts
+++ b/arch/arm64/boot/dts/foundation-v8.dts
@@ -6,6 +6,8 @@
/dts-v1/;
+/memreserve/ 0x80000000 0x00010000;
+
/ {
model = "Foundation-v8A";
compatible = "arm,foundation-aarch64", "arm,vexpress";
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 8fd6e02..5768499 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
# CONFIG_LOCALVERSION_AUTO is not set
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
@@ -19,13 +18,17 @@
CONFIG_KALLSYMS_ALL=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
+CONFIG_JUMP_LABEL=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
CONFIG_ARCH_VEXPRESS=y
+CONFIG_ARCH_XGENE=y
CONFIG_SMP=y
+CONFIG_PREEMPT=y
CONFIG_PREEMPT_VOLUNTARY=y
+CONFIG_CMA=y
CONFIG_CMDLINE="console=ttyAMA0"
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_COMPAT=y
@@ -42,29 +45,42 @@
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
# CONFIG_BLK_DEV is not set
+CONFIG_DMA_CMA=y
CONFIG_SCSI=y
# CONFIG_SCSI_PROC_FS is not set
CONFIG_BLK_DEV_SD=y
# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_ATA=y
+CONFIG_PATA_PLATFORM=y
+CONFIG_PATA_OF_PLATFORM=y
CONFIG_NETDEVICES=y
-CONFIG_MII=y
CONFIG_SMC91X=y
+CONFIG_SMSC911X=y
# CONFIG_WLAN is not set
CONFIG_INPUT_EVDEV=y
# CONFIG_SERIO_I8042 is not set
# CONFIG_SERIO_SERPORT is not set
CONFIG_LEGACY_PTY_COUNT=16
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_SERIAL_AMBA_PL011=y
CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
# CONFIG_HW_RANDOM is not set
# CONFIG_HWMON is not set
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
CONFIG_FB=y
# CONFIG_VGA_CONSOLE is not set
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_LOGO=y
# CONFIG_LOGO_LINUX_MONO is not set
# CONFIG_LOGO_LINUX_VGA16 is not set
-# CONFIG_USB_SUPPORT is not set
+CONFIG_USB=y
+CONFIG_USB_ISP1760_HCD=y
+CONFIG_USB_STORAGE=y
+CONFIG_MMC=y
+CONFIG_MMC_ARMMMCI=y
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_EXT2_FS=y
# CONFIG_EXT3_FS is not set
diff --git a/arch/arm64/configs/ranchu_defconfig b/arch/arm64/configs/ranchu_defconfig
new file mode 100644
index 0000000..14bd1ec
--- /dev/null
+++ b/arch/arm64/configs/ranchu_defconfig
@@ -0,0 +1,228 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+# CONFIG_SWAP is not set
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_CGROUP_DEBUG=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_RESOURCE_COUNTERS=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_SCHED_AUTOGROUP=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_EMBEDDED=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_PROFILING=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_ARCH_VEXPRESS is not set
+CONFIG_SMP=y
+CONFIG_NR_CPUS=4
+CONFIG_PREEMPT=y
+CONFIG_ARMV7_COMPAT=y
+CONFIG_CMDLINE="console=ttyAMA0"
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_COMPAT=y
+CONFIG_PM_AUTOSLEEP=y
+CONFIG_PM_WAKELOCKS=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_INET_ESP=y
+# CONFIG_INET_LRO is not set
+CONFIG_IPV6_PRIVACY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CT_PROTO_DCCP=y
+CONFIG_NF_CT_PROTO_SCTP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SANE=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NETFILTER_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFLOG=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_TARGET_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_TRACE=y
+CONFIG_NETFILTER_XT_TARGET_SECMARK=y
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_QTAGUID=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+CONFIG_NETFILTER_XT_MATCH_SOCKET=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETFILTER_XT_MATCH_STRING=y
+CONFIG_NETFILTER_XT_MATCH_TIME=y
+CONFIG_NETFILTER_XT_MATCH_U32=y
+CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_RPFILTER=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_TARGET_REJECT_SKERR=y
+CONFIG_IP_NF_TARGET_ULOG=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_TARGET_ECN=y
+CONFIG_IP_NF_TARGET_TTL=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_SECURITY=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_NF_CONNTRACK_IPV6=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_MATCH_AH=y
+CONFIG_IP6_NF_MATCH_EUI64=y
+CONFIG_IP6_NF_MATCH_FRAG=y
+CONFIG_IP6_NF_MATCH_OPTS=y
+CONFIG_IP6_NF_MATCH_HL=y
+CONFIG_IP6_NF_MATCH_IPV6HEADER=y
+CONFIG_IP6_NF_MATCH_MH=y
+CONFIG_IP6_NF_MATCH_RT=y
+CONFIG_IP6_NF_TARGET_HL=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_TARGET_REJECT_SKERR=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+CONFIG_BRIDGE=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NET_CLS_U32=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_U32=y
+CONFIG_NET_CLS_ACT=y
+# CONFIG_WIRELESS is not set
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_PROC_DEVICETREE=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_VIRTIO_BLK=y
+CONFIG_SCSI=y
+# CONFIG_SCSI_PROC_FS is not set
+CONFIG_BLK_DEV_SD=y
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_NETDEVICES=y
+CONFIG_TUN=y
+CONFIG_VIRTIO_NET=y
+CONFIG_SMC91X=y
+CONFIG_PPP=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_MPPE=y
+CONFIG_PPPOLAC=y
+CONFIG_PPPOPNS=y
+# CONFIG_WLAN is not set
+CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_GOLDFISH_EVENTS=y
+# CONFIG_SERIO_I8042 is not set
+# CONFIG_SERIO_SERPORT is not set
+CONFIG_LEGACY_PTY_COUNT=16
+# CONFIG_DEVMEM is not set
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_AMBA_PL011=y
+CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
+CONFIG_VIRTIO_CONSOLE=y
+# CONFIG_HW_RANDOM is not set
+CONFIG_BATTERY_GOLDFISH=y
+# CONFIG_HWMON is not set
+CONFIG_FB=y
+CONFIG_FB_GOLDFISH=y
+CONFIG_FB_SIMPLE=y
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+# CONFIG_USB_SUPPORT is not set
+CONFIG_SWITCH=y
+CONFIG_RTC_CLASS=y
+CONFIG_VIRTIO_MMIO=y
+CONFIG_STAGING=y
+CONFIG_ANDROID=y
+CONFIG_ASHMEM=y
+CONFIG_ANDROID_TIMED_GPIO=y
+CONFIG_ANDROID_LOW_MEMORY_KILLER=y
+CONFIG_SYNC=y
+CONFIG_SW_SYNC=y
+CONFIG_SW_SYNC_USER=y
+CONFIG_ION=y
+CONFIG_GOLDFISH=y
+CONFIG_GOLDFISH_PIPE=y
+# CONFIG_IOMMU_SUPPORT is not set
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_FUSE_FS=y
+CONFIG_CUSE=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+# CONFIG_MISC_FILESYSTEMS is not set
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_FS=y
+# CONFIG_SCHED_DEBUG is not set
+CONFIG_DEBUG_INFO=y
+# CONFIG_FTRACE is not set
+CONFIG_ATOMIC64_SELFTEST=y
+CONFIG_SECURITY=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_SECURITY_SELINUX=y
diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
index fb6e0c4..cfe9860 100644
--- a/arch/arm64/include/asm/Kbuild
+++ b/arch/arm64/include/asm/Kbuild
@@ -10,6 +10,7 @@
generic-y += div64.h
generic-y += dma.h
generic-y += emergency-restart.h
+generic-y += early_ioremap.h
generic-y += errno.h
generic-y += ftrace.h
generic-y += hw_irq.h
@@ -26,10 +27,10 @@
generic-y += msgbuf.h
generic-y += mutex.h
generic-y += pci.h
-generic-y += percpu.h
generic-y += poll.h
generic-y += posix_types.h
generic-y += resource.h
+generic-y += rwsem.h
generic-y += scatterlist.h
generic-y += sections.h
generic-y += segment.h
diff --git a/arch/arm64/include/asm/arch_timer.h b/arch/arm64/include/asm/arch_timer.h
index bf6ab242..cb2be3b 100644
--- a/arch/arm64/include/asm/arch_timer.h
+++ b/arch/arm64/include/asm/arch_timer.h
@@ -97,27 +97,47 @@
return val;
}
-static inline void __cpuinit arch_counter_set_user_access(void)
+static inline u32 arch_timer_get_cntkctl(void)
{
u32 cntkctl;
-
- /* Disable user access to the timers and the physical counter. */
asm volatile("mrs %0, cntkctl_el1" : "=r" (cntkctl));
- cntkctl &= ~((3 << 8) | (1 << 0));
+ return cntkctl;
+}
- /* Enable user access to the virtual counter and frequency. */
- cntkctl |= (1 << 1);
+static inline void arch_timer_set_cntkctl(u32 cntkctl)
+{
asm volatile("msr cntkctl_el1, %0" : : "r" (cntkctl));
}
-static inline u64 arch_counter_get_cntpct(void)
+static inline void __cpuinit arch_counter_set_user_access(void)
{
- u64 cval;
+ u32 cntkctl = arch_timer_get_cntkctl();
- isb();
- asm volatile("mrs %0, cntpct_el0" : "=r" (cval));
+ /*
+  * Disable user access to the timers and the physical counter;
+  * also disable the virtual event stream.
+  */
+ cntkctl &= ~(ARCH_TIMER_USR_PT_ACCESS_EN
+ | ARCH_TIMER_USR_VT_ACCESS_EN
+ | ARCH_TIMER_VIRT_EVT_EN
+ | ARCH_TIMER_USR_PCT_ACCESS_EN);
- return cval;
+ /* Enable user access to the virtual counter */
+ cntkctl |= ARCH_TIMER_USR_VCT_ACCESS_EN;
+
+ arch_timer_set_cntkctl(cntkctl);
+}
+
+static inline void arch_timer_evtstrm_enable(int divider)
+{
+ u32 cntkctl = arch_timer_get_cntkctl();
+ cntkctl &= ~ARCH_TIMER_EVT_TRIGGER_MASK;
+ /* Set the divider and enable virtual event stream */
+ cntkctl |= (divider << ARCH_TIMER_EVT_TRIGGER_SHIFT)
+ | ARCH_TIMER_VIRT_EVT_EN;
+ arch_timer_set_cntkctl(cntkctl);
+ elf_hwcap |= HWCAP_EVTSTRM;
+#ifdef CONFIG_COMPAT
+ compat_elf_hwcap |= COMPAT_HWCAP_EVTSTRM;
+#endif
}
static inline u64 arch_counter_get_cntvct(void)
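Editor's note: arch_timer_evtstrm_enable() programs the CNTKCTL_EL1 event-stream divider, which selects the counter bit whose transitions generate wakeup events for cores waiting in wfe. A caller derives the divider from the counter frequency; a sketch targeting a roughly 10 kHz stream (the rate parameter and target frequency are assumptions, not taken from this patch):

    static void evtstrm_enable_10khz(u32 timer_rate_hz)
    {
            int div = ilog2(timer_rate_hz / 10000);

            /* the trigger field is 4 bits wide, so clamp the divider */
            arch_timer_evtstrm_enable(min(div, 15));
    }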
diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
index 8363644..736c591 100644
--- a/arch/arm64/include/asm/atomic.h
+++ b/arch/arm64/include/asm/atomic.h
@@ -54,8 +54,7 @@
" stxr %w1, %w0, %2\n"
" cbnz %w1, 1b"
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
- : "Ir" (i)
- : "cc");
+ : "Ir" (i));
}
static inline int atomic_add_return(int i, atomic_t *v)
@@ -70,7 +69,7 @@
" cbnz %w1, 1b"
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
: "Ir" (i)
- : "cc", "memory");
+ : "memory");
return result;
}
@@ -86,8 +85,7 @@
" stxr %w1, %w0, %2\n"
" cbnz %w1, 1b"
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
- : "Ir" (i)
- : "cc");
+ : "Ir" (i));
}
static inline int atomic_sub_return(int i, atomic_t *v)
@@ -102,7 +100,7 @@
" cbnz %w1, 1b"
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
: "Ir" (i)
- : "cc", "memory");
+ : "memory");
return result;
}
@@ -121,7 +119,7 @@
"2:"
: "=&r" (tmp), "=&r" (oldval), "+Q" (ptr->counter)
: "Ir" (old), "r" (new)
- : "cc", "memory");
+ : "cc");
return oldval;
}
@@ -173,7 +171,7 @@
*/
#define ATOMIC64_INIT(i) { (i) }
-#define atomic64_read(v) (*(volatile long long *)&(v)->counter)
+#define atomic64_read(v) (*(volatile long *)&(v)->counter)
#define atomic64_set(v,i) (((v)->counter) = (i))
static inline void atomic64_add(u64 i, atomic64_t *v)
@@ -187,8 +185,7 @@
" stxr %w1, %0, %2\n"
" cbnz %w1, 1b"
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
- : "Ir" (i)
- : "cc");
+ : "Ir" (i));
}
static inline long atomic64_add_return(long i, atomic64_t *v)
@@ -203,7 +200,7 @@
" cbnz %w1, 1b"
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
: "Ir" (i)
- : "cc", "memory");
+ : "memory");
return result;
}
@@ -219,8 +216,7 @@
" stxr %w1, %0, %2\n"
" cbnz %w1, 1b"
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
- : "Ir" (i)
- : "cc");
+ : "Ir" (i));
}
static inline long atomic64_sub_return(long i, atomic64_t *v)
@@ -235,7 +231,7 @@
" cbnz %w1, 1b"
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
: "Ir" (i)
- : "cc", "memory");
+ : "memory");
return result;
}
@@ -254,7 +250,7 @@
"2:"
: "=&r" (res), "=&r" (oldval), "+Q" (ptr->counter)
: "Ir" (old), "r" (new)
- : "cc", "memory");
+ : "cc");
return oldval;
}
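Editor's note: dropping the "cc" clobbers above is safe because the load-exclusive/store-exclusive retry loops branch with cbnz, which tests a register rather than the NZCV flags; the cmpxchg variants keep "cc" since they still execute cmp. Callers are unaffected -- for example, a cmpxchg-based increment-unless-zero (a sketch of standard caller code, not part of this patch) still reads naturally:

    static inline int atomic_inc_not_zero_sketch(atomic_t *v)
    {
            int c = atomic_read(v);

            while (c && atomic_cmpxchg(v, c, c + 1) != c)
                    c = atomic_read(v);
            return c != 0;
    }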
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index 78e20ba..5d69edd 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -25,9 +25,10 @@
#define wfi() asm volatile("wfi" : : : "memory")
#define isb() asm volatile("isb" : : : "memory")
-#define dsb() asm volatile("dsb sy" : : : "memory")
+#define dmb(opt) asm volatile("dmb sy" : : : "memory")
+#define dsb(opt) asm volatile("dsb sy" : : : "memory")
-#define mb() dsb()
+#define mb() dsb(sy)
#define rmb() asm volatile("dsb ld" : : : "memory")
#define wmb() asm volatile("dsb st" : : : "memory")
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index 3300cbd..a5176cf 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -85,6 +85,13 @@
}
/*
+ * Cache maintenance functions used by the DMA API. Not to be used directly.
+ */
+extern void __dma_map_area(const void *, size_t, int);
+extern void __dma_unmap_area(const void *, size_t, int);
+extern void __dma_flush_range(const void *, const void *);
+
+/*
* Copy user data from/to a page which is mapped into a different
* processes address space. Really, we want to allow our "user
* space" model to handle this.
@@ -116,6 +123,7 @@
static inline void __flush_icache_all(void)
{
asm("ic ialluis");
+ dsb(ish);
}
#define flush_dcache_mmap_lock(mapping) \
@@ -123,9 +131,6 @@
#define flush_dcache_mmap_unlock(mapping) \
spin_unlock_irq(&(mapping)->tree_lock)
-#define flush_icache_user_range(vma,page,addr,len) \
- flush_dcache_page(page)
-
/*
* We don't appear to need to do anything here. In fact, if we did, we'd
* duplicate cache flushing elsewhere performed by flush_dcache_page().
@@ -145,7 +150,7 @@
* set_pte_at() called from vmap_pte_range() does not
* have a DSB after cleaning the cache line.
*/
- dsb();
+ dsb(ish);
}
static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
index a8b44ca..73eb073 100644
--- a/arch/arm64/include/asm/cmpxchg.h
+++ b/arch/arm64/include/asm/cmpxchg.h
@@ -34,7 +34,7 @@
" cbnz %w1, 1b\n"
: "=&r" (ret), "=&r" (tmp), "+Q" (*(u8 *)ptr)
: "r" (x)
- : "cc", "memory");
+ : "memory");
break;
case 2:
asm volatile("// __xchg2\n"
@@ -43,7 +43,7 @@
" cbnz %w1, 1b\n"
: "=&r" (ret), "=&r" (tmp), "+Q" (*(u16 *)ptr)
: "r" (x)
- : "cc", "memory");
+ : "memory");
break;
case 4:
asm volatile("// __xchg4\n"
@@ -52,7 +52,7 @@
" cbnz %w1, 1b\n"
: "=&r" (ret), "=&r" (tmp), "+Q" (*(u32 *)ptr)
: "r" (x)
- : "cc", "memory");
+ : "memory");
break;
case 8:
asm volatile("// __xchg8\n"
@@ -61,7 +61,7 @@
" cbnz %w1, 1b\n"
: "=&r" (ret), "=&r" (tmp), "+Q" (*(u64 *)ptr)
: "r" (x)
- : "cc", "memory");
+ : "memory");
break;
default:
BUILD_BUG();
@@ -71,7 +71,12 @@
}
#define xchg(ptr,x) \
- ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
+({ \
+ __typeof__(*(ptr)) __ret; \
+ __ret = (__typeof__(*(ptr))) \
+ __xchg((unsigned long)(x), (ptr), sizeof(*(ptr))); \
+ __ret; \
+})
static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
unsigned long new, int size)
@@ -179,4 +184,6 @@
#define cmpxchg64(ptr,o,n) cmpxchg((ptr),(o),(n))
#define cmpxchg64_local(ptr,o,n) cmpxchg_local((ptr),(o),(n))
+#define cmpxchg64_relaxed(ptr,o,n) cmpxchg_local((ptr),(o),(n))
+
#endif /* __ASM_CMPXCHG_H */
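Editor's note: wrapping xchg() in a statement expression gives the result the pointed-to type without a bare cast, so the macro can be used either for its value or purely for the store. A usage sketch (the 'state' variable is an assumption for illustration):

    static unsigned long state;

    static void toggle_state(void)
    {
            unsigned long prev = xchg(&state, 1UL);   /* value consumed */

            (void)prev;
            xchg(&state, 0UL);  /* value discarded without a cast warning */
    }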
diff --git a/arch/arm64/include/asm/cpu_ops.h b/arch/arm64/include/asm/cpu_ops.h
new file mode 100644
index 0000000..15241307
--- /dev/null
+++ b/arch/arm64/include/asm/cpu_ops.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2013 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_CPU_OPS_H
+#define __ASM_CPU_OPS_H
+
+#include <linux/init.h>
+#include <linux/threads.h>
+
+struct device_node;
+
+/**
+ * struct cpu_operations - Callback operations for hotplugging CPUs.
+ *
+ * @name: Name of the property as it appears in a devicetree cpu node's
+ * enable-method property.
+ * @cpu_init: Reads any data necessary for a specific enable-method from the
+ * devicetree, for a given cpu node and proposed logical id.
+ * @cpu_prepare: Early one-time preparation step for a cpu. If there is a
+ * mechanism for doing so, tests whether it is possible to boot
+ * the given CPU.
+ * @cpu_boot: Boots a cpu into the kernel.
+ * @cpu_postboot: Optionally, perform any post-boot cleanup or necessary
+ * synchronisation. Called from the cpu being booted.
+ * @cpu_disable: Prepares a cpu to die. May fail for some mechanism-specific
+ * reason, which will cause the hot unplug to be aborted. Called
+ * from the cpu to be killed.
+ * @cpu_die: Makes a cpu leave the kernel. Must not fail. Called from the
+ * cpu being killed.
+ * @cpu_suspend: Suspends a cpu and saves the required context. May fail owing
+ * to wrong parameters or error conditions. Called from the
+ * CPU being suspended. Must be called with IRQs disabled.
+ */
+struct cpu_operations {
+ const char *name;
+ int (*cpu_init)(struct device_node *, unsigned int);
+ int (*cpu_prepare)(unsigned int);
+ int (*cpu_boot)(unsigned int);
+ void (*cpu_postboot)(void);
+#ifdef CONFIG_HOTPLUG_CPU
+ int (*cpu_disable)(unsigned int cpu);
+ void (*cpu_die)(unsigned int cpu);
+#endif
+#ifdef CONFIG_ARM64_CPU_SUSPEND
+ int (*cpu_suspend)(unsigned long);
+#endif
+};
+
+extern const struct cpu_operations *cpu_ops[NR_CPUS];
+extern int __init cpu_read_ops(struct device_node *dn, int cpu);
+extern void __init cpu_read_bootcpu_ops(void);
+
+#endif /* ifndef __ASM_CPU_OPS_H */
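Editor's note: a backend implements this interface and is selected by matching @name against each cpu node's enable-method string. The shape, as a hedged sketch with hypothetical names (the real spin-table and PSCI backends elsewhere in this series follow the same pattern):

    static int sketch_cpu_init(struct device_node *dn, unsigned int cpu)
    {
            return 0;       /* parse per-cpu properties from dn */
    }

    static int sketch_cpu_prepare(unsigned int cpu)
    {
            return 0;       /* check the cpu can actually be booted */
    }

    static int sketch_cpu_boot(unsigned int cpu)
    {
            return -ENOSYS; /* kick the cpu into the kernel */
    }

    static const struct cpu_operations sketch_cpu_ops = {
            .name           = "vendor,sketch-method",
            .cpu_init       = sketch_cpu_init,
            .cpu_prepare    = sketch_cpu_prepare,
            .cpu_boot       = sketch_cpu_boot,
    };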
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index 261db27..c404fb0 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -20,6 +20,16 @@
#define MPIDR_HWID_BITMASK 0xff00ffffff
+#define MPIDR_LEVEL_BITS_SHIFT 3
+#define MPIDR_LEVEL_BITS (1 << MPIDR_LEVEL_BITS_SHIFT)
+#define MPIDR_LEVEL_MASK ((1 << MPIDR_LEVEL_BITS) - 1)
+
+#define MPIDR_LEVEL_SHIFT(level) \
+ (((1 << level) >> 1) << MPIDR_LEVEL_BITS_SHIFT)
+
+#define MPIDR_AFFINITY_LEVEL(mpidr, level) \
+ ((mpidr >> MPIDR_LEVEL_SHIFT(level)) & MPIDR_LEVEL_MASK)
+
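Editor's note: MPIDR_LEVEL_SHIFT(level) evaluates to 0, 8, 16 and 32 for levels 0-3, matching the Aff0-Aff3 fields of MPIDR_EL1. A worked example decoding an MPIDR value of 0x0201:

    u64 mpidr = 0x0201;

    MPIDR_AFFINITY_LEVEL(mpidr, 0); /* (0x0201 >> 0)  & 0xff = 0x01 */
    MPIDR_AFFINITY_LEVEL(mpidr, 1); /* (0x0201 >> 8)  & 0xff = 0x02 */
    MPIDR_AFFINITY_LEVEL(mpidr, 2); /* (0x0201 >> 16) & 0xff = 0x00 */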
#define read_cpuid(reg) ({ \
u64 __val; \
asm("mrs %0, " #reg : "=r" (__val)); \
@@ -27,11 +37,14 @@
})
#define ARM_CPU_IMP_ARM 0x41
+#define ARM_CPU_IMP_APM 0x50
#define ARM_CPU_PART_AEM_V8 0xD0F0
#define ARM_CPU_PART_FOUNDATION 0xD000
#define ARM_CPU_PART_CORTEX_A57 0xD070
+#define APM_CPU_PART_POTENZA 0x0000
+
#ifndef __ASSEMBLY__
/*
diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h
index ef8235c..7c951a5 100644
--- a/arch/arm64/include/asm/debug-monitors.h
+++ b/arch/arm64/include/asm/debug-monitors.h
@@ -26,6 +26,53 @@
#define DBG_ESR_EVT_HWWP 0x2
#define DBG_ESR_EVT_BRK 0x6
+/*
+ * Breakpoint instruction encoding
+ */
+#define BREAK_INSTR_SIZE 4
+
+/*
+ * ESR values expected for dynamic and compile time BRK instruction
+ */
+#define DBG_ESR_VAL_BRK(x) (0xf2000000 | ((x) & 0xfffff))
+
+/*
+ * #imm16 values used for BRK instruction generation
+ * Allowed values for kgdb are 0x400 - 0x7ff
+ * 0x400: for dynamic BRK instruction
+ * 0x401: for compile time BRK instruction
+ */
+#define KGDB_DYN_DGB_BRK_IMM 0x400
+#define KDBG_COMPILED_DBG_BRK_IMM 0x401
+
+/*
+ * BRK instruction encoding
+ * The #imm16 value should be placed at bits[20:5] within the BRK instruction
+ */
+#define AARCH64_BREAK_MON 0xd4200000
+
+/*
+ * Extract byte from BRK instruction
+ */
+#define KGDB_DYN_DGB_BRK_INS_BYTE(x) \
+ ((((AARCH64_BREAK_MON) & 0xffe0001f) >> (x * 8)) & 0xff)
+
+/*
+ * Extract byte from BRK #imm16
+ */
+#define KGBD_DYN_DGB_BRK_IMM_BYTE(x) \
+ (((((KGDB_DYN_DGB_BRK_IMM) & 0xffff) << 5) >> (x * 8)) & 0xff)
+
+#define KGDB_DYN_DGB_BRK_BYTE(x) \
+ (KGDB_DYN_DGB_BRK_INS_BYTE(x) | KGBD_DYN_DGB_BRK_IMM_BYTE(x))
+
+#define KGDB_DYN_BRK_INS_BYTE0 KGDB_DYN_DGB_BRK_BYTE(0)
+#define KGDB_DYN_BRK_INS_BYTE1 KGDB_DYN_DGB_BRK_BYTE(1)
+#define KGDB_DYN_BRK_INS_BYTE2 KGDB_DYN_DGB_BRK_BYTE(2)
+#define KGDB_DYN_BRK_INS_BYTE3 KGDB_DYN_DGB_BRK_BYTE(3)
+
+#define CACHE_FLUSH_IS_SAFE 1
+
enum debug_el {
DBG_ACTIVE_EL0 = 0,
DBG_ACTIVE_EL1,
@@ -43,25 +90,29 @@
#ifndef __ASSEMBLY__
struct task_struct;
-#define local_dbg_save(flags) \
- do { \
- typecheck(unsigned long, flags); \
- asm volatile( \
- "mrs %0, daif // local_dbg_save\n" \
- "msr daifset, #8" \
- : "=r" (flags) : : "memory"); \
- } while (0)
-
-#define local_dbg_restore(flags) \
- do { \
- typecheck(unsigned long, flags); \
- asm volatile( \
- "msr daif, %0 // local_dbg_restore\n" \
- : : "r" (flags) : "memory"); \
- } while (0)
-
#define DBG_ARCH_ID_RESERVED 0 /* In case of ptrace ABI updates. */
+#define DBG_HOOK_HANDLED 0
+#define DBG_HOOK_ERROR 1
+
+struct step_hook {
+ struct list_head node;
+ int (*fn)(struct pt_regs *regs, unsigned int esr);
+};
+
+void register_step_hook(struct step_hook *hook);
+void unregister_step_hook(struct step_hook *hook);
+
+struct break_hook {
+ struct list_head node;
+ u32 esr_val;
+ u32 esr_mask;
+ int (*fn)(struct pt_regs *regs, unsigned int esr);
+};
+
+void register_break_hook(struct break_hook *hook);
+void unregister_break_hook(struct break_hook *hook);
+
u8 debug_monitors_arch(void);
void enable_debug_monitors(enum debug_el el);
diff --git a/arch/arm64/include/asm/device.h b/arch/arm64/include/asm/device.h
index 0d8453c..cf98b36 100644
--- a/arch/arm64/include/asm/device.h
+++ b/arch/arm64/include/asm/device.h
@@ -18,6 +18,9 @@
struct dev_archdata {
struct dma_map_ops *dma_ops;
+#ifdef CONFIG_IOMMU_API
+ void *iommu; /* private IOMMU data */
+#endif
};
struct pdev_archdata {
diff --git a/arch/arm64/include/asm/dma-contiguous.h b/arch/arm64/include/asm/dma-contiguous.h
new file mode 100644
index 0000000..14c4c0c
--- /dev/null
+++ b/arch/arm64/include/asm/dma-contiguous.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ASM_DMA_CONTIGUOUS_H
+#define _ASM_DMA_CONTIGUOUS_H
+
+#ifdef __KERNEL__
+#ifdef CONFIG_DMA_CMA
+
+#include <linux/types.h>
+
+static inline void
+dma_contiguous_early_fixup(phys_addr_t base, unsigned long size) { }
+
+#endif
+#endif
+
+#endif
diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
index 9947768..00a41aa 100644
--- a/arch/arm64/include/asm/dma-mapping.h
+++ b/arch/arm64/include/asm/dma-mapping.h
@@ -25,7 +25,10 @@
#define ARCH_HAS_DMA_GET_REQUIRED_MASK
+#define DMA_ERROR_CODE (~(dma_addr_t)0)
extern struct dma_map_ops *dma_ops;
+extern struct dma_map_ops coherent_swiotlb_dma_ops;
+extern struct dma_map_ops noncoherent_swiotlb_dma_ops;
static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
@@ -35,6 +38,11 @@
return dev->archdata.dma_ops;
}
+static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
+{
+ dev->archdata.dma_ops = ops;
+}
+
#include <asm-generic/dma-mapping-common.h>
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
@@ -81,8 +89,12 @@
{
}
-static inline void *dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flags)
+#define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL)
+#define dma_free_coherent(d, s, h, f) dma_free_attrs(d, s, h, f, NULL)
+
+static inline void *dma_alloc_attrs(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flags,
+ struct dma_attrs *attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
void *vaddr;
@@ -90,13 +102,14 @@
if (dma_alloc_from_coherent(dev, size, dma_handle, &vaddr))
return vaddr;
- vaddr = ops->alloc(dev, size, dma_handle, flags, NULL);
+ vaddr = ops->alloc(dev, size, dma_handle, flags, attrs);
debug_dma_alloc_coherent(dev, size, *dma_handle, vaddr);
return vaddr;
}
-static inline void dma_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dev_addr)
+static inline void dma_free_attrs(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dev_addr,
+ struct dma_attrs *attrs)
{
struct dma_map_ops *ops = get_dma_ops(dev);
@@ -104,7 +117,7 @@
return;
debug_dma_free_coherent(dev, size, vaddr, dev_addr);
- ops->free(dev, size, vaddr, dev_addr, NULL);
+ ops->free(dev, size, vaddr, dev_addr, attrs);
}
/*
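Editor's note: with dma_alloc_coherent()/dma_free_coherent() now thin wrappers, callers that need attributes use the attrs variants directly. A usage sketch with the 3.10-era struct dma_attrs API ('dev' is assumed to be in scope):

    struct dma_attrs attrs;
    dma_addr_t dma_handle;
    void *cpu_addr;

    init_dma_attrs(&attrs);
    dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
    cpu_addr = dma_alloc_attrs(dev, SZ_4K, &dma_handle, GFP_KERNEL, &attrs);
    if (cpu_addr)
            dma_free_attrs(dev, SZ_4K, cpu_addr, dma_handle, &attrs);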
diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
index 7883412..c4a7f94 100644
--- a/arch/arm64/include/asm/esr.h
+++ b/arch/arm64/include/asm/esr.h
@@ -42,7 +42,7 @@
#define ESR_EL1_EC_SP_ALIGN (0x26)
#define ESR_EL1_EC_FP_EXC32 (0x28)
#define ESR_EL1_EC_FP_EXC64 (0x2C)
-#define ESR_EL1_EC_SERRROR (0x2F)
+#define ESR_EL1_EC_SERROR (0x2F)
#define ESR_EL1_EC_BREAKPT_EL0 (0x30)
#define ESR_EL1_EC_BREAKPT_EL1 (0x31)
#define ESR_EL1_EC_SOFTSTP_EL0 (0x32)
diff --git a/arch/arm64/include/asm/fixmap.h b/arch/arm64/include/asm/fixmap.h
new file mode 100644
index 0000000..5f7bfe6
--- /dev/null
+++ b/arch/arm64/include/asm/fixmap.h
@@ -0,0 +1,67 @@
+/*
+ * fixmap.h: compile-time virtual memory allocation
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1998 Ingo Molnar
+ * Copyright (C) 2013 Mark Salter <msalter@redhat.com>
+ *
+ * Adapted from arch/x86_64 version.
+ *
+ */
+
+#ifndef _ASM_ARM64_FIXMAP_H
+#define _ASM_ARM64_FIXMAP_H
+
+#ifndef __ASSEMBLY__
+#include <linux/kernel.h>
+#include <asm/page.h>
+
+/*
+ * Here we define all the compile-time 'special' virtual
+ * addresses. The point is to have a constant address at
+ * compile time, but to set the physical address only
+ * in the boot process.
+ *
+ * These 'compile-time allocated' memory buffers are
+ * page-sized. Use set_fixmap(idx,phys) to associate
+ * physical memory with fixmap indices.
+ *
+ */
+enum fixed_addresses {
+ FIX_EARLYCON_MEM_BASE,
+ __end_of_permanent_fixed_addresses,
+
+ /*
+ * Temporary boot-time mappings, used by early_ioremap(),
+ * before ioremap() is functional.
+ */
+#ifdef CONFIG_ARM64_64K_PAGES
+#define NR_FIX_BTMAPS 4
+#else
+#define NR_FIX_BTMAPS 64
+#endif
+#define FIX_BTMAPS_SLOTS 7
+#define TOTAL_FIX_BTMAPS (NR_FIX_BTMAPS * FIX_BTMAPS_SLOTS)
+
+ FIX_BTMAP_END = __end_of_permanent_fixed_addresses,
+ FIX_BTMAP_BEGIN = FIX_BTMAP_END + TOTAL_FIX_BTMAPS - 1,
+ __end_of_fixed_addresses
+};
+
+#define FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT)
+#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
+
+#define FIXMAP_PAGE_IO __pgprot(PROT_DEVICE_nGnRE)
+
+extern void __early_set_fixmap(enum fixed_addresses idx,
+ phys_addr_t phys, pgprot_t flags);
+
+#define __set_fixmap __early_set_fixmap
+
+#include <asm-generic/fixmap.h>
+
+#endif /* !__ASSEMBLY__ */
+#endif /* _ASM_ARM64_FIXMAP_H */
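Editor's note: before ioremap() works, a boot-time mapping is made by pairing a fixmap slot with a physical address. A sketch for an early UART; the address is borrowed from the serial node in the dts above purely as an example:

    phys_addr_t uart_phys = 0x1c020000;
    void __iomem *uart_base;

    __early_set_fixmap(FIX_EARLYCON_MEM_BASE,
                       uart_phys & PAGE_MASK, FIXMAP_PAGE_IO);
    uart_base = (void __iomem *)fix_to_virt(FIX_EARLYCON_MEM_BASE) +
                (uart_phys & ~PAGE_MASK);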
diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
index c582fa3..6230bab 100644
--- a/arch/arm64/include/asm/futex.h
+++ b/arch/arm64/include/asm/futex.h
@@ -30,6 +30,7 @@
" cbnz %w3, 1b\n" \
"3:\n" \
" .pushsection .fixup,\"ax\"\n" \
+" .align 2\n" \
"4: mov %w0, %w5\n" \
" b 3b\n" \
" .popsection\n" \
@@ -39,7 +40,7 @@
" .popsection\n" \
: "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp) \
: "r" (oparg), "Ir" (-EFAULT) \
- : "cc", "memory")
+ : "memory")
static inline int
futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
@@ -126,7 +127,7 @@
" .popsection\n"
: "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp)
: "r" (oldval), "r" (newval), "Ir" (-EFAULT)
- : "cc", "memory");
+ : "memory");
*uval = val;
return ret;
diff --git a/arch/arm64/include/asm/hardirq.h b/arch/arm64/include/asm/hardirq.h
index 990c051..ae4801d 100644
--- a/arch/arm64/include/asm/hardirq.h
+++ b/arch/arm64/include/asm/hardirq.h
@@ -20,7 +20,7 @@
#include <linux/threads.h>
#include <asm/irq.h>
-#define NR_IPI 4
+#define NR_IPI 5
typedef struct {
unsigned int __softirq_pending;
diff --git a/arch/arm64/include/asm/hugetlb.h b/arch/arm64/include/asm/hugetlb.h
new file mode 100644
index 0000000..5b7ca8a
--- /dev/null
+++ b/arch/arm64/include/asm/hugetlb.h
@@ -0,0 +1,117 @@
+/*
+ * arch/arm64/include/asm/hugetlb.h
+ *
+ * Copyright (C) 2013 Linaro Ltd.
+ *
+ * Based on arch/x86/include/asm/hugetlb.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __ASM_HUGETLB_H
+#define __ASM_HUGETLB_H
+
+#include <asm-generic/hugetlb.h>
+#include <asm/page.h>
+
+static inline pte_t huge_ptep_get(pte_t *ptep)
+{
+ return *ptep;
+}
+
+static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte)
+{
+ set_pte_at(mm, addr, ptep, pte);
+}
+
+static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep)
+{
+ ptep_clear_flush(vma, addr, ptep);
+}
+
+static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ ptep_set_wrprotect(mm, addr, ptep);
+}
+
+static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ return ptep_get_and_clear(mm, addr, ptep);
+}
+
+static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep,
+ pte_t pte, int dirty)
+{
+ return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
+}
+
+static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
+ unsigned long addr, unsigned long end,
+ unsigned long floor,
+ unsigned long ceiling)
+{
+ free_pgd_range(tlb, addr, end, floor, ceiling);
+}
+
+static inline int is_hugepage_only_range(struct mm_struct *mm,
+ unsigned long addr, unsigned long len)
+{
+ return 0;
+}
+
+static inline int prepare_hugepage_range(struct file *file,
+ unsigned long addr, unsigned long len)
+{
+ struct hstate *h = hstate_file(file);
+ if (len & ~huge_page_mask(h))
+ return -EINVAL;
+ if (addr & ~huge_page_mask(h))
+ return -EINVAL;
+ return 0;
+}
+
+static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm)
+{
+}
+
+static inline int huge_pte_none(pte_t pte)
+{
+ return pte_none(pte);
+}
+
+static inline pte_t huge_pte_wrprotect(pte_t pte)
+{
+ return pte_wrprotect(pte);
+}
+
+static inline int arch_prepare_hugepage(struct page *page)
+{
+ return 0;
+}
+
+static inline void arch_release_hugepage(struct page *page)
+{
+}
+
+static inline void arch_clear_hugepage_flags(struct page *page)
+{
+ clear_bit(PG_dcache_clean, &page->flags);
+}
+
+#endif /* __ASM_HUGETLB_H */
diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
new file mode 100644
index 0000000..c44ad39
--- /dev/null
+++ b/arch/arm64/include/asm/insn.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) 2013 Huawei Ltd.
+ * Author: Jiang Liu <liuj97@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_INSN_H
+#define __ASM_INSN_H
+#include <linux/types.h>
+
+/* A64 instructions are always 32 bits. */
+#define AARCH64_INSN_SIZE 4
+
+/*
+ * ARM Architecture Reference Manual for ARMv8 Profile-A, Issue A.a
+ * Section C3.1 "A64 instruction index by encoding":
+ * AArch64 main encoding table
+ * Bit position
+ * 28 27 26 25 Encoding Group
+ * 0 0 - - Unallocated
+ * 1 0 0 - Data processing, immediate
+ * 1 0 1 - Branch, exception generation and system instructions
+ * - 1 - 0 Loads and stores
+ * - 1 0 1 Data processing - register
+ * 0 1 1 1 Data processing - SIMD and floating point
+ * 1 1 1 1 Data processing - SIMD and floating point
+ * "-" means "don't care"
+ */
+enum aarch64_insn_encoding_class {
+ AARCH64_INSN_CLS_UNKNOWN, /* UNALLOCATED */
+ AARCH64_INSN_CLS_DP_IMM, /* Data processing - immediate */
+ AARCH64_INSN_CLS_DP_REG, /* Data processing - register */
+ AARCH64_INSN_CLS_DP_FPSIMD, /* Data processing - SIMD and FP */
+ AARCH64_INSN_CLS_LDST, /* Loads and stores */
+ AARCH64_INSN_CLS_BR_SYS, /* Branch, exception generation and
+ * system instructions */
+};
+
+enum aarch64_insn_hint_op {
+ AARCH64_INSN_HINT_NOP = 0x0 << 5,
+ AARCH64_INSN_HINT_YIELD = 0x1 << 5,
+ AARCH64_INSN_HINT_WFE = 0x2 << 5,
+ AARCH64_INSN_HINT_WFI = 0x3 << 5,
+ AARCH64_INSN_HINT_SEV = 0x4 << 5,
+ AARCH64_INSN_HINT_SEVL = 0x5 << 5,
+};
+
+enum aarch64_insn_imm_type {
+ AARCH64_INSN_IMM_ADR,
+ AARCH64_INSN_IMM_26,
+ AARCH64_INSN_IMM_19,
+ AARCH64_INSN_IMM_16,
+ AARCH64_INSN_IMM_14,
+ AARCH64_INSN_IMM_12,
+ AARCH64_INSN_IMM_9,
+ AARCH64_INSN_IMM_MAX
+};
+
+enum aarch64_insn_branch_type {
+ AARCH64_INSN_BRANCH_NOLINK,
+ AARCH64_INSN_BRANCH_LINK,
+};
+
+#define __AARCH64_INSN_FUNCS(abbr, mask, val) \
+static __always_inline bool aarch64_insn_is_##abbr(u32 code) \
+{ return (code & (mask)) == (val); } \
+static __always_inline u32 aarch64_insn_get_##abbr##_value(void) \
+{ return (val); }
+
+__AARCH64_INSN_FUNCS(b, 0xFC000000, 0x14000000)
+__AARCH64_INSN_FUNCS(bl, 0xFC000000, 0x94000000)
+__AARCH64_INSN_FUNCS(svc, 0xFFE0001F, 0xD4000001)
+__AARCH64_INSN_FUNCS(hvc, 0xFFE0001F, 0xD4000002)
+__AARCH64_INSN_FUNCS(smc, 0xFFE0001F, 0xD4000003)
+__AARCH64_INSN_FUNCS(brk, 0xFFE0001F, 0xD4200000)
+__AARCH64_INSN_FUNCS(hint, 0xFFFFF01F, 0xD503201F)
+
+#undef __AARCH64_INSN_FUNCS
+
+bool aarch64_insn_is_nop(u32 insn);
+
+int aarch64_insn_read(void *addr, u32 *insnp);
+int aarch64_insn_write(void *addr, u32 insn);
+enum aarch64_insn_encoding_class aarch64_get_insn_class(u32 insn);
+u32 aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
+ u32 insn, u64 imm);
+u32 aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
+ enum aarch64_insn_branch_type type);
+u32 aarch64_insn_gen_hint(enum aarch64_insn_hint_op op);
+u32 aarch64_insn_gen_nop(void);
+
+bool aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn);
+
+int aarch64_insn_patch_text_nosync(void *addr, u32 insn);
+int aarch64_insn_patch_text_sync(void *addrs[], u32 insns[], int cnt);
+int aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt);
+
+#endif /* __ASM_INSN_H */
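Editor's note: typical use pairs a generator with one of the patching entry points, e.g. turning the instruction at a code address into a direct branch and later back into a NOP. A sketch under assumed addr/target arguments, not taken from this series:

    static int sketch_patch_branch(void *addr, void *target)
    {
            u32 insn = aarch64_insn_gen_branch_imm((unsigned long)addr,
                                                   (unsigned long)target,
                                                   AARCH64_INSN_BRANCH_NOLINK);

            return aarch64_insn_patch_text(&addr, &insn, 1);
    }

    static int sketch_patch_nop(void *addr)
    {
            u32 insn = aarch64_insn_gen_nop();

            return aarch64_insn_patch_text(&addr, &insn, 1);
    }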
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index 2e12258..e1018b7 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -26,6 +26,7 @@
#include <asm/byteorder.h>
#include <asm/barrier.h>
#include <asm/pgtable.h>
+#include <asm/early_ioremap.h>
/*
* Generic IO read/write. These perform native-endian accesses.
@@ -118,7 +119,7 @@
* I/O port access primitives.
*/
#define IO_SPACE_LIMIT 0xffff
-#define PCI_IOBASE ((void __iomem *)(MODULES_VADDR - SZ_2M))
+#define PCI_IOBASE ((void __iomem *)(MODULES_VADDR - SZ_32M))
static inline u8 inb(unsigned long addr)
{
@@ -225,18 +226,11 @@
extern void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot);
extern void __iounmap(volatile void __iomem *addr);
-#define PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_DIRTY)
-#define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE))
-#define PROT_NORMAL_NC (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL_NC))
-
#define ioremap(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
#define ioremap_nocache(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
#define ioremap_wc(addr, size) __ioremap((addr), (size), __pgprot(PROT_NORMAL_NC))
#define iounmap __iounmap
-#define PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF)
-#define PROT_SECT_DEVICE_nGnRE (PROT_SECT_DEFAULT | PTE_PXN | PTE_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE))
-
#define ARCH_HAS_IOREMAP_WC
#include <asm-generic/iomap.h>
diff --git a/arch/arm64/include/asm/irq.h b/arch/arm64/include/asm/irq.h
index 0332fc0..e1f7ecd 100644
--- a/arch/arm64/include/asm/irq.h
+++ b/arch/arm64/include/asm/irq.h
@@ -4,6 +4,7 @@
#include <asm-generic/irq.h>
extern void (*handle_arch_irq)(struct pt_regs *);
+extern void migrate_irqs(void);
extern void set_handle_irq(void (*handle_irq)(struct pt_regs *));
#endif
diff --git a/arch/arm64/include/asm/irqflags.h b/arch/arm64/include/asm/irqflags.h
index aa11943..0ed52c6 100644
--- a/arch/arm64/include/asm/irqflags.h
+++ b/arch/arm64/include/asm/irqflags.h
@@ -87,5 +87,28 @@
return flags & PSR_I_BIT;
}
+/*
+ * save and restore debug state
+ */
+#define local_dbg_save(flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ asm volatile( \
+ "mrs %0, daif // local_dbg_save\n" \
+ "msr daifset, #8" \
+ : "=r" (flags) : : "memory"); \
+ } while (0)
+
+#define local_dbg_restore(flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ asm volatile( \
+ "msr daif, %0 // local_dbg_restore\n" \
+ : : "r" (flags) : "memory"); \
+ } while (0)
+
+#define local_dbg_enable() asm("msr daifclr, #8" : : : "memory")
+#define local_dbg_disable() asm("msr daifset, #8" : : : "memory")
+
#endif
#endif
diff --git a/arch/arm64/include/asm/jump_label.h b/arch/arm64/include/asm/jump_label.h
new file mode 100644
index 0000000..076a1c7
--- /dev/null
+++ b/arch/arm64/include/asm/jump_label.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2013 Huawei Ltd.
+ * Author: Jiang Liu <liuj97@gmail.com>
+ *
+ * Based on arch/arm/include/asm/jump_label.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_JUMP_LABEL_H
+#define __ASM_JUMP_LABEL_H
+#include <linux/types.h>
+#include <asm/insn.h>
+
+#ifdef __KERNEL__
+
+#define JUMP_LABEL_NOP_SIZE AARCH64_INSN_SIZE
+
+static __always_inline bool arch_static_branch(struct static_key *key)
+{
+ asm goto("1: nop\n\t"
+ ".pushsection __jump_table, \"aw\"\n\t"
+ ".align 3\n\t"
+ ".quad 1b, %l[l_yes], %c0\n\t"
+ ".popsection\n\t"
+ : : "i"(key) : : l_yes);
+
+ return false;
+l_yes:
+ return true;
+}
+
+#endif /* __KERNEL__ */
+
+typedef u64 jump_label_t;
+
+struct jump_entry {
+ jump_label_t code;
+ jump_label_t target;
+ jump_label_t key;
+};
+
+#endif /* __ASM_JUMP_LABEL_H */
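Editor's note: arch_static_branch() emits a NOP plus a __jump_table entry; flipping the key patches that NOP into a branch to the out-of-line block. Caller-side sketch with the 3.10-era static_key API (handle_rare_case() is hypothetical):

    static void handle_rare_case(void);

    static struct static_key sketch_key = STATIC_KEY_INIT_FALSE;

    void hot_path(void)
    {
            if (static_key_false(&sketch_key))
                    handle_rare_case();     /* hypothetical slow path */
    }

    /* elsewhere: static_key_slow_inc(&sketch_key) enables the branch,
     * static_key_slow_dec(&sketch_key) restores the NOP. */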
diff --git a/arch/arm64/include/asm/kgdb.h b/arch/arm64/include/asm/kgdb.h
new file mode 100644
index 0000000..3c8aafc
--- /dev/null
+++ b/arch/arm64/include/asm/kgdb.h
@@ -0,0 +1,84 @@
+/*
+ * AArch64 KGDB support
+ *
+ * Based on arch/arm/include/kgdb.h
+ *
+ * Copyright (C) 2013 Cavium Inc.
+ * Author: Vijaya Kumar K <vijaya.kumar@caviumnetworks.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ARM_KGDB_H
+#define __ARM_KGDB_H
+
+#include <linux/ptrace.h>
+#include <asm/debug-monitors.h>
+
+#ifndef __ASSEMBLY__
+
+static inline void arch_kgdb_breakpoint(void)
+{
+ asm ("brk %0" : : "I" (KDBG_COMPILED_DBG_BRK_IMM));
+}
+
+extern void kgdb_handle_bus_error(void);
+extern int kgdb_fault_expected;
+
+#endif /* !__ASSEMBLY__ */
+
+/*
+ * gdb expects the following register layout.
+ *
+ * General purpose regs:
+ * r0-r30: 64 bit
+ * sp,pc : 64 bit
+ * pstate : 64 bit
+ * Total: 34
+ * FPU regs:
+ * f0-f31: 128 bit
+ * Total: 32
+ * Extra regs
+ * fpsr & fpcr: 32 bit
+ * Total: 2
+ *
+ */
+
+#define _GP_REGS 34
+#define _FP_REGS 32
+#define _EXTRA_REGS 2
+/*
+ * General purpose register size in bytes.
+ * pstate is only 4 bytes, so subtract 4 bytes where applicable.
+ */
+#define GP_REG_BYTES (_GP_REGS * 8)
+#define DBG_MAX_REG_NUM (_GP_REGS + _FP_REGS + _EXTRA_REGS)
+
+/*
+ * Size of the I/O buffer for a gdb packet, chosen to be large enough
+ * to hold the contents of all registers.
+ */
+
+#define BUFMAX 2048
+
+/*
+ * Number of bytes required for the gdb_regs buffer:
+ * _GP_REGS at 8 bytes, _FP_REGS at 16 bytes and _EXTRA_REGS at 4 bytes
+ * each, i.e. (34 * 8) + (32 * 16) + (2 * 4) = 792 bytes.
+ * GDB fails to connect for any size beyond this with the error
+ * "'g' packet reply is too long".
+ */
+
+#define NUMREGBYTES ((_GP_REGS * 8) + (_FP_REGS * 16) + \
+ (_EXTRA_REGS * 4))
+
+#endif /* __ASM_KGDB_H */
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 381f556..83138d2 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -33,18 +33,23 @@
#define UL(x) _AC(x, UL)
/*
- * PAGE_OFFSET - the virtual address of the start of the kernel image.
+ * PAGE_OFFSET - the virtual address of the start of the kernel image,
+ * i.e. ~0UL << (VA_BITS - 1).
* VA_BITS - the maximum number of bits for virtual addresses.
* TASK_SIZE - the maximum size of a user space task.
* TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area.
* The module space lives between the addresses given by TASK_SIZE
* and PAGE_OFFSET - it must be within 128MB of the kernel text.
*/
-#define PAGE_OFFSET UL(0xffffffc000000000)
+#ifdef CONFIG_ARM64_64K_PAGES
+#define VA_BITS (42)
+#else
+#define VA_BITS (39)
+#endif
+#define PAGE_OFFSET (UL(0xffffffffffffffff) << (VA_BITS - 1))
#define MODULES_END (PAGE_OFFSET)
#define MODULES_VADDR (MODULES_END - SZ_64M)
-#define EARLYCON_IOBASE (MODULES_VADDR - SZ_4M)
-#define VA_BITS (39)
+#define FIXADDR_TOP (MODULES_VADDR - SZ_2M - PAGE_SIZE)
#define TASK_SIZE_64 (UL(1) << VA_BITS)
#ifdef CONFIG_COMPAT
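Editor's note, a worked check of the new formula: with 4K pages VA_BITS is 39, so PAGE_OFFSET = ~0UL << 38 = 0xffffffc000000000, exactly the constant the old define hard-coded. With 64K pages VA_BITS is 42, giving PAGE_OFFSET = 0xfffffe0000000000 and TASK_SIZE_64 = 1UL << 42 = 4TB, which matches the pgtable-2level-hwdef.h comment updated elsewhere in this patch.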
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index 2494fc0..aff0292 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -22,10 +22,14 @@
void *vdso;
} mm_context_t;
+#define INIT_MM_CONTEXT(name) \
+ .context.id_lock = __RAW_SPIN_LOCK_UNLOCKED(name.context.id_lock),
+
#define ASID(mm) ((mm)->context.id & 0xffff)
extern void paging_init(void);
extern void setup_mm_for_reboot(void);
extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
+extern void init_mem_pgprot(void);
#endif
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index e2bc385..a9eee33 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -151,12 +151,6 @@
{
unsigned int cpu = smp_processor_id();
-#ifdef CONFIG_SMP
- /* check for possible thread migration */
- if (!cpumask_empty(mm_cpumask(next)) &&
- !cpumask_test_cpu(cpu, mm_cpumask(next)))
- __flush_icache_all();
-#endif
if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next)
check_and_switch_context(next, tsk);
}
diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
new file mode 100644
index 0000000..453a179
--- /dev/null
+++ b/arch/arm64/include/asm/percpu.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2013 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_PERCPU_H
+#define __ASM_PERCPU_H
+
+#ifdef CONFIG_SMP
+
+static inline void set_my_cpu_offset(unsigned long off)
+{
+ asm volatile("msr tpidr_el1, %0" :: "r" (off) : "memory");
+}
+
+static inline unsigned long __my_cpu_offset(void)
+{
+ unsigned long off;
+ register unsigned long *sp asm ("sp");
+
+ /*
+ * We want to allow caching the value, so avoid using volatile and
+ * instead use a fake stack read to hazard against barrier().
+ */
+ asm("mrs %0, tpidr_el1" : "=r" (off) : "Q" (*sp));
+
+ return off;
+}
+#define __my_cpu_offset __my_cpu_offset()
+
+#else /* !CONFIG_SMP */
+
+#define set_my_cpu_offset(x) do { } while (0)
+
+#endif /* CONFIG_SMP */
+
+#include <asm-generic/percpu.h>
+
+#endif /* __ASM_PERCPU_H */
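Editor's note: the offset stashed in tpidr_el1 is what the generic this_cpu accessors add to a per-cpu symbol's address, and the non-volatile read lets the compiler cache it within a function. Caller-side sketch (the symbol name is hypothetical):

    #include <linux/percpu.h>

    DEFINE_PER_CPU(unsigned long, sketch_counter);

    static void sketch_bump(void)
    {
            __this_cpu_inc(sketch_counter); /* expands via __my_cpu_offset */
    }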
diff --git a/arch/arm64/include/asm/pgtable-2level-hwdef.h b/arch/arm64/include/asm/pgtable-2level-hwdef.h
index 0a8ed3f..2593b49 100644
--- a/arch/arm64/include/asm/pgtable-2level-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-2level-hwdef.h
@@ -21,10 +21,10 @@
* 8192 entries of 8 bytes each, occupying a 64KB page. Levels 0 and 1 are not
* used. The 2nd level table (PGD for Linux) can cover a range of 4TB, each
* entry representing 512MB. The user and kernel address spaces are limited to
- * 512GB and therefore we only use 1024 entries in the PGD.
+ * 4TB in the 64KB page configuration.
*/
#define PTRS_PER_PTE 8192
-#define PTRS_PER_PGD 1024
+#define PTRS_PER_PGD 8192
/*
* PGDIR_SHIFT determines the size a top-level page table entry can map.
diff --git a/arch/arm64/include/asm/pgtable-2level-types.h b/arch/arm64/include/asm/pgtable-2level-types.h
index 3c3ca7d..5f101e6 100644
--- a/arch/arm64/include/asm/pgtable-2level-types.h
+++ b/arch/arm64/include/asm/pgtable-2level-types.h
@@ -16,6 +16,8 @@
#ifndef __ASM_PGTABLE_2LEVEL_TYPES_H
#define __ASM_PGTABLE_2LEVEL_TYPES_H
+#include <asm/types.h>
+
typedef u64 pteval_t;
typedef u64 pgdval_t;
typedef pgdval_t pmdval_t;
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index 7eeed1a..d259917 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -25,16 +25,27 @@
/*
* Hardware page table definitions.
*
+ * Level 1 descriptor (PUD).
+ */
+
+#define PUD_TABLE_BIT (_AT(pgdval_t, 1) << 1)
+
+/*
* Level 2 descriptor (PMD).
*/
#define PMD_TYPE_MASK (_AT(pmdval_t, 3) << 0)
#define PMD_TYPE_FAULT (_AT(pmdval_t, 0) << 0)
#define PMD_TYPE_TABLE (_AT(pmdval_t, 3) << 0)
#define PMD_TYPE_SECT (_AT(pmdval_t, 1) << 0)
+#define PMD_TABLE_BIT (_AT(pmdval_t, 1) << 1)
/*
* Section
*/
+#define PMD_SECT_VALID (_AT(pmdval_t, 1) << 0)
+#define PMD_SECT_PROT_NONE (_AT(pmdval_t, 1) << 58)
+#define PMD_SECT_USER (_AT(pmdval_t, 1) << 6) /* AP[1] */
+#define PMD_SECT_RDONLY (_AT(pmdval_t, 1) << 7) /* AP[2] */
#define PMD_SECT_S (_AT(pmdval_t, 3) << 8)
#define PMD_SECT_AF (_AT(pmdval_t, 1) << 10)
#define PMD_SECT_NG (_AT(pmdval_t, 1) << 11)
@@ -53,6 +64,7 @@
#define PTE_TYPE_MASK (_AT(pteval_t, 3) << 0)
#define PTE_TYPE_FAULT (_AT(pteval_t, 0) << 0)
#define PTE_TYPE_PAGE (_AT(pteval_t, 3) << 0)
+#define PTE_TABLE_BIT (_AT(pteval_t, 1) << 1)
#define PTE_USER (_AT(pteval_t, 1) << 6) /* AP[1] */
#define PTE_RDONLY (_AT(pteval_t, 1) << 7) /* AP[2] */
#define PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index e333a24..3ed4424 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -25,15 +25,16 @@
* Software defined PTE bits definition.
*/
#define PTE_VALID (_AT(pteval_t, 1) << 0)
-#define PTE_PROT_NONE (_AT(pteval_t, 1) << 1) /* only when !PTE_VALID */
#define PTE_FILE (_AT(pteval_t, 1) << 2) /* only when !pte_present() */
#define PTE_DIRTY (_AT(pteval_t, 1) << 55)
#define PTE_SPECIAL (_AT(pteval_t, 1) << 56)
+#define PTE_WRITE (_AT(pteval_t, 1) << 57)
+#define PTE_PROT_NONE (_AT(pteval_t, 1) << 58) /* only when !PTE_VALID */
/*
* VMALLOC and SPARSEMEM_VMEMMAP ranges.
*/
-#define VMALLOC_START UL(0xffffff8000000000)
+#define VMALLOC_START (UL(0xffffffffffffffff) << VA_BITS)
#define VMALLOC_END (PAGE_OFFSET - UL(0x400000000) - SZ_64K)
#define vmemmap ((struct page *)(VMALLOC_END + SZ_64K))
@@ -51,60 +52,60 @@
#endif
#define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd_val(pgd))
-/*
- * The pgprot_* and protection_map entries will be fixed up at runtime to
- * include the cachable and bufferable bits based on memory policy, as well as
- * any architecture dependent bits like global/ASID and SMP shared mapping
- * bits.
- */
-#define _PAGE_DEFAULT PTE_TYPE_PAGE | PTE_AF
+#ifdef CONFIG_SMP
+#define PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
+#define PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
+#else
+#define PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF)
+#define PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF)
+#endif
-extern pgprot_t pgprot_default;
+#define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE))
+#define PROT_NORMAL_NC (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL_NC))
+#define PROT_NORMAL (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL))
-#define __pgprot_modify(prot,mask,bits) \
- __pgprot((pgprot_val(prot) & ~(mask)) | (bits))
+#define PROT_SECT_DEVICE_nGnRE (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE))
+#define PROT_SECT_NORMAL (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))
+#define PROT_SECT_NORMAL_EXEC (PROT_SECT_DEFAULT | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))
-#define _MOD_PROT(p, b) __pgprot_modify(p, 0, b)
+#define _PAGE_DEFAULT (PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL))
-#define PAGE_NONE __pgprot_modify(pgprot_default, PTE_TYPE_MASK, PTE_PROT_NONE)
-#define PAGE_SHARED _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
-#define PAGE_SHARED_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN)
-#define PAGE_COPY _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
-#define PAGE_COPY_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY)
-#define PAGE_READONLY _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
-#define PAGE_READONLY_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY)
-#define PAGE_KERNEL _MOD_PROT(pgprot_default, PTE_PXN | PTE_UXN | PTE_DIRTY)
-#define PAGE_KERNEL_EXEC _MOD_PROT(pgprot_default, PTE_UXN | PTE_DIRTY)
+#define PAGE_KERNEL __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE)
+#define PAGE_KERNEL_EXEC __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE)
-#define __PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_TYPE_MASK) | PTE_PROT_NONE)
-#define __PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
-#define __PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
-#define __PAGE_COPY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
-#define __PAGE_COPY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY)
-#define __PAGE_READONLY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY)
-#define __PAGE_READONLY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_RDONLY)
+#define PAGE_HYP __pgprot(_PAGE_DEFAULT | PTE_HYP)
+#define PAGE_HYP_DEVICE __pgprot(PROT_DEVICE_nGnRE | PTE_HYP)
-#endif /* __ASSEMBLY__ */
+#define PAGE_S2 __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
+#define PAGE_S2_DEVICE __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_DEVICE_nGnRE) | PTE_S2_RDWR | PTE_UXN)
-#define __P000 __PAGE_NONE
-#define __P001 __PAGE_READONLY
-#define __P010 __PAGE_COPY
-#define __P011 __PAGE_COPY
-#define __P100 __PAGE_READONLY_EXEC
-#define __P101 __PAGE_READONLY_EXEC
-#define __P110 __PAGE_COPY_EXEC
-#define __P111 __PAGE_COPY_EXEC
+#define PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_TYPE_MASK) | PTE_PROT_NONE | PTE_PXN | PTE_UXN)
+#define PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
+#define PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_WRITE)
+#define PAGE_COPY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
+#define PAGE_COPY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
+#define PAGE_READONLY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN)
+#define PAGE_READONLY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN)
+#define PAGE_EXECONLY __pgprot(_PAGE_DEFAULT | PTE_NG | PTE_PXN)
-#define __S000 __PAGE_NONE
-#define __S001 __PAGE_READONLY
-#define __S010 __PAGE_SHARED
-#define __S011 __PAGE_SHARED
-#define __S100 __PAGE_READONLY_EXEC
-#define __S101 __PAGE_READONLY_EXEC
-#define __S110 __PAGE_SHARED_EXEC
-#define __S111 __PAGE_SHARED_EXEC
+#define __P000 PAGE_NONE
+#define __P001 PAGE_READONLY
+#define __P010 PAGE_COPY
+#define __P011 PAGE_COPY
+#define __P100 PAGE_EXECONLY
+#define __P101 PAGE_READONLY_EXEC
+#define __P110 PAGE_COPY_EXEC
+#define __P111 PAGE_COPY_EXEC
-#ifndef __ASSEMBLY__
+#define __S000 PAGE_NONE
+#define __S001 PAGE_READONLY
+#define __S010 PAGE_SHARED
+#define __S011 PAGE_SHARED
+#define __S100 PAGE_EXECONLY
+#define __S101 PAGE_READONLY_EXEC
+#define __S110 PAGE_SHARED_EXEC
+#define __S111 PAGE_SHARED_EXEC
+
/*
* ZERO_PAGE is a global shared page that is always zero: used
* for zero-mapped memory areas etc..
@@ -119,7 +120,7 @@
#define pte_none(pte) (!pte_val(pte))
#define pte_clear(mm,addr,ptep) set_pte(ptep, __pte(0))
#define pte_page(pte) (pfn_to_page(pte_pfn(pte)))
-#define pte_offset_kernel(dir,addr) (pmd_page_vaddr(*(dir)) + __pte_index(addr))
+#define pte_offset_kernel(dir,addr) (pmd_page_vaddr(*(dir)) + pte_index(addr))
#define pte_offset_map(dir,addr) pte_offset_kernel((dir), (addr))
#define pte_offset_map_nested(dir,addr) pte_offset_kernel((dir), (addr))
@@ -129,26 +130,57 @@
/*
* The following only work if pte_present(). Undefined behaviour otherwise.
*/
-#define pte_present(pte) (pte_val(pte) & (PTE_VALID | PTE_PROT_NONE))
-#define pte_dirty(pte) (pte_val(pte) & PTE_DIRTY)
-#define pte_young(pte) (pte_val(pte) & PTE_AF)
-#define pte_special(pte) (pte_val(pte) & PTE_SPECIAL)
-#define pte_write(pte) (!(pte_val(pte) & PTE_RDONLY))
+#define pte_present(pte) (!!(pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)))
+#define pte_dirty(pte) (!!(pte_val(pte) & PTE_DIRTY))
+#define pte_young(pte) (!!(pte_val(pte) & PTE_AF))
+#define pte_special(pte) (!!(pte_val(pte) & PTE_SPECIAL))
+#define pte_write(pte) (!!(pte_val(pte) & PTE_WRITE))
#define pte_exec(pte) (!(pte_val(pte) & PTE_UXN))
-#define pte_valid_user(pte) \
- ((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))
+#define pte_valid_ng(pte) \
+ ((pte_val(pte) & (PTE_VALID | PTE_NG)) == (PTE_VALID | PTE_NG))
-#define PTE_BIT_FUNC(fn,op) \
-static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+ pte_val(pte) &= ~PTE_WRITE;
+ return pte;
+}
-PTE_BIT_FUNC(wrprotect, |= PTE_RDONLY);
-PTE_BIT_FUNC(mkwrite, &= ~PTE_RDONLY);
-PTE_BIT_FUNC(mkclean, &= ~PTE_DIRTY);
-PTE_BIT_FUNC(mkdirty, |= PTE_DIRTY);
-PTE_BIT_FUNC(mkold, &= ~PTE_AF);
-PTE_BIT_FUNC(mkyoung, |= PTE_AF);
-PTE_BIT_FUNC(mkspecial, |= PTE_SPECIAL);
+static inline pte_t pte_mkwrite(pte_t pte)
+{
+ pte_val(pte) |= PTE_WRITE;
+ return pte;
+}
+
+static inline pte_t pte_mkclean(pte_t pte)
+{
+ pte_val(pte) &= ~PTE_DIRTY;
+ return pte;
+}
+
+static inline pte_t pte_mkdirty(pte_t pte)
+{
+ pte_val(pte) |= PTE_DIRTY;
+ return pte;
+}
+
+static inline pte_t pte_mkold(pte_t pte)
+{
+ pte_val(pte) &= ~PTE_AF;
+ return pte;
+}
+
+static inline pte_t pte_mkyoung(pte_t pte)
+{
+ pte_val(pte) |= PTE_AF;
+ return pte;
+}
+
+static inline pte_t pte_mkspecial(pte_t pte)
+{
+ pte_val(pte) |= PTE_SPECIAL;
+ return pte;
+}
static inline void set_pte(pte_t *ptep, pte_t pte)
{
@@ -160,11 +192,13 @@
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte)
{
- if (pte_valid_user(pte)) {
- if (pte_exec(pte))
+ if (pte_valid_ng(pte)) {
+ if (!pte_special(pte) && pte_exec(pte))
__sync_icache_dcache(pte, addr);
- if (!pte_dirty(pte))
- pte = pte_wrprotect(pte);
+ if (pte_dirty(pte) && pte_write(pte))
+ pte_val(pte) &= ~PTE_RDONLY;
+ else
+ pte_val(pte) |= PTE_RDONLY;
}
set_pte(ptep, pte);
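The rewritten set_pte_at() above implements a software dirty bit: hardware write permission (a cleared PTE_RDONLY) is granted only when the software PTE_WRITE and PTE_DIRTY bits are both set, so the first store to a clean writable page faults and the fault handler can mark it dirty. A minimal sketch of that truth table (illustrative, not kernel code):

    #include <assert.h>
    #include <stdbool.h>

    /* Mirrors the set_pte_at() logic: RDONLY is set unless dirty && write. */
    static bool hw_rdonly(bool write, bool dirty)
    {
        return !(write && dirty);
    }

    int main(void)
    {
        assert(hw_rdonly(false, false));   /* read-only mapping stays RDONLY */
        assert(hw_rdonly(true, false));    /* clean writable page: trap first write */
        assert(!hw_rdonly(true, true));    /* dirty writable page: writes allowed */
        return 0;
    }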
@@ -173,20 +207,78 @@
/*
* Huge pte definitions.
*/
-#define pte_huge(pte) ((pte_val(pte) & PTE_TYPE_MASK) == PTE_TYPE_HUGEPAGE)
-#define pte_mkhuge(pte) (__pte((pte_val(pte) & ~PTE_TYPE_MASK) | PTE_TYPE_HUGEPAGE))
+#define pte_huge(pte) (!(pte_val(pte) & PTE_TABLE_BIT))
+#define pte_mkhuge(pte) (__pte(pte_val(pte) & ~PTE_TABLE_BIT))
+
+/*
+ * Hugetlb definitions.
+ */
+#define HUGE_MAX_HSTATE 2
+#define HPAGE_SHIFT PMD_SHIFT
+#define HPAGE_SIZE (_AC(1, UL) << HPAGE_SHIFT)
+#define HPAGE_MASK (~(HPAGE_SIZE - 1))
+#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
#define __HAVE_ARCH_PTE_SPECIAL
+static inline pte_t pmd_pte(pmd_t pmd)
+{
+ return __pte(pmd_val(pmd));
+}
+
+static inline pmd_t pte_pmd(pte_t pte)
+{
+ return __pmd(pte_val(pte));
+}
+
+/*
+ * THP definitions.
+ */
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define pmd_trans_huge(pmd) (pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT))
+#define pmd_trans_splitting(pmd) pte_special(pmd_pte(pmd))
+#endif
+
+#define pmd_young(pmd) pte_young(pmd_pte(pmd))
+#define pmd_wrprotect(pmd) pte_pmd(pte_wrprotect(pmd_pte(pmd)))
+#define pmd_mksplitting(pmd) pte_pmd(pte_mkspecial(pmd_pte(pmd)))
+#define pmd_mkold(pmd) pte_pmd(pte_mkold(pmd_pte(pmd)))
+#define pmd_mkwrite(pmd) pte_pmd(pte_mkwrite(pmd_pte(pmd)))
+#define pmd_mkdirty(pmd) pte_pmd(pte_mkdirty(pmd_pte(pmd)))
+#define pmd_mkyoung(pmd) pte_pmd(pte_mkyoung(pmd_pte(pmd)))
+#define pmd_mknotpresent(pmd) (__pmd(pmd_val(pmd) &= ~PMD_TYPE_MASK))
+
+#define __HAVE_ARCH_PMD_WRITE
+#define pmd_write(pmd) pte_write(pmd_pte(pmd))
+
+#define pmd_mkhuge(pmd) (__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))
+
+#define pmd_pfn(pmd) (((pmd_val(pmd) & PMD_MASK) & PHYS_MASK) >> PAGE_SHIFT)
+#define pfn_pmd(pfn,prot) (__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
+#define mk_pmd(page,prot) pfn_pmd(page_to_pfn(page),prot)
+
+#define pmd_page(pmd) pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))
+
+#define set_pmd_at(mm, addr, pmdp, pmd) set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd))
+
+static inline int has_transparent_hugepage(void)
+{
+ return 1;
+}
+
+#define __pgprot_modify(prot,mask,bits) \
+ __pgprot((pgprot_val(prot) & ~(mask)) | (bits))
+
/*
* Mark the prot value as uncacheable and unbufferable.
*/
#define pgprot_noncached(prot) \
- __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE))
+ __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE) | PTE_PXN | PTE_UXN)
#define pgprot_writecombine(prot) \
- __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_GRE))
+ __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
#define pgprot_dmacoherent(prot) \
- __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC))
+ __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
@@ -200,7 +292,7 @@
static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
*pmdp = pmd;
- dsb();
+ dsb(ishst);
}
static inline void pmd_clear(pmd_t *pmdp)
@@ -230,7 +322,7 @@
static inline void set_pud(pud_t *pudp, pud_t pud)
{
*pudp = pud;
- dsb();
+ dsb(ishst);
}
static inline void pud_clear(pud_t *pudp)
@@ -263,16 +355,21 @@
#endif
/* Find an entry in the third-level page table.. */
-#define __pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+#define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
- PTE_PROT_NONE | PTE_VALID;
+ PTE_PROT_NONE | PTE_VALID | PTE_WRITE;
pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
return pte;
}
+static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
+{
+ return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
+}
+
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
@@ -284,15 +381,17 @@
* bits 0-1: present (must be zero)
* bit 2: PTE_FILE
* bits 3-8: swap type
- * bits 9-63: swap offset
+ * bits 9-57: swap offset
*/
#define __SWP_TYPE_SHIFT 3
#define __SWP_TYPE_BITS 6
+#define __SWP_OFFSET_BITS 49
#define __SWP_TYPE_MASK ((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
+#define __SWP_OFFSET_MASK ((1UL << __SWP_OFFSET_BITS) - 1)
#define __swp_type(x) (((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
-#define __swp_offset(x) ((x).val >> __SWP_OFFSET_SHIFT)
+#define __swp_offset(x) (((x).val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
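A userspace round-trip of the swap-entry encoding above, assuming the same shifts and the new 49-bit offset mask (illustrative, not kernel code):

    #include <assert.h>
    #include <stdint.h>

    #define SWP_TYPE_SHIFT    3
    #define SWP_TYPE_BITS     6
    #define SWP_OFFSET_BITS   49
    #define SWP_TYPE_MASK     ((1UL << SWP_TYPE_BITS) - 1)
    #define SWP_OFFSET_SHIFT  (SWP_TYPE_BITS + SWP_TYPE_SHIFT)
    #define SWP_OFFSET_MASK   ((1UL << SWP_OFFSET_BITS) - 1)

    int main(void)
    {
        uint64_t type = 5, offset = 0x123456789abULL;      /* fits in 49 bits */
        uint64_t val = (type << SWP_TYPE_SHIFT) | (offset << SWP_OFFSET_SHIFT);

        assert(((val >> SWP_TYPE_SHIFT) & SWP_TYPE_MASK) == type);
        assert(((val >> SWP_OFFSET_SHIFT) & SWP_OFFSET_MASK) == offset);
        return 0;
    }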
@@ -300,7 +399,7 @@
/*
* Ensure that there are not more swap files than can be encoded in the kernel
- * the PTEs.
+ * PTEs.
*/
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)
@@ -308,13 +407,13 @@
* Encode and decode a file entry:
* bits 0-1: present (must be zero)
* bit 2: PTE_FILE
- * bits 3-63: file offset / PAGE_SIZE
+ * bits 3-57: file offset / PAGE_SIZE
*/
#define pte_file(pte) (pte_val(pte) & PTE_FILE)
#define pte_to_pgoff(x) (pte_val(x) >> 3)
#define pgoff_to_pte(x) __pte(((x) << 3) | PTE_FILE)
-#define PTE_FILE_MAX_BITS 61
+#define PTE_FILE_MAX_BITS 55
extern int kern_addr_valid(unsigned long addr);
diff --git a/arch/arm64/include/asm/proc-fns.h b/arch/arm64/include/asm/proc-fns.h
index 7cdf466..0c657bb 100644
--- a/arch/arm64/include/asm/proc-fns.h
+++ b/arch/arm64/include/asm/proc-fns.h
@@ -26,11 +26,14 @@
#include <asm/page.h>
struct mm_struct;
+struct cpu_suspend_ctx;
extern void cpu_cache_off(void);
extern void cpu_do_idle(void);
extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm);
extern void cpu_reset(unsigned long addr) __attribute__((noreturn));
+extern void cpu_do_suspend(struct cpu_suspend_ctx *ptr);
+extern u64 cpu_do_resume(phys_addr_t ptr, u64 idmap_ttbr);
#include <asm/memory.h>
diff --git a/arch/arm64/include/asm/psci.h b/arch/arm64/include/asm/psci.h
index 0604237..d15ab8b4 100644
--- a/arch/arm64/include/asm/psci.h
+++ b/arch/arm64/include/asm/psci.h
@@ -14,25 +14,6 @@
#ifndef __ASM_PSCI_H
#define __ASM_PSCI_H
-#define PSCI_POWER_STATE_TYPE_STANDBY 0
-#define PSCI_POWER_STATE_TYPE_POWER_DOWN 1
-
-struct psci_power_state {
- u16 id;
- u8 type;
- u8 affinity_level;
-};
-
-struct psci_operations {
- int (*cpu_suspend)(struct psci_power_state state,
- unsigned long entry_point);
- int (*cpu_off)(struct psci_power_state state);
- int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
- int (*migrate)(unsigned long cpuid);
-};
-
-extern struct psci_operations psci_ops;
-
-int psci_init(void);
+void psci_init(void);
#endif /* __ASM_PSCI_H */
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index 5876877..f124c0f 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -76,6 +76,7 @@
/* Architecturally defined mapping between AArch32 and AArch64 registers */
#define compat_usr(x) regs[(x)]
+#define compat_fp regs[11]
#define compat_sp regs[13]
#define compat_lr regs[14]
#define compat_sp_hyp regs[15]
@@ -177,7 +178,7 @@
return 0;
}
-#define instruction_pointer(regs) (regs)->pc
+#define instruction_pointer(regs) ((unsigned long)(regs)->pc)
#ifdef CONFIG_SMP
extern unsigned long profile_pc(struct pt_regs *regs);
diff --git a/arch/arm64/include/asm/sigcontext.h b/arch/arm64/include/asm/sigcontext.h
deleted file mode 100644
index dca1094..0000000
--- a/arch/arm64/include/asm/sigcontext.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-#ifndef __ASM_SIGCONTEXT_H
-#define __ASM_SIGCONTEXT_H
-
-#include <uapi/asm/sigcontext.h>
-
-/*
- * Auxiliary context saved in the sigcontext.__reserved array. Not exported to
- * user space as it will change with the addition of new context. User space
- * should check the magic/size information.
- */
-struct aux_context {
- struct fpsimd_context fpsimd;
- /* additional context to be added before "end" */
- struct _aarch64_ctx end;
-};
-#endif
diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
index 4b8023c..a498f2c 100644
--- a/arch/arm64/include/asm/smp.h
+++ b/arch/arm64/include/asm/smp.h
@@ -60,21 +60,14 @@
void *stack;
};
extern struct secondary_data secondary_data;
-extern void secondary_holding_pen(void);
-extern volatile unsigned long secondary_holding_pen_release;
+extern void secondary_entry(void);
extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
-struct device_node;
+extern int __cpu_disable(void);
-struct smp_enable_ops {
- const char *name;
- int (*init_cpu)(struct device_node *, int);
- int (*prepare_cpu)(int);
-};
-
-extern const struct smp_enable_ops smp_spin_table_ops;
-extern const struct smp_enable_ops smp_psci_ops;
+extern void __cpu_die(unsigned int cpu);
+extern void cpu_die(void);
#endif /* ifndef __ASM_SMP_H */
diff --git a/arch/arm64/include/asm/smp_plat.h b/arch/arm64/include/asm/smp_plat.h
index ed43a0d..59e2823 100644
--- a/arch/arm64/include/asm/smp_plat.h
+++ b/arch/arm64/include/asm/smp_plat.h
@@ -21,6 +21,19 @@
#include <asm/types.h>
+struct mpidr_hash {
+ u64 mask;
+ u32 shift_aff[4];
+ u32 bits;
+};
+
+extern struct mpidr_hash mpidr_hash;
+
+static inline u32 mpidr_hash_size(void)
+{
+ return 1 << mpidr_hash.bits;
+}
+
/*
* Logical CPU mapping.
*/
diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
index 7065e92..c45b7b1 100644
--- a/arch/arm64/include/asm/spinlock.h
+++ b/arch/arm64/include/asm/spinlock.h
@@ -22,17 +22,10 @@
/*
* Spinlock implementation.
*
- * The old value is read exclusively and the new one, if unlocked, is written
- * exclusively. In case of failure, the loop is restarted.
- *
* The memory barriers are implicit with the load-acquire and store-release
* instructions.
- *
- * Unlocked value: 0
- * Locked value: 1
*/
-#define arch_spin_is_locked(x) ((x)->lock != 0)
#define arch_spin_unlock_wait(lock) \
do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0)
@@ -41,31 +34,51 @@
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
unsigned int tmp;
+ arch_spinlock_t lockval, newval;
asm volatile(
- " sevl\n"
- "1: wfe\n"
- "2: ldaxr %w0, %1\n"
- " cbnz %w0, 1b\n"
- " stxr %w0, %w2, %1\n"
- " cbnz %w0, 2b\n"
- : "=&r" (tmp), "+Q" (lock->lock)
- : "r" (1)
- : "cc", "memory");
+ /* Atomically increment the next ticket. */
+" prfm pstl1strm, %3\n"
+"1: ldaxr %w0, %3\n"
+" add %w1, %w0, %w5\n"
+" stxr %w2, %w1, %3\n"
+" cbnz %w2, 1b\n"
+ /* Did we get the lock? */
+" eor %w1, %w0, %w0, ror #16\n"
+" cbz %w1, 3f\n"
+ /*
+ * No: spin on the owner. Send a local event to avoid missing an
+ * unlock before the exclusive load.
+ */
+" sevl\n"
+"2: wfe\n"
+" ldaxrh %w2, %4\n"
+" eor %w1, %w2, %w0, lsr #16\n"
+" cbnz %w1, 2b\n"
+ /* We got the lock. Critical section starts here. */
+"3:"
+ : "=&r" (lockval), "=&r" (newval), "=&r" (tmp), "+Q" (*lock)
+ : "Q" (lock->owner), "I" (1 << TICKET_SHIFT)
+ : "memory");
}
static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
unsigned int tmp;
+ arch_spinlock_t lockval;
asm volatile(
- " ldaxr %w0, %1\n"
- " cbnz %w0, 1f\n"
- " stxr %w0, %w2, %1\n"
- "1:\n"
- : "=&r" (tmp), "+Q" (lock->lock)
- : "r" (1)
- : "cc", "memory");
+" prfm pstl1strm, %2\n"
+"1: ldaxr %w0, %2\n"
+" eor %w1, %w0, %w0, ror #16\n"
+" cbnz %w1, 2f\n"
+" add %w0, %w0, %3\n"
+" stxr %w1, %w0, %2\n"
+" cbnz %w1, 1b\n"
+"2:"
+ : "=&r" (lockval), "=&r" (tmp), "+Q" (*lock)
+ : "I" (1 << TICKET_SHIFT)
+ : "memory");
return !tmp;
}
@@ -73,10 +86,29 @@
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
asm volatile(
- " stlr %w1, %0\n"
- : "=Q" (lock->lock) : "r" (0) : "memory");
+" stlrh %w1, %0\n"
+ : "=Q" (lock->owner)
+ : "r" (lock->owner + 1)
+ : "memory");
}
+static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
+{
+ return lock.owner == lock.next;
+}
+
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
+{
+ return !arch_spin_value_unlocked(ACCESS_ONCE(*lock));
+}
+
+static inline int arch_spin_is_contended(arch_spinlock_t *lock)
+{
+ arch_spinlock_t lockval = ACCESS_ONCE(*lock);
+ return (lockval.next - lockval.owner) > 1;
+}
+#define arch_spin_is_contended arch_spin_is_contended
+
/*
* Write lock implementation.
*
@@ -100,7 +132,7 @@
" cbnz %w0, 2b\n"
: "=&r" (tmp), "+Q" (rw->lock)
: "r" (0x80000000)
- : "cc", "memory");
+ : "memory");
}
static inline int arch_write_trylock(arch_rwlock_t *rw)
@@ -114,7 +146,7 @@
"1:\n"
: "=&r" (tmp), "+Q" (rw->lock)
: "r" (0x80000000)
- : "cc", "memory");
+ : "memory");
return !tmp;
}
@@ -155,7 +187,7 @@
" cbnz %w1, 2b\n"
: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
:
- : "cc", "memory");
+ : "memory");
}
static inline void arch_read_unlock(arch_rwlock_t *rw)
@@ -169,7 +201,7 @@
" cbnz %w1, 1b\n"
: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
:
- : "cc", "memory");
+ : "memory");
}
static inline int arch_read_trylock(arch_rwlock_t *rw)
@@ -184,7 +216,7 @@
"1:\n"
: "=&r" (tmp), "+r" (tmp2), "+Q" (rw->lock)
:
- : "cc", "memory");
+ : "memory");
return !tmp2;
}
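A C sketch of the ticket protocol the new asm implements, using C11 atomics in place of ldaxr/stxr and a busy-wait in place of wfe/sevl (illustrative only; the real lock packs owner and next into a single 32-bit word, per the spinlock_types.h change below, and increments next with an exclusive load/store pair):

    #include <stdatomic.h>
    #include <stdint.h>

    struct ticket_lock {
        atomic_ushort owner;    /* ticket currently being served */
        atomic_ushort next;     /* next ticket to hand out */
    };

    static void ticket_lock(struct ticket_lock *l)
    {
        uint16_t me = atomic_fetch_add(&l->next, 1);    /* take a ticket */

        while (atomic_load(&l->owner) != me)
            ;                   /* spin; the asm sleeps with wfe instead */
    }

    static void ticket_unlock(struct ticket_lock *l)
    {
        atomic_fetch_add(&l->owner, 1);                 /* serve the next ticket */
    }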
diff --git a/arch/arm64/include/asm/spinlock_types.h b/arch/arm64/include/asm/spinlock_types.h
index 9a49434..8769275 100644
--- a/arch/arm64/include/asm/spinlock_types.h
+++ b/arch/arm64/include/asm/spinlock_types.h
@@ -20,14 +20,14 @@
# error "please don't include this file directly"
#endif
-/* We only require natural alignment for exclusive accesses. */
-#define __lock_aligned
+#define TICKET_SHIFT 16
typedef struct {
- volatile unsigned int lock;
-} arch_spinlock_t;
+ u16 owner;
+ u16 next;
+} __aligned(4) arch_spinlock_t;
-#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { 0 , 0 }
typedef struct {
volatile unsigned int lock;
diff --git a/arch/arm64/include/asm/suspend.h b/arch/arm64/include/asm/suspend.h
new file mode 100644
index 0000000..e9c149c
--- /dev/null
+++ b/arch/arm64/include/asm/suspend.h
@@ -0,0 +1,27 @@
+#ifndef __ASM_SUSPEND_H
+#define __ASM_SUSPEND_H
+
+#define NR_CTX_REGS 11
+
+/*
+ * struct cpu_suspend_ctx must be 16-byte aligned since it is allocated on
+ * the stack, which must be 16-byte aligned on v8
+ */
+struct cpu_suspend_ctx {
+ /*
+ * This struct must be kept in sync with
+ * cpu_do_{suspend/resume} in mm/proc.S
+ */
+ u64 ctx_regs[NR_CTX_REGS];
+ u64 sp;
+} __aligned(16);
+
+struct sleep_save_sp {
+ phys_addr_t *save_ptr_stash;
+ phys_addr_t save_ptr_stash_phys;
+};
+
+extern void cpu_resume(void);
+extern int cpu_suspend(unsigned long);
+
+#endif
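A compile-time check of the layout constraint stated in the comment above (a userspace sketch; the field sizes are taken from the struct as added here):

    #include <stdint.h>

    struct cpu_suspend_ctx_sketch {
        uint64_t ctx_regs[11];          /* NR_CTX_REGS */
        uint64_t sp;
    } __attribute__((aligned(16)));

    /* 12 u64 fields pack to 96 bytes, a 16-byte multiple, so on-stack
     * instances keep the v8 stack 16-byte aligned. */
    _Static_assert(sizeof(struct cpu_suspend_ctx_sketch) % 16 == 0,
                   "ctx must be a multiple of 16 bytes");
    _Static_assert(_Alignof(struct cpu_suspend_ctx_sketch) == 16,
                   "ctx must be 16-byte aligned");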
diff --git a/arch/arm64/include/asm/syscall.h b/arch/arm64/include/asm/syscall.h
index d7de933..35cfb7e 100644
--- a/arch/arm64/include/asm/syscall.h
+++ b/arch/arm64/include/asm/syscall.h
@@ -61,6 +61,9 @@
unsigned int i, unsigned int n,
unsigned long *args)
{
+ if (n == 0)
+ return;
+
if (i + n > SYSCALL_MAX_ARGS) {
unsigned long *args_bad = args + SYSCALL_MAX_ARGS - i;
unsigned int n_bad = n + i - SYSCALL_MAX_ARGS;
@@ -84,6 +87,9 @@
unsigned int i, unsigned int n,
const unsigned long *args)
{
+ if (n == 0)
+ return;
+
if (i + n > SYSCALL_MAX_ARGS) {
pr_warning("%s called with max args %d, handling only %d\n",
__func__, i + n, SYSCALL_MAX_ARGS);
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index c77b13b..c09cbf6 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -24,10 +24,10 @@
#include <linux/compiler.h>
#ifndef CONFIG_ARM64_64K_PAGES
-#define THREAD_SIZE_ORDER 1
+#define THREAD_SIZE_ORDER 2
#endif
-#define THREAD_SIZE 8192
+#define THREAD_SIZE 16384
#define THREAD_START_SP (THREAD_SIZE - 16)
#ifndef __ASSEMBLY__
diff --git a/arch/arm64/include/asm/timex.h b/arch/arm64/include/asm/timex.h
index b24a31a..81a076e 100644
--- a/arch/arm64/include/asm/timex.h
+++ b/arch/arm64/include/asm/timex.h
@@ -16,14 +16,14 @@
#ifndef __ASM_TIMEX_H
#define __ASM_TIMEX_H
+#include <asm/arch_timer.h>
+
/*
* Use the current timer as a cycle counter since this is what we use for
* the delay loop.
*/
-#define get_cycles() ({ cycles_t c; read_current_timer(&c); c; })
+#define get_cycles() arch_counter_get_cntvct()
#include <asm-generic/timex.h>
-#define ARCH_HAS_READ_CURRENT_TIMER
-
#endif
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
index 654f096..46b3beb 100644
--- a/arch/arm64/include/asm/tlb.h
+++ b/arch/arm64/include/asm/tlb.h
@@ -187,4 +187,10 @@
#define tlb_migrate_finish(mm) do { } while (0)
+static inline void
+tlb_remove_pmd_tlb_entry(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr)
+{
+ tlb_add_flush(tlb, addr);
+}
+
#endif
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index 122d632..3083a08 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -72,9 +72,9 @@
*/
static inline void flush_tlb_all(void)
{
- dsb();
+ dsb(ishst);
asm("tlbi vmalle1is");
- dsb();
+ dsb(ish);
isb();
}
@@ -82,9 +82,9 @@
{
unsigned long asid = (unsigned long)ASID(mm) << 48;
- dsb();
+ dsb(ishst);
asm("tlbi aside1is, %0" : : "r" (asid));
- dsb();
+ dsb(ish);
}
static inline void flush_tlb_page(struct vm_area_struct *vma,
@@ -93,9 +93,9 @@
unsigned long addr = uaddr >> 12 |
((unsigned long)ASID(vma->vm_mm) << 48);
- dsb();
+ dsb(ishst);
asm("tlbi vae1is, %0" : : "r" (addr));
- dsb();
+ dsb(ish);
}
/*
@@ -114,9 +114,11 @@
* set_pte() does not have a DSB, so make sure that the page table
* write is visible.
*/
- dsb();
+ dsb(ishst);
}
+#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)
+
#endif
#endif
diff --git a/arch/arm64/include/asm/topology.h b/arch/arm64/include/asm/topology.h
new file mode 100644
index 0000000..0172e6d
--- /dev/null
+++ b/arch/arm64/include/asm/topology.h
@@ -0,0 +1,39 @@
+#ifndef __ASM_TOPOLOGY_H
+#define __ASM_TOPOLOGY_H
+
+#ifdef CONFIG_SMP
+
+#include <linux/cpumask.h>
+
+struct cpu_topology {
+ int thread_id;
+ int core_id;
+ int cluster_id;
+ cpumask_t thread_sibling;
+ cpumask_t core_sibling;
+};
+
+extern struct cpu_topology cpu_topology[NR_CPUS];
+
+#define topology_physical_package_id(cpu) (cpu_topology[cpu].cluster_id)
+#define topology_core_id(cpu) (cpu_topology[cpu].core_id)
+#define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_sibling)
+#define topology_thread_cpumask(cpu) (&cpu_topology[cpu].thread_sibling)
+
+#define mc_capable() (cpu_topology[0].cluster_id != -1)
+#define smt_capable() (cpu_topology[0].thread_id != -1)
+
+void init_cpu_topology(void);
+void store_cpu_topology(unsigned int cpuid);
+const struct cpumask *cpu_coregroup_mask(int cpu);
+
+#else
+
+static inline void init_cpu_topology(void) { }
+static inline void store_cpu_topology(unsigned int cpuid) { }
+
+#endif
+
+#include <asm-generic/topology.h>
+
+#endif /* __ASM_TOPOLOGY_H */
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 008f848..8e8df01 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -83,7 +83,7 @@
* Returns 1 if the range is valid, 0 otherwise.
*
* This is equivalent to the following test:
- * (u65)addr + (u65)size < (u65)current->addr_limit
+ * (u65)addr + (u65)size <= current->addr_limit
*
* This needs 65-bit arithmetic.
*/
@@ -91,7 +91,7 @@
({ \
unsigned long flag, roksum; \
__chk_user_ptr(addr); \
- asm("adds %1, %1, %3; ccmp %1, %4, #2, cc; cset %0, cc" \
+ asm("adds %1, %1, %3; ccmp %1, %4, #2, cc; cset %0, ls" \
: "=&r" (flag), "=&r" (roksum) \
: "1" (addr), "Ir" (size), \
"r" (current_thread_info()->addr_limit) \
@@ -100,6 +100,7 @@
})
#define access_ok(type, addr, size) __range_ok(addr, size)
+#define user_addr_max get_fs
/*
* The "__xxx" versions of the user access functions do not verify the address
@@ -238,9 +239,6 @@
extern unsigned long __must_check __copy_in_user(void __user *to, const void __user *from, unsigned long n);
extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
-extern unsigned long __must_check __strncpy_from_user(char *to, const char __user *from, unsigned long count);
-extern unsigned long __must_check __strnlen_user(const char __user *s, long n);
-
static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
{
if (access_ok(VERIFY_READ, from, n))
@@ -274,24 +272,9 @@
return n;
}
-static inline long __must_check strncpy_from_user(char *dst, const char __user *src, long count)
-{
- long res = -EFAULT;
- if (access_ok(VERIFY_READ, src, 1))
- res = __strncpy_from_user(dst, src, count);
- return res;
-}
+extern long strncpy_from_user(char *dest, const char __user *src, long count);
-#define strlen_user(s) strnlen_user(s, ~0UL >> 1)
-
-static inline long __must_check strnlen_user(const char __user *s, long n)
-{
- unsigned long res = 0;
-
- if (__addr_ok(s))
- res = __strnlen_user(s, n);
-
- return res;
-}
+extern __must_check long strlen_user(const char __user *str);
+extern __must_check long strnlen_user(const char __user *str, long n);
#endif /* __ASM_UACCESS_H */
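The __range_ok change above fixes the final condition: with `cset %0, ls` the flag is 1 only when the 65-bit sum addr + size did not carry and is lower-or-same than addr_limit, matching the updated comment. A C sketch of the same test, with __builtin_add_overflow standing in for the adds/ccmp pair (illustrative, not the kernel code):

    #include <stdbool.h>
    #include <stdint.h>

    static bool range_ok(uint64_t addr, uint64_t size, uint64_t limit)
    {
        uint64_t end;

        if (__builtin_add_overflow(addr, size, &end))
            return false;       /* carry out: the 65-bit sum exceeds any limit */
        return end <= limit;    /* the 'ls' (lower or same) condition */
    }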
diff --git a/arch/arm64/include/asm/word-at-a-time.h b/arch/arm64/include/asm/word-at-a-time.h
new file mode 100644
index 0000000..aab5bf0
--- /dev/null
+++ b/arch/arm64/include/asm/word-at-a-time.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright (C) 2013 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_WORD_AT_A_TIME_H
+#define __ASM_WORD_AT_A_TIME_H
+
+#ifndef __AARCH64EB__
+
+#include <linux/kernel.h>
+
+struct word_at_a_time {
+ const unsigned long one_bits, high_bits;
+};
+
+#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
+
+static inline unsigned long has_zero(unsigned long a, unsigned long *bits,
+ const struct word_at_a_time *c)
+{
+ unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits;
+ *bits = mask;
+ return mask;
+}
+
+#define prep_zero_mask(a, bits, c) (bits)
+
+static inline unsigned long create_zero_mask(unsigned long bits)
+{
+ bits = (bits - 1) & ~bits;
+ return bits >> 7;
+}
+
+static inline unsigned long find_zero(unsigned long mask)
+{
+ return fls64(mask) >> 3;
+}
+
+#define zero_bytemask(mask) (mask)
+
+#else /* __AARCH64EB__ */
+#include <asm-generic/word-at-a-time.h>
+#endif
+
+/*
+ * Load an unaligned word from kernel space.
+ *
+ * In the (very unlikely) case of the word being a page-crosser
+ * and the next page not being mapped, take the exception and
+ * return zeroes in the non-existing part.
+ */
+static inline unsigned long load_unaligned_zeropad(const void *addr)
+{
+ unsigned long ret, offset;
+
+ /* Load word from unaligned pointer addr */
+ asm(
+ "1: ldr %0, %3\n"
+ "2:\n"
+ " .pushsection .fixup,\"ax\"\n"
+ " .align 2\n"
+ "3: and %1, %2, #0x7\n"
+ " bic %2, %2, #0x7\n"
+ " ldr %0, [%2]\n"
+ " lsl %1, %1, #0x3\n"
+#ifndef __AARCH64EB__
+ " lsr %0, %0, %1\n"
+#else
+ " lsl %0, %0, %1\n"
+#endif
+ " b 2b\n"
+ " .popsection\n"
+ " .pushsection __ex_table,\"a\"\n"
+ " .align 3\n"
+ " .quad 1b, 3b\n"
+ " .popsection"
+ : "=&r" (ret), "=&r" (offset)
+ : "r" (addr), "Q" (*(unsigned long *)addr));
+
+ return ret;
+}
+
+#endif /* __ASM_WORD_AT_A_TIME_H */
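How the little-endian helpers above combine to locate the first NUL byte in a word (a userspace sketch assuming 64-bit little-endian; __builtin_clzll stands in for the kernel's fls64()):

    #include <assert.h>
    #include <stdint.h>

    #define REPEAT_BYTE(x)  ((uint64_t)0x0101010101010101ULL * (x))

    int main(void)
    {
        const uint64_t ones = REPEAT_BYTE(0x01), highs = REPEAT_BYTE(0x80);
        uint64_t a = 0x0000000000636261ULL;         /* "abc" + NULs, little-endian */
        uint64_t mask = ((a - ones) & ~a) & highs;  /* has_zero() */

        mask = (mask - 1) & ~mask;                  /* create_zero_mask() */
        mask >>= 7;
        /* find_zero(): fls64(mask) >> 3 is the index of the first NUL byte */
        assert(((64 - __builtin_clzll(mask)) >> 3) == 3);
        return 0;
    }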
diff --git a/arch/arm64/include/uapi/asm/Kbuild b/arch/arm64/include/uapi/asm/Kbuild
index e4b78bd..942376d 100644
--- a/arch/arm64/include/uapi/asm/Kbuild
+++ b/arch/arm64/include/uapi/asm/Kbuild
@@ -9,6 +9,7 @@
header-y += fcntl.h
header-y += hwcap.h
header-y += kvm_para.h
+header-y += perf_regs.h
header-y += param.h
header-y += ptrace.h
header-y += setup.h
diff --git a/arch/arm64/include/uapi/asm/perf_regs.h b/arch/arm64/include/uapi/asm/perf_regs.h
new file mode 100644
index 0000000..172b831
--- /dev/null
+++ b/arch/arm64/include/uapi/asm/perf_regs.h
@@ -0,0 +1,40 @@
+#ifndef _ASM_ARM64_PERF_REGS_H
+#define _ASM_ARM64_PERF_REGS_H
+
+enum perf_event_arm_regs {
+ PERF_REG_ARM64_X0,
+ PERF_REG_ARM64_X1,
+ PERF_REG_ARM64_X2,
+ PERF_REG_ARM64_X3,
+ PERF_REG_ARM64_X4,
+ PERF_REG_ARM64_X5,
+ PERF_REG_ARM64_X6,
+ PERF_REG_ARM64_X7,
+ PERF_REG_ARM64_X8,
+ PERF_REG_ARM64_X9,
+ PERF_REG_ARM64_X10,
+ PERF_REG_ARM64_X11,
+ PERF_REG_ARM64_X12,
+ PERF_REG_ARM64_X13,
+ PERF_REG_ARM64_X14,
+ PERF_REG_ARM64_X15,
+ PERF_REG_ARM64_X16,
+ PERF_REG_ARM64_X17,
+ PERF_REG_ARM64_X18,
+ PERF_REG_ARM64_X19,
+ PERF_REG_ARM64_X20,
+ PERF_REG_ARM64_X21,
+ PERF_REG_ARM64_X22,
+ PERF_REG_ARM64_X23,
+ PERF_REG_ARM64_X24,
+ PERF_REG_ARM64_X25,
+ PERF_REG_ARM64_X26,
+ PERF_REG_ARM64_X27,
+ PERF_REG_ARM64_X28,
+ PERF_REG_ARM64_X29,
+ PERF_REG_ARM64_LR,
+ PERF_REG_ARM64_SP,
+ PERF_REG_ARM64_PC,
+ PERF_REG_ARM64_MAX,
+};
+#endif /* _ASM_ARM64_PERF_REGS_H */
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 3e67066..abb33d7 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -9,16 +9,20 @@
arm64-obj-y := cputable.o debug-monitors.o entry.o irq.o fpsimd.o \
entry-fpsimd.o process.o ptrace.o setup.o signal.o \
sys.o stacktrace.o time.o traps.o io.o vdso.o \
- hyp-stub.o psci.o opcodes.o
+ hyp-stub.o psci.o cpu_ops.o insn.o opcodes.o
arm64-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \
sys_compat.o
arm64-obj-$(CONFIG_MODULES) += arm64ksyms.o module.o
-arm64-obj-$(CONFIG_SMP) += smp.o smp_spin_table.o smp_psci.o
+arm64-obj-$(CONFIG_SMP) += smp.o smp_spin_table.o
+arm64-obj-$(CONFIG_SMP) += topology.o
+arm64-obj-$(CONFIG_PERF_EVENTS) += perf_regs.o
arm64-obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o
-arm64-obj-$(CONFIG_HAVE_HW_BREAKPOINT)+= hw_breakpoint.o
+arm64-obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
arm64-obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
-arm64-obj-$(CONFIG_ARM_CPU_SUSPEND) += sleep.o suspend.o
+arm64-obj-$(CONFIG_ARM64_CPU_SUSPEND) += sleep.o suspend.o
+arm64-obj-$(CONFIG_KGDB) += kgdb.o
+arm64-obj-$(CONFIG_JUMP_LABEL) += jump_label.o
obj-$(CONFIG_SWP_EMULATE) += swp_emulate.o
diff --git a/arch/arm64/kernel/arm64ksyms.c b/arch/arm64/kernel/arm64ksyms.c
index 41b4f62..338b568 100644
--- a/arch/arm64/kernel/arm64ksyms.c
+++ b/arch/arm64/kernel/arm64ksyms.c
@@ -29,16 +29,14 @@
#include <asm/checksum.h>
- /* user mem (segment) */
-EXPORT_SYMBOL(__strnlen_user);
-EXPORT_SYMBOL(__strncpy_from_user);
-
EXPORT_SYMBOL(copy_page);
EXPORT_SYMBOL(clear_page);
+ /* user mem (segment) */
EXPORT_SYMBOL(__copy_from_user);
EXPORT_SYMBOL(__copy_to_user);
EXPORT_SYMBOL(__clear_user);
+EXPORT_SYMBOL(__copy_in_user);
/* physical memory */
EXPORT_SYMBOL(memstart_addr);
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index a2a4d81..c481a11 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -24,6 +24,8 @@
#include <asm/thread_info.h>
#include <asm/memory.h>
#include <asm/cputable.h>
+#include <asm/smp_plat.h>
+#include <asm/suspend.h>
#include <asm/vdso_datapage.h>
#include <linux/kbuild.h>
@@ -104,5 +106,47 @@
BLANK();
DEFINE(TZ_MINWEST, offsetof(struct timezone, tz_minuteswest));
DEFINE(TZ_DSTTIME, offsetof(struct timezone, tz_dsttime));
+ BLANK();
+#ifdef CONFIG_KVM_ARM_HOST
+ DEFINE(VCPU_CONTEXT, offsetof(struct kvm_vcpu, arch.ctxt));
+ DEFINE(CPU_GP_REGS, offsetof(struct kvm_cpu_context, gp_regs));
+ DEFINE(CPU_USER_PT_REGS, offsetof(struct kvm_regs, regs));
+ DEFINE(CPU_FP_REGS, offsetof(struct kvm_regs, fp_regs));
+ DEFINE(CPU_SP_EL1, offsetof(struct kvm_regs, sp_el1));
+ DEFINE(CPU_ELR_EL1, offsetof(struct kvm_regs, elr_el1));
+ DEFINE(CPU_SPSR, offsetof(struct kvm_regs, spsr));
+ DEFINE(CPU_SYSREGS, offsetof(struct kvm_cpu_context, sys_regs));
+ DEFINE(VCPU_ESR_EL2, offsetof(struct kvm_vcpu, arch.fault.esr_el2));
+ DEFINE(VCPU_FAR_EL2, offsetof(struct kvm_vcpu, arch.fault.far_el2));
+ DEFINE(VCPU_HPFAR_EL2, offsetof(struct kvm_vcpu, arch.fault.hpfar_el2));
+ DEFINE(VCPU_HCR_EL2, offsetof(struct kvm_vcpu, arch.hcr_el2));
+ DEFINE(VCPU_IRQ_LINES, offsetof(struct kvm_vcpu, arch.irq_lines));
+ DEFINE(VCPU_HOST_CONTEXT, offsetof(struct kvm_vcpu, arch.host_cpu_context));
+ DEFINE(VCPU_TIMER_CNTV_CTL, offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_ctl));
+ DEFINE(VCPU_TIMER_CNTV_CVAL, offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_cval));
+ DEFINE(KVM_TIMER_CNTVOFF, offsetof(struct kvm, arch.timer.cntvoff));
+ DEFINE(KVM_TIMER_ENABLED, offsetof(struct kvm, arch.timer.enabled));
+ DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm));
+ DEFINE(VCPU_VGIC_CPU, offsetof(struct kvm_vcpu, arch.vgic_cpu));
+ DEFINE(VGIC_CPU_HCR, offsetof(struct vgic_cpu, vgic_hcr));
+ DEFINE(VGIC_CPU_VMCR, offsetof(struct vgic_cpu, vgic_vmcr));
+ DEFINE(VGIC_CPU_MISR, offsetof(struct vgic_cpu, vgic_misr));
+ DEFINE(VGIC_CPU_EISR, offsetof(struct vgic_cpu, vgic_eisr));
+ DEFINE(VGIC_CPU_ELRSR, offsetof(struct vgic_cpu, vgic_elrsr));
+ DEFINE(VGIC_CPU_APR, offsetof(struct vgic_cpu, vgic_apr));
+ DEFINE(VGIC_CPU_LR, offsetof(struct vgic_cpu, vgic_lr));
+ DEFINE(VGIC_CPU_NR_LR, offsetof(struct vgic_cpu, nr_lr));
+ DEFINE(KVM_VTTBR, offsetof(struct kvm, arch.vttbr));
+ DEFINE(KVM_VGIC_VCTRL, offsetof(struct kvm, arch.vgic.vctrl_base));
+#endif
+#ifdef CONFIG_ARM64_CPU_SUSPEND
+ DEFINE(CPU_SUSPEND_SZ, sizeof(struct cpu_suspend_ctx));
+ DEFINE(CPU_CTX_SP, offsetof(struct cpu_suspend_ctx, sp));
+ DEFINE(MPIDR_HASH_MASK, offsetof(struct mpidr_hash, mask));
+ DEFINE(MPIDR_HASH_SHIFTS, offsetof(struct mpidr_hash, shift_aff));
+ DEFINE(SLEEP_SAVE_SP_SZ, sizeof(struct sleep_save_sp));
+ DEFINE(SLEEP_SAVE_SP_PHYS, offsetof(struct sleep_save_sp, save_ptr_stash_phys));
+ DEFINE(SLEEP_SAVE_SP_VIRT, offsetof(struct sleep_save_sp, save_ptr_stash));
+#endif
return 0;
}
diff --git a/arch/arm64/kernel/cpu_ops.c b/arch/arm64/kernel/cpu_ops.c
new file mode 100644
index 0000000..04efea8
--- /dev/null
+++ b/arch/arm64/kernel/cpu_ops.c
@@ -0,0 +1,99 @@
+/*
+ * CPU kernel entry/exit control
+ *
+ * Copyright (C) 2013 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <asm/cpu_ops.h>
+#include <asm/smp_plat.h>
+#include <linux/errno.h>
+#include <linux/of.h>
+#include <linux/string.h>
+
+extern const struct cpu_operations smp_spin_table_ops;
+extern const struct cpu_operations cpu_psci_ops;
+
+const struct cpu_operations *cpu_ops[NR_CPUS];
+
+static const struct cpu_operations *supported_cpu_ops[] __initconst = {
+#ifdef CONFIG_SMP
+ &smp_spin_table_ops,
+ &cpu_psci_ops,
+#endif
+ NULL,
+};
+
+static const struct cpu_operations * __init cpu_get_ops(const char *name)
+{
+ const struct cpu_operations **ops = supported_cpu_ops;
+
+ while (*ops) {
+ if (!strcmp(name, (*ops)->name))
+ return *ops;
+
+ ops++;
+ }
+
+ return NULL;
+}
+
+/*
+ * Read a cpu's enable method from the device tree and record it in cpu_ops.
+ */
+int __init cpu_read_ops(struct device_node *dn, int cpu)
+{
+ const char *enable_method = of_get_property(dn, "enable-method", NULL);
+ if (!enable_method) {
+ /*
+ * The boot CPU may not have an enable method (e.g. when
+ * spin-table is used for secondaries). Don't warn spuriously.
+ */
+ if (cpu != 0)
+ pr_err("%s: missing enable-method property\n",
+ dn->full_name);
+ return -ENOENT;
+ }
+
+ cpu_ops[cpu] = cpu_get_ops(enable_method);
+ if (!cpu_ops[cpu]) {
+ pr_warn("%s: unsupported enable-method property: %s\n",
+ dn->full_name, enable_method);
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+void __init cpu_read_bootcpu_ops(void)
+{
+ struct device_node *dn = NULL;
+ u64 mpidr = cpu_logical_map(0);
+
+ while ((dn = of_find_node_by_type(dn, "cpu"))) {
+ u64 hwid;
+ const __be32 *prop;
+
+ prop = of_get_property(dn, "reg", NULL);
+ if (!prop)
+ continue;
+
+ hwid = of_read_number(prop, of_n_addr_cells(dn));
+ if (hwid == mpidr) {
+ cpu_read_ops(dn, 0);
+ of_node_put(dn);
+ return;
+ }
+ }
+}
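For reference, cpu_read_ops() above keys off the standard "enable-method" property of a cpu node. A device-tree fragment like the following would select the PSCI ops (illustrative only; the method strings are assumed to match the names of the two supported_cpu_ops entries):

    cpu@0 {
        device_type = "cpu";
        compatible = "arm,cortex-a53";
        reg = <0x0>;
        enable-method = "psci";         /* or "spin-table" */
    };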
diff --git a/arch/arm64/kernel/cputable.c b/arch/arm64/kernel/cputable.c
index 63cfc4a..fd3993c 100644
--- a/arch/arm64/kernel/cputable.c
+++ b/arch/arm64/kernel/cputable.c
@@ -22,7 +22,7 @@
extern unsigned long __cpu_setup(void);
-struct cpu_info __initdata cpu_table[] = {
+struct cpu_info cpu_table[] = {
{
.cpu_id_val = 0x000f0000,
.cpu_id_mask = 0x000f0000,
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index 08018e3..7f66fe1 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -138,7 +138,6 @@
static void clear_os_lock(void *unused)
{
asm volatile("msr oslar_el1, %0" : : "r" (0));
- isb();
}
static int __cpuinit os_lock_notify(struct notifier_block *self,
@@ -157,8 +156,9 @@
static int __cpuinit debug_monitors_init(void)
{
/* Clear the OS lock. */
- smp_call_function(clear_os_lock, NULL, 1);
- clear_os_lock(NULL);
+ on_each_cpu(clear_os_lock, NULL, 1);
+ isb();
+ local_dbg_enable();
/* Register hotplug handler. */
register_cpu_notifier(&os_lock_nb);
@@ -188,6 +188,48 @@
regs->pstate = spsr;
}
+/* EL1 Single Step Handler hooks */
+static LIST_HEAD(step_hook);
+static DEFINE_RWLOCK(step_hook_lock);
+
+void register_step_hook(struct step_hook *hook)
+{
+ write_lock(&step_hook_lock);
+ list_add(&hook->node, &step_hook);
+ write_unlock(&step_hook_lock);
+}
+
+void unregister_step_hook(struct step_hook *hook)
+{
+ write_lock(&step_hook_lock);
+ list_del(&hook->node);
+ write_unlock(&step_hook_lock);
+}
+
+/*
+ * Call registered single step handlers.
+ * There is no syndrome information to identify the right handler, so
+ * call each registered handler in turn until one handles the exception
+ * and returns zero (DBG_HOOK_HANDLED).
+ */
+static int call_step_hook(struct pt_regs *regs, unsigned int esr)
+{
+ struct step_hook *hook;
+ int retval = DBG_HOOK_ERROR;
+
+ read_lock(&step_hook_lock);
+
+ list_for_each_entry(hook, &step_hook, node) {
+ retval = hook->fn(regs, esr);
+ if (retval == DBG_HOOK_HANDLED)
+ break;
+ }
+
+ read_unlock(&step_hook_lock);
+
+ return retval;
+}
+
static int single_step_handler(unsigned long addr, unsigned int esr,
struct pt_regs *regs)
{
@@ -215,7 +257,9 @@
*/
user_rewind_single_step(current);
} else {
- /* TODO: route to KGDB */
+ if (call_step_hook(regs, esr) == DBG_HOOK_HANDLED)
+ return 0;
+
pr_warning("Unexpected kernel single-step exception at EL1\n");
/*
* Re-enable stepping since we know that we will be
@@ -227,11 +271,50 @@
return 0;
}
+/*
+ * The breakpoint handler is re-entrant, since another breakpoint can
+ * be hit from within the handler itself, especially by kprobes.
+ * Use reader/writer locks instead of a plain spinlock.
+ */
+static LIST_HEAD(break_hook);
+static DEFINE_RWLOCK(break_hook_lock);
+
+void register_break_hook(struct break_hook *hook)
+{
+ write_lock(&break_hook_lock);
+ list_add(&hook->node, &break_hook);
+ write_unlock(&break_hook_lock);
+}
+
+void unregister_break_hook(struct break_hook *hook)
+{
+ write_lock(&break_hook_lock);
+ list_del(&hook->node);
+ write_unlock(&break_hook_lock);
+}
+
+static int call_break_hook(struct pt_regs *regs, unsigned int esr)
+{
+ struct break_hook *hook;
+ int (*fn)(struct pt_regs *regs, unsigned int esr) = NULL;
+
+ read_lock(&break_hook_lock);
+ list_for_each_entry(hook, &break_hook, node)
+ if ((esr & hook->esr_mask) == hook->esr_val)
+ fn = hook->fn;
+ read_unlock(&break_hook_lock);
+
+ return fn ? fn(regs, esr) : DBG_HOOK_ERROR;
+}
+
static int brk_handler(unsigned long addr, unsigned int esr,
struct pt_regs *regs)
{
siginfo_t info;
+ if (call_break_hook(regs, esr) == DBG_HOOK_HANDLED)
+ return 0;
+
if (!user_mode(regs))
return -EFAULT;
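A hypothetical client of the new hook API above (a sketch: the mydbg_* names are invented, and the step_hook layout is assumed from the asm/debug-monitors.h changes in this series). A subsystem such as KGDB would register its handler like this and return DBG_HOOK_HANDLED for exceptions it owns:

    #include <linux/init.h>
    #include <asm/debug-monitors.h>
    #include <asm/ptrace.h>

    static int mydbg_step_fn(struct pt_regs *regs, unsigned int esr)
    {
        /* Not our exception: let the next registered hook try. */
        return DBG_HOOK_ERROR;
    }

    static struct step_hook mydbg_step_hook = {
        .fn = mydbg_step_fn,
    };

    static int __init mydbg_init(void)
    {
        register_step_hook(&mydbg_step_hook);
        return 0;
    }
    late_initcall(mydbg_init);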
diff --git a/arch/arm64/kernel/early_printk.c b/arch/arm64/kernel/early_printk.c
index fbb6e18..ffbbdde 100644
--- a/arch/arm64/kernel/early_printk.c
+++ b/arch/arm64/kernel/early_printk.c
@@ -26,6 +26,8 @@
#include <linux/amba/serial.h>
#include <linux/serial_reg.h>
+#include <asm/fixmap.h>
+
static void __iomem *early_base;
static void (*printch)(char ch);
@@ -141,8 +143,10 @@
}
/* no options parsing yet */
- if (paddr)
- early_base = early_io_map(paddr, EARLYCON_IOBASE);
+ if (paddr) {
+ set_fixmap_io(FIX_EARLYCON_MEM_BASE, paddr);
+ early_base = (void __iomem *)fix_to_virt(FIX_EARLYCON_MEM_BASE);
+ }
printch = match->printch;
early_console = &early_console_dev;
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 8927e52..53531c1 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -121,7 +121,7 @@
.macro get_thread_info, rd
mov \rd, sp
- and \rd, \rd, #~((1 << 13) - 1) // top of 8K stack
+ and \rd, \rd, #~(THREAD_SIZE - 1) // top of stack
.endm
/*
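The mask fix above matters because THREAD_SIZE grows to 16K elsewhere in this patch (see the thread_info.h hunk): with the old hardcoded 8K mask, an SP in the upper half of a 16K stack would resolve to the middle of the stack rather than to the thread_info at its base. Worked example (illustrative userspace arithmetic):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        const uint64_t thread_size = 16384;            /* new 16K stacks */
        uint64_t sp  = 0xffffffc000127f00ULL;          /* SP in the upper 8K half */
        uint64_t ti  = sp & ~(thread_size - 1);        /* new mask: stack base */
        uint64_t old = sp & ~(8192ULL - 1);            /* old hardcoded mask */

        assert(ti == 0xffffffc000124000ULL);
        assert(old != ti);   /* old mask lands mid-stack, not at thread_info */
        return 0;
    }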
@@ -288,6 +288,8 @@
/*
* Debug exception handling
*/
+ cmp x24, #ESR_EL1_EC_BRK64 // if BRK64
+ cinc x24, x24, eq // set bit '0'
tbz x24, #0, el1_inv // EL1 only
mrs x0, far_el1
mov x2, sp // struct pt_regs
@@ -311,14 +313,14 @@
#endif
#ifdef CONFIG_PREEMPT
get_thread_info tsk
- ldr x24, [tsk, #TI_PREEMPT] // get preempt count
- add x0, x24, #1 // increment it
- str x0, [tsk, #TI_PREEMPT]
+ ldr w24, [tsk, #TI_PREEMPT] // get preempt count
+ add w0, w24, #1 // increment it
+ str w0, [tsk, #TI_PREEMPT]
#endif
irq_handler
#ifdef CONFIG_PREEMPT
- str x24, [tsk, #TI_PREEMPT] // restore preempt count
- cbnz x24, 1f // preempt count != 0
+ str w24, [tsk, #TI_PREEMPT] // restore preempt count
+ cbnz w24, 1f // preempt count != 0
ldr x0, [tsk, #TI_FLAGS] // get flags
tbz x0, #TIF_NEED_RESCHED, 1f // needs rescheduling?
bl el1_preempt
@@ -477,6 +479,8 @@
* Undefined instruction
*/
mov x0, sp
+ // enable interrupts before calling the main handler
+ enable_irq
b do_undefinstr
el0_dbg:
/*
@@ -507,15 +511,15 @@
#endif
get_thread_info tsk
#ifdef CONFIG_PREEMPT
- ldr x24, [tsk, #TI_PREEMPT] // get preempt count
- add x23, x24, #1 // increment it
- str x23, [tsk, #TI_PREEMPT]
+ ldr w24, [tsk, #TI_PREEMPT] // get preempt count
+ add w23, w24, #1 // increment it
+ str w23, [tsk, #TI_PREEMPT]
#endif
irq_handler
#ifdef CONFIG_PREEMPT
- ldr x0, [tsk, #TI_PREEMPT]
- str x24, [tsk, #TI_PREEMPT]
- cmp x0, x23
+ ldr w0, [tsk, #TI_PREEMPT]
+ str w24, [tsk, #TI_PREEMPT]
+ cmp w0, w23
b.eq 1f
mov x1, #0
str x1, [x1] // BUG
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index 093bee27..2aadc12 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -17,10 +17,12 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/cpu_pm.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/signal.h>
+#include <linux/hardirq.h>
#include <asm/fpsimd.h>
#include <asm/cputype.h>
@@ -312,6 +314,8 @@
else
elf_hwcap |= HWCAP_ASIMD;
+ fpsimd_pm_init();
+
return 0;
}
-late_initcall(fpsimd_init);
+late_initcall(fpsimd_init);
\ No newline at end of file
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 53dcae4..1445f78 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -112,6 +112,14 @@
.quad TEXT_OFFSET // Image load offset from start of RAM
.quad 0 // reserved
.quad 0 // reserved
+ .quad 0 // reserved
+ .quad 0 // reserved
+ .quad 0 // reserved
+ .byte 0x41 // Magic number, "ARM\x64"
+ .byte 0x52
+ .byte 0x4d
+ .byte 0x64
+ .word 0 // reserved
ENTRY(stext)
mov x21, x0 // x21=FDT
@@ -217,7 +225,6 @@
.quad PAGE_OFFSET
#ifdef CONFIG_SMP
- .pushsection .smp.pen.text, "ax"
.align 3
1: .quad .
.quad secondary_holding_pen_release
@@ -242,7 +249,16 @@
wfe
b pen
ENDPROC(secondary_holding_pen)
- .popsection
+
+ /*
+ * Secondary entry point that jumps straight into the kernel. Only to
+ * be used where CPUs are brought online dynamically by the kernel.
+ */
+ENTRY(secondary_entry)
+ bl __calc_phys_offset // x2=phys offset
+ bl el2_setup // Drop to EL1
+ b secondary_startup
+ENDPROC(secondary_entry)
ENTRY(secondary_startup)
/*
@@ -340,26 +356,18 @@
* Preserves: tbl, flags
* Corrupts: phys, start, end, pstate
*/
- .macro create_block_map, tbl, flags, phys, start, end, idmap=0
+ .macro create_block_map, tbl, flags, phys, start, end
lsr \phys, \phys, #BLOCK_SHIFT
- .if \idmap
- and \start, \phys, #PTRS_PER_PTE - 1 // table index
- .else
lsr \start, \start, #BLOCK_SHIFT
and \start, \start, #PTRS_PER_PTE - 1 // table index
- .endif
orr \phys, \flags, \phys, lsl #BLOCK_SHIFT // table entry
- .ifnc \start,\end
lsr \end, \end, #BLOCK_SHIFT
and \end, \end, #PTRS_PER_PTE - 1 // table end index
- .endif
9999: str \phys, [\tbl, \start, lsl #3] // store the entry
- .ifnc \start,\end
add \start, \start, #1 // next entry
add \phys, \phys, #BLOCK_SIZE // next block
cmp \start, \end
b.ls 9999b
- .endif
.endm
/*
@@ -368,7 +376,7 @@
* - identity mapping to enable the MMU (low address, TTBR0)
* - first few MB of the kernel linear mapping to jump to once the MMU has
* been enabled, including the FDT blob (TTBR1)
- * - UART mapping if CONFIG_EARLY_PRINTK is enabled (TTBR1)
+ * - pgd entry for fixed mappings (TTBR1)
*/
__create_page_tables:
pgtbl x25, x26, x24 // idmap_pg_dir and swapper_pg_dir addresses
@@ -391,9 +399,13 @@
* Create the identity mapping.
*/
add x0, x25, #PAGE_SIZE // section table address
- adr x3, __turn_mmu_on // virtual/physical address
+ ldr x3, =KERNEL_START
+ add x3, x3, x28 // __pa(KERNEL_START)
create_pgd_entry x25, x0, x3, x5, x6
- create_block_map x0, x7, x3, x5, x5, idmap=1
+ ldr x6, =KERNEL_END
+ mov x5, x3 // __pa(KERNEL_START)
+ add x6, x6, x28 // __pa(KERNEL_END)
+ create_block_map x0, x7, x3, x5, x6
/*
* Map the kernel image (starting with PHYS_OFFSET).
@@ -401,7 +413,7 @@
add x0, x26, #PAGE_SIZE // section table address
mov x5, #PAGE_OFFSET
create_pgd_entry x26, x0, x5, x3, x6
- ldr x6, =KERNEL_END - 1
+ ldr x6, =KERNEL_END
mov x3, x24 // phys offset
create_block_map x0, x7, x3, x5, x6
@@ -421,15 +433,12 @@
sub x6, x6, #1 // inclusive range
create_block_map x0, x7, x3, x5, x6
1:
-#ifdef CONFIG_EARLY_PRINTK
/*
- * Create the pgd entry for the UART mapping. The full mapping is done
- * later based earlyprintk kernel parameter.
+ * Create the pgd entry for the fixed mappings.
*/
- ldr x5, =EARLYCON_IOBASE // UART virtual address
+ ldr x5, =FIXADDR_TOP // Fixed mapping virtual address
add x0, x26, #2 * PAGE_SIZE // section table address
create_pgd_entry x26, x0, x5, x6, x7
-#endif
ret
ENDPROC(__create_page_tables)
.ltorg
@@ -438,8 +447,6 @@
.type __switch_data, %object
__switch_data:
.quad __mmap_switched
- .quad __data_loc // x4
- .quad _data // x5
.quad __bss_start // x6
.quad _end // x7
.quad processor_id // x4
@@ -454,15 +461,7 @@
__mmap_switched:
adr x3, __switch_data + 8
- ldp x4, x5, [x3], #16
ldp x6, x7, [x3], #16
- cmp x4, x5 // Copy data segment if needed
-1: ccmp x5, x6, #4, ne
- b.eq 2f
- ldr x16, [x4], #8
- str x16, [x5], #8
- b 1b
-2:
1: cmp x6, x7
b.hs 2f
str xzr, [x6], #8 // Clear BSS
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
index 68e371e..6de3460 100644
--- a/arch/arm64/kernel/hw_breakpoint.c
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -21,6 +21,7 @@
#define pr_fmt(fmt) "hw-breakpoint: " fmt
#include <linux/compat.h>
+#include <linux/cpu_pm.h>
#include <linux/errno.h>
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
@@ -169,15 +170,68 @@
}
}
-/*
- * Install a perf counter breakpoint.
+enum hw_breakpoint_ops {
+ HW_BREAKPOINT_INSTALL,
+ HW_BREAKPOINT_UNINSTALL,
+ HW_BREAKPOINT_RESTORE
+};
+
+/**
+ * hw_breakpoint_slot_setup - Find and set up a perf slot according to the
+ * requested operation
+ *
+ * @slots: pointer to array of slots
+ * @max_slots: max number of slots
+ * @bp: perf_event to setup
+ * @ops: operation to be carried out on the slot
+ *
+ * Return:
+ * slot index on success
+ * -ENOSPC if no slot is available/matches
+ * -EINVAL on wrong operations parameter
*/
-int arch_install_hw_breakpoint(struct perf_event *bp)
+static int hw_breakpoint_slot_setup(struct perf_event **slots, int max_slots,
+ struct perf_event *bp,
+ enum hw_breakpoint_ops ops)
+{
+ int i;
+ struct perf_event **slot;
+
+ for (i = 0; i < max_slots; ++i) {
+ slot = &slots[i];
+ switch (ops) {
+ case HW_BREAKPOINT_INSTALL:
+ if (!*slot) {
+ *slot = bp;
+ return i;
+ }
+ break;
+ case HW_BREAKPOINT_UNINSTALL:
+ if (*slot == bp) {
+ *slot = NULL;
+ return i;
+ }
+ break;
+ case HW_BREAKPOINT_RESTORE:
+ if (*slot == bp)
+ return i;
+ break;
+ default:
+ pr_warn_once("Unhandled hw breakpoint ops %d\n", ops);
+ return -EINVAL;
+ }
+ }
+ return -ENOSPC;
+}
+
+static int hw_breakpoint_control(struct perf_event *bp,
+ enum hw_breakpoint_ops ops)
{
struct arch_hw_breakpoint *info = counter_arch_bp(bp);
- struct perf_event **slot, **slots;
+ struct perf_event **slots;
struct debug_info *debug_info = &current->thread.debug;
int i, max_slots, ctrl_reg, val_reg, reg_enable;
+ enum debug_el dbg_el = debug_exception_level(info->ctrl.privilege);
u32 ctrl;
if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
@@ -196,67 +250,54 @@
reg_enable = !debug_info->wps_disabled;
}
- for (i = 0; i < max_slots; ++i) {
- slot = &slots[i];
+ i = hw_breakpoint_slot_setup(slots, max_slots, bp, ops);
- if (!*slot) {
- *slot = bp;
- break;
- }
+ if (WARN_ONCE(i < 0, "Can't find any breakpoint slot"))
+ return i;
+
+ switch (ops) {
+ case HW_BREAKPOINT_INSTALL:
+ /*
+ * Ensure debug monitors are enabled at the correct exception
+ * level.
+ */
+ enable_debug_monitors(dbg_el);
+ /* Fall through */
+ case HW_BREAKPOINT_RESTORE:
+ /* Setup the address register. */
+ write_wb_reg(val_reg, i, info->address);
+
+ /* Setup the control register. */
+ ctrl = encode_ctrl_reg(info->ctrl);
+ write_wb_reg(ctrl_reg, i,
+ reg_enable ? ctrl | 0x1 : ctrl & ~0x1);
+ break;
+ case HW_BREAKPOINT_UNINSTALL:
+ /* Reset the control register. */
+ write_wb_reg(ctrl_reg, i, 0);
+
+ /*
+ * Release the debug monitors for the correct exception
+ * level.
+ */
+ disable_debug_monitors(dbg_el);
+ break;
}
- if (WARN_ONCE(i == max_slots, "Can't find any breakpoint slot"))
- return -ENOSPC;
-
- /* Ensure debug monitors are enabled at the correct exception level. */
- enable_debug_monitors(debug_exception_level(info->ctrl.privilege));
-
- /* Setup the address register. */
- write_wb_reg(val_reg, i, info->address);
-
- /* Setup the control register. */
- ctrl = encode_ctrl_reg(info->ctrl);
- write_wb_reg(ctrl_reg, i, reg_enable ? ctrl | 0x1 : ctrl & ~0x1);
-
return 0;
}
+/*
+ * Install a perf counter breakpoint.
+ */
+int arch_install_hw_breakpoint(struct perf_event *bp)
+{
+ return hw_breakpoint_control(bp, HW_BREAKPOINT_INSTALL);
+}
+
void arch_uninstall_hw_breakpoint(struct perf_event *bp)
{
- struct arch_hw_breakpoint *info = counter_arch_bp(bp);
- struct perf_event **slot, **slots;
- int i, max_slots, base;
-
- if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
- /* Breakpoint */
- base = AARCH64_DBG_REG_BCR;
- slots = __get_cpu_var(bp_on_reg);
- max_slots = core_num_brps;
- } else {
- /* Watchpoint */
- base = AARCH64_DBG_REG_WCR;
- slots = __get_cpu_var(wp_on_reg);
- max_slots = core_num_wrps;
- }
-
- /* Remove the breakpoint. */
- for (i = 0; i < max_slots; ++i) {
- slot = &slots[i];
-
- if (*slot == bp) {
- *slot = NULL;
- break;
- }
- }
-
- if (WARN_ONCE(i == max_slots, "Can't find any breakpoint slot"))
- return;
-
- /* Reset the control register. */
- write_wb_reg(base, i, 0);
-
- /* Release the debug monitors for the correct exception level. */
- disable_debug_monitors(debug_exception_level(info->ctrl.privilege));
+ hw_breakpoint_control(bp, HW_BREAKPOINT_UNINSTALL);
}
static int get_hbp_len(u8 hbp_len)
@@ -806,18 +847,36 @@
/*
* CPU initialisation.
*/
-static void reset_ctrl_regs(void *unused)
+static void hw_breakpoint_reset(void *unused)
{
int i;
-
- for (i = 0; i < core_num_brps; ++i) {
- write_wb_reg(AARCH64_DBG_REG_BCR, i, 0UL);
- write_wb_reg(AARCH64_DBG_REG_BVR, i, 0UL);
+ struct perf_event **slots;
+ /*
+	 * When a CPU goes through cold-boot, it has no breakpoint slots
+	 * installed, so it is safe to share the same function for restoring
+	 * and resetting breakpoints; when a CPU is hotplugged in, it walks
+	 * the slots, which are all empty, and hence only resets the control
+	 * and value registers.
+	 * When this function is triggered on warm-boot through a CPU PM
+	 * notifier, some slots might be initialized; if so, they are
+	 * reprogrammed according to the saved slot contents.
+ */
+ for (slots = __get_cpu_var(bp_on_reg), i = 0; i < core_num_brps; ++i) {
+ if (slots[i]) {
+ hw_breakpoint_control(slots[i], HW_BREAKPOINT_RESTORE);
+ } else {
+ write_wb_reg(AARCH64_DBG_REG_BCR, i, 0UL);
+ write_wb_reg(AARCH64_DBG_REG_BVR, i, 0UL);
+ }
}
- for (i = 0; i < core_num_wrps; ++i) {
- write_wb_reg(AARCH64_DBG_REG_WCR, i, 0UL);
- write_wb_reg(AARCH64_DBG_REG_WVR, i, 0UL);
+ for (slots = __get_cpu_var(wp_on_reg), i = 0; i < core_num_wrps; ++i) {
+ if (slots[i]) {
+ hw_breakpoint_control(slots[i], HW_BREAKPOINT_RESTORE);
+ } else {
+ write_wb_reg(AARCH64_DBG_REG_WCR, i, 0UL);
+ write_wb_reg(AARCH64_DBG_REG_WVR, i, 0UL);
+ }
}
}
@@ -827,7 +886,7 @@
{
int cpu = (long)hcpu;
if (action == CPU_ONLINE)
- smp_call_function_single(cpu, reset_ctrl_regs, NULL, 1);
+ smp_call_function_single(cpu, hw_breakpoint_reset, NULL, 1);
return NOTIFY_OK;
}
@@ -835,6 +894,14 @@
.notifier_call = hw_breakpoint_reset_notify,
};
+#ifdef CONFIG_ARM64_CPU_SUSPEND
+extern void cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *));
+#else
+static inline void cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *))
+{
+}
+#endif
+
/*
* One-time initialisation.
*/
@@ -850,8 +917,8 @@
* Reset the breakpoint resources. We assume that a halting
* debugger will leave the world in a nice state for us.
*/
- smp_call_function(reset_ctrl_regs, NULL, 1);
- reset_ctrl_regs(NULL);
+ smp_call_function(hw_breakpoint_reset, NULL, 1);
+ hw_breakpoint_reset(NULL);
/* Register debug fault handlers. */
hook_debug_fault_code(DBG_ESR_EVT_HWBP, breakpoint_handler, SIGTRAP,
@@ -861,6 +928,8 @@
/* Register hotplug notifier. */
register_cpu_notifier(&hw_breakpoint_reset_nb);
+ /* Register cpu_suspend hw breakpoint restore hook */
+ cpu_suspend_set_dbg_restorer(hw_breakpoint_reset);
return 0;
}
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
new file mode 100644
index 0000000..92f3683
--- /dev/null
+++ b/arch/arm64/kernel/insn.c
@@ -0,0 +1,304 @@
+/*
+ * Copyright (C) 2013 Huawei Ltd.
+ * Author: Jiang Liu <liuj97@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/bitops.h>
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <linux/smp.h>
+#include <linux/stop_machine.h>
+#include <linux/uaccess.h>
+#include <asm/cacheflush.h>
+#include <asm/insn.h>
+
+static int aarch64_insn_encoding_class[] = {
+ AARCH64_INSN_CLS_UNKNOWN,
+ AARCH64_INSN_CLS_UNKNOWN,
+ AARCH64_INSN_CLS_UNKNOWN,
+ AARCH64_INSN_CLS_UNKNOWN,
+ AARCH64_INSN_CLS_LDST,
+ AARCH64_INSN_CLS_DP_REG,
+ AARCH64_INSN_CLS_LDST,
+ AARCH64_INSN_CLS_DP_FPSIMD,
+ AARCH64_INSN_CLS_DP_IMM,
+ AARCH64_INSN_CLS_DP_IMM,
+ AARCH64_INSN_CLS_BR_SYS,
+ AARCH64_INSN_CLS_BR_SYS,
+ AARCH64_INSN_CLS_LDST,
+ AARCH64_INSN_CLS_DP_REG,
+ AARCH64_INSN_CLS_LDST,
+ AARCH64_INSN_CLS_DP_FPSIMD,
+};
+
+enum aarch64_insn_encoding_class __kprobes aarch64_get_insn_class(u32 insn)
+{
+ return aarch64_insn_encoding_class[(insn >> 25) & 0xf];
+}
+
+/* NOP is an alias of HINT */
+bool __kprobes aarch64_insn_is_nop(u32 insn)
+{
+ if (!aarch64_insn_is_hint(insn))
+ return false;
+
+ switch (insn & 0xFE0) {
+ case AARCH64_INSN_HINT_YIELD:
+ case AARCH64_INSN_HINT_WFE:
+ case AARCH64_INSN_HINT_WFI:
+ case AARCH64_INSN_HINT_SEV:
+ case AARCH64_INSN_HINT_SEVL:
+ return false;
+ default:
+ return true;
+ }
+}
+
+/*
+ * In ARMv8-A, A64 instructions have a fixed length of 32 bits and are always
+ * little-endian.
+ */
+int __kprobes aarch64_insn_read(void *addr, u32 *insnp)
+{
+ int ret;
+ u32 val;
+
+ ret = probe_kernel_read(&val, addr, AARCH64_INSN_SIZE);
+ if (!ret)
+ *insnp = le32_to_cpu(val);
+
+ return ret;
+}
+
+int __kprobes aarch64_insn_write(void *addr, u32 insn)
+{
+ insn = cpu_to_le32(insn);
+ return probe_kernel_write(addr, &insn, AARCH64_INSN_SIZE);
+}
+
+static bool __kprobes __aarch64_insn_hotpatch_safe(u32 insn)
+{
+ if (aarch64_get_insn_class(insn) != AARCH64_INSN_CLS_BR_SYS)
+ return false;
+
+ return aarch64_insn_is_b(insn) ||
+ aarch64_insn_is_bl(insn) ||
+ aarch64_insn_is_svc(insn) ||
+ aarch64_insn_is_hvc(insn) ||
+ aarch64_insn_is_smc(insn) ||
+ aarch64_insn_is_brk(insn) ||
+ aarch64_insn_is_nop(insn);
+}
+
+/*
+ * ARM Architecture Reference Manual for ARMv8 Profile-A, Issue A.a
+ * Section B2.6.5 "Concurrent modification and execution of instructions":
+ * Concurrent modification and execution of instructions can lead to the
+ * resulting instruction performing any behavior that can be achieved by
+ * executing any sequence of instructions that can be executed from the
+ * same Exception level, except where the instruction before modification
+ * and the instruction after modification is a B, BL, NOP, BKPT, SVC, HVC,
+ * or SMC instruction.
+ */
+bool __kprobes aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn)
+{
+ return __aarch64_insn_hotpatch_safe(old_insn) &&
+ __aarch64_insn_hotpatch_safe(new_insn);
+}
+
+int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn)
+{
+ u32 *tp = addr;
+ int ret;
+
+ /* A64 instructions must be word aligned */
+ if ((uintptr_t)tp & 0x3)
+ return -EINVAL;
+
+ ret = aarch64_insn_write(tp, insn);
+ if (ret == 0)
+ flush_icache_range((uintptr_t)tp,
+ (uintptr_t)tp + AARCH64_INSN_SIZE);
+
+ return ret;
+}
+
+struct aarch64_insn_patch {
+ void **text_addrs;
+ u32 *new_insns;
+ int insn_cnt;
+ atomic_t cpu_count;
+};
+
+static int __kprobes aarch64_insn_patch_text_cb(void *arg)
+{
+ int i, ret = 0;
+ struct aarch64_insn_patch *pp = arg;
+
+ /* The first CPU becomes master */
+ if (atomic_inc_return(&pp->cpu_count) == 1) {
+ for (i = 0; ret == 0 && i < pp->insn_cnt; i++)
+ ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i],
+ pp->new_insns[i]);
+ /*
+ * aarch64_insn_patch_text_nosync() calls flush_icache_range(),
+ * which ends with "dsb; isb" pair guaranteeing global
+ * visibility.
+ */
+ atomic_set(&pp->cpu_count, -1);
+ } else {
+ while (atomic_read(&pp->cpu_count) != -1)
+ cpu_relax();
+ isb();
+ }
+
+ return ret;
+}
+
+int __kprobes aarch64_insn_patch_text_sync(void *addrs[], u32 insns[], int cnt)
+{
+ struct aarch64_insn_patch patch = {
+ .text_addrs = addrs,
+ .new_insns = insns,
+ .insn_cnt = cnt,
+ .cpu_count = ATOMIC_INIT(0),
+ };
+
+ if (cnt <= 0)
+ return -EINVAL;
+
+ return stop_machine(aarch64_insn_patch_text_cb, &patch,
+ cpu_online_mask);
+}
+
+int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
+{
+ int ret;
+ u32 insn;
+
+	/* Unsafe to patch multiple instructions without synchronization */
+ if (cnt == 1) {
+ ret = aarch64_insn_read(addrs[0], &insn);
+ if (ret)
+ return ret;
+
+ if (aarch64_insn_hotpatch_safe(insn, insns[0])) {
+ /*
+ * ARMv8 architecture doesn't guarantee all CPUs see
+ * the new instruction after returning from function
+ * aarch64_insn_patch_text_nosync(). So send IPIs to
+ * all other CPUs to achieve instruction
+ * synchronization.
+ */
+ ret = aarch64_insn_patch_text_nosync(addrs[0], insns[0]);
+ kick_all_cpus_sync();
+ return ret;
+ }
+ }
+
+ return aarch64_insn_patch_text_sync(addrs, insns, cnt);
+}
+
+u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
+ u32 insn, u64 imm)
+{
+ u32 immlo, immhi, lomask, himask, mask;
+ int shift;
+
+ switch (type) {
+ case AARCH64_INSN_IMM_ADR:
+ lomask = 0x3;
+ himask = 0x7ffff;
+ immlo = imm & lomask;
+ imm >>= 2;
+ immhi = imm & himask;
+ imm = (immlo << 24) | (immhi);
+ mask = (lomask << 24) | (himask);
+ shift = 5;
+ break;
+ case AARCH64_INSN_IMM_26:
+ mask = BIT(26) - 1;
+ shift = 0;
+ break;
+ case AARCH64_INSN_IMM_19:
+ mask = BIT(19) - 1;
+ shift = 5;
+ break;
+ case AARCH64_INSN_IMM_16:
+ mask = BIT(16) - 1;
+ shift = 5;
+ break;
+ case AARCH64_INSN_IMM_14:
+ mask = BIT(14) - 1;
+ shift = 5;
+ break;
+ case AARCH64_INSN_IMM_12:
+ mask = BIT(12) - 1;
+ shift = 10;
+ break;
+ case AARCH64_INSN_IMM_9:
+ mask = BIT(9) - 1;
+ shift = 12;
+ break;
+ default:
+ pr_err("aarch64_insn_encode_immediate: unknown immediate encoding %d\n",
+ type);
+ return 0;
+ }
+
+ /* Update the immediate field. */
+ insn &= ~(mask << shift);
+ insn |= (imm & mask) << shift;
+
+ return insn;
+}
+
+u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
+ enum aarch64_insn_branch_type type)
+{
+ u32 insn;
+ long offset;
+
+ /*
+ * PC: A 64-bit Program Counter holding the address of the current
+ * instruction. A64 instructions must be word-aligned.
+ */
+ BUG_ON((pc & 0x3) || (addr & 0x3));
+
+ /*
+ * B/BL support [-128M, 128M) offset
+ * ARM64 virtual address arrangement guarantees all kernel and module
+ * texts are within +/-128M.
+ */
+ offset = ((long)addr - (long)pc);
+ BUG_ON(offset < -SZ_128M || offset >= SZ_128M);
+
+ if (type == AARCH64_INSN_BRANCH_LINK)
+ insn = aarch64_insn_get_bl_value();
+ else
+ insn = aarch64_insn_get_b_value();
+
+ return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
+ offset >> 2);
+}
+
+u32 __kprobes aarch64_insn_gen_hint(enum aarch64_insn_hint_op op)
+{
+ return aarch64_insn_get_hint_value() | op;
+}
+
+u32 __kprobes aarch64_insn_gen_nop(void)
+{
+ return aarch64_insn_gen_hint(AARCH64_INSN_HINT_NOP);
+}
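
For reference, a minimal user-space sketch of the imm26 packing that
aarch64_insn_gen_branch_imm() and AARCH64_INSN_IMM_26 perform above. The
0x14000000 value is the architectural A64 "B" opcode; make_branch() is a
hypothetical helper, not part of this patch:

	/* Pack a PC-relative branch: imm26 holds (addr - pc) >> 2 */
	unsigned int make_branch(unsigned long pc, unsigned long addr)
	{
		long off = (long)addr - (long)pc; /* must be in [-128M, 128M) */
		unsigned int insn = 0x14000000;   /* A64 "B" opcode */

		return insn | (((unsigned int)(off >> 2)) & ((1U << 26) - 1));
	}
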
diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c
index ecb3354..473e5db 100644
--- a/arch/arm64/kernel/irq.c
+++ b/arch/arm64/kernel/irq.c
@@ -81,3 +81,64 @@
if (!handle_arch_irq)
panic("No interrupt controller found.");
}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static bool migrate_one_irq(struct irq_desc *desc)
+{
+ struct irq_data *d = irq_desc_get_irq_data(desc);
+ const struct cpumask *affinity = d->affinity;
+ struct irq_chip *c;
+ bool ret = false;
+
+ /*
+ * If this is a per-CPU interrupt, or the affinity does not
+ * include this CPU, then we have nothing to do.
+ */
+ if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity))
+ return false;
+
+ if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
+ affinity = cpu_online_mask;
+ ret = true;
+ }
+
+ c = irq_data_get_irq_chip(d);
+ if (!c->irq_set_affinity)
+ pr_debug("IRQ%u: unable to set affinity\n", d->irq);
+ else if (c->irq_set_affinity(d, affinity, true) == IRQ_SET_MASK_OK && ret)
+ cpumask_copy(d->affinity, affinity);
+
+ return ret;
+}
+
+/*
+ * The current CPU has been marked offline. Migrate IRQs off this CPU.
+ * If the affinity settings do not allow other CPUs, force them onto any
+ * available CPU.
+ *
+ * Note: we must iterate over all IRQs, whether they have an attached
+ * action structure or not, as we need to get chained interrupts too.
+ */
+void migrate_irqs(void)
+{
+ unsigned int i;
+ struct irq_desc *desc;
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ for_each_irq_desc(i, desc) {
+ bool affinity_broken;
+
+ raw_spin_lock(&desc->lock);
+ affinity_broken = migrate_one_irq(desc);
+ raw_spin_unlock(&desc->lock);
+
+ if (affinity_broken)
+ pr_warn_ratelimited("IRQ%u no longer affine to CPU%u\n",
+ i, smp_processor_id());
+ }
+
+ local_irq_restore(flags);
+}
+#endif /* CONFIG_HOTPLUG_CPU */
diff --git a/arch/arm64/kernel/jump_label.c b/arch/arm64/kernel/jump_label.c
new file mode 100644
index 0000000..263a166
--- /dev/null
+++ b/arch/arm64/kernel/jump_label.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2013 Huawei Ltd.
+ * Author: Jiang Liu <liuj97@gmail.com>
+ *
+ * Based on arch/arm/kernel/jump_label.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/kernel.h>
+#include <linux/jump_label.h>
+#include <asm/insn.h>
+
+#ifdef HAVE_JUMP_LABEL
+
+static void __arch_jump_label_transform(struct jump_entry *entry,
+ enum jump_label_type type,
+ bool is_static)
+{
+ void *addr = (void *)entry->code;
+ u32 insn;
+
+ if (type == JUMP_LABEL_ENABLE) {
+ insn = aarch64_insn_gen_branch_imm(entry->code,
+ entry->target,
+ AARCH64_INSN_BRANCH_NOLINK);
+ } else {
+ insn = aarch64_insn_gen_nop();
+ }
+
+ if (is_static)
+ aarch64_insn_patch_text_nosync(addr, insn);
+ else
+ aarch64_insn_patch_text(&addr, &insn, 1);
+}
+
+void arch_jump_label_transform(struct jump_entry *entry,
+ enum jump_label_type type)
+{
+ __arch_jump_label_transform(entry, type, false);
+}
+
+void arch_jump_label_transform_static(struct jump_entry *entry,
+ enum jump_label_type type)
+{
+ __arch_jump_label_transform(entry, type, true);
+}
+
+#endif /* HAVE_JUMP_LABEL */
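
For context, this is the arch half of the static-key mechanism: a jump
label site is emitted as a single NOP and later patched into a branch by
arch_jump_label_transform(). A hedged sketch of the consumer side, where
my_key and do_tracing() are hypothetical names and static_key_false() is
the 3.10-era API:

	#include <linux/jump_label.h>

	extern void do_tracing(void);

	static struct static_key my_key = STATIC_KEY_INIT_FALSE;

	void hot_path(void)
	{
		/* Compiles to a NOP; becomes a branch once the key is enabled */
		if (static_key_false(&my_key))
			do_tracing();
	}
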
diff --git a/arch/arm64/kernel/kgdb.c b/arch/arm64/kernel/kgdb.c
new file mode 100644
index 0000000..75c9cf1
--- /dev/null
+++ b/arch/arm64/kernel/kgdb.c
@@ -0,0 +1,336 @@
+/*
+ * AArch64 KGDB support
+ *
+ * Based on arch/arm/kernel/kgdb.c
+ *
+ * Copyright (C) 2013 Cavium Inc.
+ * Author: Vijaya Kumar K <vijaya.kumar@caviumnetworks.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/irq.h>
+#include <linux/kdebug.h>
+#include <linux/kgdb.h>
+#include <asm/traps.h>
+
+struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = {
+ { "x0", 8, offsetof(struct pt_regs, regs[0])},
+ { "x1", 8, offsetof(struct pt_regs, regs[1])},
+ { "x2", 8, offsetof(struct pt_regs, regs[2])},
+ { "x3", 8, offsetof(struct pt_regs, regs[3])},
+ { "x4", 8, offsetof(struct pt_regs, regs[4])},
+ { "x5", 8, offsetof(struct pt_regs, regs[5])},
+ { "x6", 8, offsetof(struct pt_regs, regs[6])},
+ { "x7", 8, offsetof(struct pt_regs, regs[7])},
+ { "x8", 8, offsetof(struct pt_regs, regs[8])},
+ { "x9", 8, offsetof(struct pt_regs, regs[9])},
+ { "x10", 8, offsetof(struct pt_regs, regs[10])},
+ { "x11", 8, offsetof(struct pt_regs, regs[11])},
+ { "x12", 8, offsetof(struct pt_regs, regs[12])},
+ { "x13", 8, offsetof(struct pt_regs, regs[13])},
+ { "x14", 8, offsetof(struct pt_regs, regs[14])},
+ { "x15", 8, offsetof(struct pt_regs, regs[15])},
+ { "x16", 8, offsetof(struct pt_regs, regs[16])},
+ { "x17", 8, offsetof(struct pt_regs, regs[17])},
+ { "x18", 8, offsetof(struct pt_regs, regs[18])},
+ { "x19", 8, offsetof(struct pt_regs, regs[19])},
+ { "x20", 8, offsetof(struct pt_regs, regs[20])},
+ { "x21", 8, offsetof(struct pt_regs, regs[21])},
+ { "x22", 8, offsetof(struct pt_regs, regs[22])},
+ { "x23", 8, offsetof(struct pt_regs, regs[23])},
+ { "x24", 8, offsetof(struct pt_regs, regs[24])},
+ { "x25", 8, offsetof(struct pt_regs, regs[25])},
+ { "x26", 8, offsetof(struct pt_regs, regs[26])},
+ { "x27", 8, offsetof(struct pt_regs, regs[27])},
+ { "x28", 8, offsetof(struct pt_regs, regs[28])},
+ { "x29", 8, offsetof(struct pt_regs, regs[29])},
+ { "x30", 8, offsetof(struct pt_regs, regs[30])},
+ { "sp", 8, offsetof(struct pt_regs, sp)},
+ { "pc", 8, offsetof(struct pt_regs, pc)},
+ { "pstate", 8, offsetof(struct pt_regs, pstate)},
+ { "v0", 16, -1 },
+ { "v1", 16, -1 },
+ { "v2", 16, -1 },
+ { "v3", 16, -1 },
+ { "v4", 16, -1 },
+ { "v5", 16, -1 },
+ { "v6", 16, -1 },
+ { "v7", 16, -1 },
+ { "v8", 16, -1 },
+ { "v9", 16, -1 },
+ { "v10", 16, -1 },
+ { "v11", 16, -1 },
+ { "v12", 16, -1 },
+ { "v13", 16, -1 },
+ { "v14", 16, -1 },
+ { "v15", 16, -1 },
+ { "v16", 16, -1 },
+ { "v17", 16, -1 },
+ { "v18", 16, -1 },
+ { "v19", 16, -1 },
+ { "v20", 16, -1 },
+ { "v21", 16, -1 },
+ { "v22", 16, -1 },
+ { "v23", 16, -1 },
+ { "v24", 16, -1 },
+ { "v25", 16, -1 },
+ { "v26", 16, -1 },
+ { "v27", 16, -1 },
+ { "v28", 16, -1 },
+ { "v29", 16, -1 },
+ { "v30", 16, -1 },
+ { "v31", 16, -1 },
+ { "fpsr", 4, -1 },
+ { "fpcr", 4, -1 },
+};
+
+char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
+{
+ if (regno >= DBG_MAX_REG_NUM || regno < 0)
+ return NULL;
+
+ if (dbg_reg_def[regno].offset != -1)
+ memcpy(mem, (void *)regs + dbg_reg_def[regno].offset,
+ dbg_reg_def[regno].size);
+ else
+ memset(mem, 0, dbg_reg_def[regno].size);
+ return dbg_reg_def[regno].name;
+}
+
+int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
+{
+ if (regno >= DBG_MAX_REG_NUM || regno < 0)
+ return -EINVAL;
+
+ if (dbg_reg_def[regno].offset != -1)
+ memcpy((void *)regs + dbg_reg_def[regno].offset, mem,
+ dbg_reg_def[regno].size);
+ return 0;
+}
+
+void
+sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task)
+{
+ struct pt_regs *thread_regs;
+
+ /* Initialize to zero */
+ memset((char *)gdb_regs, 0, NUMREGBYTES);
+ thread_regs = task_pt_regs(task);
+ memcpy((void *)gdb_regs, (void *)thread_regs->regs, GP_REG_BYTES);
+}
+
+void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
+{
+ regs->pc = pc;
+}
+
+static int compiled_break;
+
+static void kgdb_arch_update_addr(struct pt_regs *regs,
+ char *remcom_in_buffer)
+{
+ unsigned long addr;
+ char *ptr;
+
+ ptr = &remcom_in_buffer[1];
+ if (kgdb_hex2long(&ptr, &addr))
+ kgdb_arch_set_pc(regs, addr);
+ else if (compiled_break == 1)
+ kgdb_arch_set_pc(regs, regs->pc + 4);
+
+ compiled_break = 0;
+}
+
+int kgdb_arch_handle_exception(int exception_vector, int signo,
+ int err_code, char *remcom_in_buffer,
+ char *remcom_out_buffer,
+ struct pt_regs *linux_regs)
+{
+ int err;
+
+ switch (remcom_in_buffer[0]) {
+ case 'D':
+ case 'k':
+ /*
+ * Packet D (Detach), k (kill). No special handling
+		 * is required here. Handle them the same as the c packet.
+ */
+ case 'c':
+		/*
+		 * Packet c (Continue) resumes execution.
+		 * Try to read an optional address parameter and, if present,
+		 * set the PC to it.
+		 * If this was a compiled breakpoint, we need to move past it,
+		 * otherwise we will hit the same breakpoint over and over
+		 * again.
+		 */
+ kgdb_arch_update_addr(linux_regs, remcom_in_buffer);
+ atomic_set(&kgdb_cpu_doing_single_step, -1);
+ kgdb_single_step = 0;
+
+ /*
+		 * Received a continue command, so disable single stepping
+ */
+ if (kernel_active_single_step())
+ kernel_disable_single_step();
+
+ err = 0;
+ break;
+ case 's':
+		/*
+		 * Update the step address with the address passed in the
+		 * step packet.
+		 * On return from a debug exception the PC is copied from
+		 * the ELR, so just update the PC.
+		 * If no step address is passed, resume from the address the
+		 * PC points to and do not update the PC.
+		 */
+ kgdb_arch_update_addr(linux_regs, remcom_in_buffer);
+ atomic_set(&kgdb_cpu_doing_single_step, raw_smp_processor_id());
+ kgdb_single_step = 1;
+
+ /*
+ * Enable single step handling
+ */
+ if (!kernel_active_single_step())
+ kernel_enable_single_step(linux_regs);
+ err = 0;
+ break;
+ default:
+ err = -1;
+ }
+ return err;
+}
+
+static int kgdb_brk_fn(struct pt_regs *regs, unsigned int esr)
+{
+ kgdb_handle_exception(1, SIGTRAP, 0, regs);
+ return 0;
+}
+
+static int kgdb_compiled_brk_fn(struct pt_regs *regs, unsigned int esr)
+{
+ compiled_break = 1;
+ kgdb_handle_exception(1, SIGTRAP, 0, regs);
+
+ return 0;
+}
+
+static int kgdb_step_brk_fn(struct pt_regs *regs, unsigned int esr)
+{
+ kgdb_handle_exception(1, SIGTRAP, 0, regs);
+ return 0;
+}
+
+static struct break_hook kgdb_brkpt_hook = {
+ .esr_mask = 0xffffffff,
+ .esr_val = DBG_ESR_VAL_BRK(KGDB_DYN_DGB_BRK_IMM),
+ .fn = kgdb_brk_fn
+};
+
+static struct break_hook kgdb_compiled_brkpt_hook = {
+ .esr_mask = 0xffffffff,
+ .esr_val = DBG_ESR_VAL_BRK(KDBG_COMPILED_DBG_BRK_IMM),
+ .fn = kgdb_compiled_brk_fn
+};
+
+static struct step_hook kgdb_step_hook = {
+ .fn = kgdb_step_brk_fn
+};
+
+static void kgdb_call_nmi_hook(void *ignored)
+{
+ kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs());
+}
+
+void kgdb_roundup_cpus(unsigned long flags)
+{
+ local_irq_enable();
+ smp_call_function(kgdb_call_nmi_hook, NULL, 0);
+ local_irq_disable();
+}
+
+static int __kgdb_notify(struct die_args *args, unsigned long cmd)
+{
+ struct pt_regs *regs = args->regs;
+
+ if (kgdb_handle_exception(1, args->signr, cmd, regs))
+ return NOTIFY_DONE;
+ return NOTIFY_STOP;
+}
+
+static int
+kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr)
+{
+ unsigned long flags;
+ int ret;
+
+ local_irq_save(flags);
+ ret = __kgdb_notify(ptr, cmd);
+ local_irq_restore(flags);
+
+ return ret;
+}
+
+static struct notifier_block kgdb_notifier = {
+ .notifier_call = kgdb_notify,
+ /*
+ * Want to be lowest priority
+ */
+ .priority = -INT_MAX,
+};
+
+/*
+ * kgdb_arch_init - Perform any architecture-specific initialization.
+ * This function handles the initialization of any architecture-specific
+ * callbacks.
+ */
+int kgdb_arch_init(void)
+{
+ int ret = register_die_notifier(&kgdb_notifier);
+
+ if (ret != 0)
+ return ret;
+
+ register_break_hook(&kgdb_brkpt_hook);
+ register_break_hook(&kgdb_compiled_brkpt_hook);
+ register_step_hook(&kgdb_step_hook);
+ return 0;
+}
+
+/*
+ * kgdb_arch_exit - Perform any architecture-specific uninitialization.
+ * This function handles the uninitialization of any architecture-specific
+ * callbacks, for dynamic registration and unregistration.
+ */
+void kgdb_arch_exit(void)
+{
+ unregister_break_hook(&kgdb_brkpt_hook);
+ unregister_break_hook(&kgdb_compiled_brkpt_hook);
+ unregister_step_hook(&kgdb_step_hook);
+ unregister_die_notifier(&kgdb_notifier);
+}
+
+/*
+ * AArch64 instructions are always little-endian, so the
+ * break instruction below is encoded in LE format.
+ */
+struct kgdb_arch arch_kgdb_ops = {
+ .gdb_bpt_instr = {
+ KGDB_DYN_BRK_INS_BYTE0,
+ KGDB_DYN_BRK_INS_BYTE1,
+ KGDB_DYN_BRK_INS_BYTE2,
+ KGDB_DYN_BRK_INS_BYTE3,
+ }
+};
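
The four gdb_bpt_instr bytes above are simply the little-endian bytes of
an A64 BRK instruction. As an illustrative sketch (brk_insn() is a
hypothetical helper; the BRK encoding itself is architectural):

	/* A64 "BRK #imm16" encoding: 0xd4200000 | (imm16 << 5) */
	static u32 brk_insn(u16 imm16)
	{
		return 0xd4200000 | ((u32)imm16 << 5);
	}
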
diff --git a/arch/arm64/kernel/kuser32.S b/arch/arm64/kernel/kuser32.S
index 2f5b3ff..bbd726c 100644
--- a/arch/arm64/kernel/kuser32.S
+++ b/arch/arm64/kernel/kuser32.S
@@ -38,33 +38,30 @@
.inst 0xe92d00f0 // push {r4, r5, r6, r7}
.inst 0xe1c040d0 // ldrd r4, r5, [r0]
.inst 0xe1c160d0 // ldrd r6, r7, [r1]
- .inst 0xf57ff05f // dmb sy
- .inst 0xe1b20f9f // 1: ldrexd r0, r1, [r2]
+ .inst 0xe1b20e9f // 1: ldaexd r0, r1, [r2]
.inst 0xe0303004 // eors r3, r0, r4
.inst 0x00313005 // eoreqs r3, r1, r5
- .inst 0x01a23f96 // strexdeq r3, r6, [r2]
+ .inst 0x01a23e96 // stlexdeq r3, r6, [r2]
.inst 0x03330001 // teqeq r3, #1
.inst 0x0afffff9 // beq 1b
- .inst 0xf57ff05f // dmb sy
.inst 0xe2730000 // rsbs r0, r3, #0
.inst 0xe8bd00f0 // pop {r4, r5, r6, r7}
.inst 0xe12fff1e // bx lr
.align 5
__kuser_memory_barrier: // 0xffff0fa0
- .inst 0xf57ff05f // dmb sy
+ .inst 0xf57ff05b // dmb ish
.inst 0xe12fff1e // bx lr
.align 5
__kuser_cmpxchg: // 0xffff0fc0
- .inst 0xf57ff05f // dmb sy
- .inst 0xe1923f9f // 1: ldrex r3, [r2]
+ .inst 0xe1923e9f // 1: ldaex r3, [r2]
.inst 0xe0533000 // subs r3, r3, r0
- .inst 0x01823f91 // strexeq r3, r1, [r2]
+ .inst 0x01823e91 // stlexeq r3, r1, [r2]
.inst 0x03330001 // teqeq r3, #1
.inst 0x0afffffa // beq 1b
.inst 0xe2730000 // rsbs r0, r3, #0
- .inst 0xeaffffef // b <__kuser_memory_barrier>
+ .inst 0xe12fff1e // bx lr
.align 5
__kuser_get_tls: // 0xffff0fe0
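
The kuser helpers above drop the explicit "dmb sy" barriers in favour of
acquire/release exclusives (ldaex/stlex), which provide the required
ordering at the access itself. As a rough, hedged C11 sketch of the
semantics __kuser_cmpxchg provides (kuser_cmpxchg() is hypothetical, not
the actual implementation):

	/* Returns 0 on success, non-zero otherwise, like the helper */
	int kuser_cmpxchg(int oldval, int newval, int *ptr)
	{
		int ok = __atomic_compare_exchange_n(ptr, &oldval, newval, 0,
						     __ATOMIC_ACQ_REL,
						     __ATOMIC_ACQUIRE);
		return ok ? 0 : -1;
	}
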
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index ca0e3d5..df08a6e 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -25,6 +25,10 @@
#include <linux/mm.h>
#include <linux/moduleloader.h>
#include <linux/vmalloc.h>
+#include <asm/insn.h>
+
+#define AARCH64_INSN_IMM_MOVNZ AARCH64_INSN_IMM_MAX
+#define AARCH64_INSN_IMM_MOVK AARCH64_INSN_IMM_16
void *module_alloc(unsigned long size)
{
@@ -94,25 +98,18 @@
return 0;
}
-enum aarch64_imm_type {
- INSN_IMM_MOVNZ,
- INSN_IMM_MOVK,
- INSN_IMM_ADR,
- INSN_IMM_26,
- INSN_IMM_19,
- INSN_IMM_16,
- INSN_IMM_14,
- INSN_IMM_12,
- INSN_IMM_9,
-};
-
-static u32 encode_insn_immediate(enum aarch64_imm_type type, u32 insn, u64 imm)
+static int reloc_insn_movw(enum aarch64_reloc_op op, void *place, u64 val,
+ int lsb, enum aarch64_insn_imm_type imm_type)
{
- u32 immlo, immhi, lomask, himask, mask;
- int shift;
+ u64 imm, limit = 0;
+ s64 sval;
+ u32 insn = le32_to_cpu(*(u32 *)place);
- switch (type) {
- case INSN_IMM_MOVNZ:
+ sval = do_reloc(op, place, val);
+ sval >>= lsb;
+ imm = sval & 0xffff;
+
+ if (imm_type == AARCH64_INSN_IMM_MOVNZ) {
/*
* For signed MOVW relocations, we have to manipulate the
* instruction encoding depending on whether or not the
@@ -131,70 +128,12 @@
*/
imm = ~imm;
}
- case INSN_IMM_MOVK:
- mask = BIT(16) - 1;
- shift = 5;
- break;
- case INSN_IMM_ADR:
- lomask = 0x3;
- himask = 0x7ffff;
- immlo = imm & lomask;
- imm >>= 2;
- immhi = imm & himask;
- imm = (immlo << 24) | (immhi);
- mask = (lomask << 24) | (himask);
- shift = 5;
- break;
- case INSN_IMM_26:
- mask = BIT(26) - 1;
- shift = 0;
- break;
- case INSN_IMM_19:
- mask = BIT(19) - 1;
- shift = 5;
- break;
- case INSN_IMM_16:
- mask = BIT(16) - 1;
- shift = 5;
- break;
- case INSN_IMM_14:
- mask = BIT(14) - 1;
- shift = 5;
- break;
- case INSN_IMM_12:
- mask = BIT(12) - 1;
- shift = 10;
- break;
- case INSN_IMM_9:
- mask = BIT(9) - 1;
- shift = 12;
- break;
- default:
- pr_err("encode_insn_immediate: unknown immediate encoding %d\n",
- type);
- return 0;
+ imm_type = AARCH64_INSN_IMM_MOVK;
}
- /* Update the immediate field. */
- insn &= ~(mask << shift);
- insn |= (imm & mask) << shift;
-
- return insn;
-}
-
-static int reloc_insn_movw(enum aarch64_reloc_op op, void *place, u64 val,
- int lsb, enum aarch64_imm_type imm_type)
-{
- u64 imm, limit = 0;
- s64 sval;
- u32 insn = *(u32 *)place;
-
- sval = do_reloc(op, place, val);
- sval >>= lsb;
- imm = sval & 0xffff;
-
/* Update the instruction with the new encoding. */
- *(u32 *)place = encode_insn_immediate(imm_type, insn, imm);
+ insn = aarch64_insn_encode_immediate(imm_type, insn, imm);
+ *(u32 *)place = cpu_to_le32(insn);
/* Shift out the immediate field. */
sval >>= 16;
@@ -203,9 +142,9 @@
* For unsigned immediates, the overflow check is straightforward.
* For signed immediates, the sign bit is actually the bit past the
* most significant bit of the field.
- * The INSN_IMM_16 immediate type is unsigned.
+ * The AARCH64_INSN_IMM_16 immediate type is unsigned.
*/
- if (imm_type != INSN_IMM_16) {
+ if (imm_type != AARCH64_INSN_IMM_16) {
sval++;
limit++;
}
@@ -218,11 +157,11 @@
}
static int reloc_insn_imm(enum aarch64_reloc_op op, void *place, u64 val,
- int lsb, int len, enum aarch64_imm_type imm_type)
+ int lsb, int len, enum aarch64_insn_imm_type imm_type)
{
u64 imm, imm_mask;
s64 sval;
- u32 insn = *(u32 *)place;
+ u32 insn = le32_to_cpu(*(u32 *)place);
/* Calculate the relocation value. */
sval = do_reloc(op, place, val);
@@ -233,7 +172,8 @@
imm = sval & imm_mask;
/* Update the instruction's immediate field. */
- *(u32 *)place = encode_insn_immediate(imm_type, insn, imm);
+ insn = aarch64_insn_encode_immediate(imm_type, insn, imm);
+ *(u32 *)place = cpu_to_le32(insn);
/*
* Extract the upper value bits (including the sign bit) and
@@ -315,125 +255,125 @@
overflow_check = false;
case R_AARCH64_MOVW_UABS_G0:
ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
- INSN_IMM_16);
+ AARCH64_INSN_IMM_16);
break;
case R_AARCH64_MOVW_UABS_G1_NC:
overflow_check = false;
case R_AARCH64_MOVW_UABS_G1:
ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
- INSN_IMM_16);
+ AARCH64_INSN_IMM_16);
break;
case R_AARCH64_MOVW_UABS_G2_NC:
overflow_check = false;
case R_AARCH64_MOVW_UABS_G2:
ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
- INSN_IMM_16);
+ AARCH64_INSN_IMM_16);
break;
case R_AARCH64_MOVW_UABS_G3:
/* We're using the top bits so we can't overflow. */
overflow_check = false;
ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 48,
- INSN_IMM_16);
+ AARCH64_INSN_IMM_16);
break;
case R_AARCH64_MOVW_SABS_G0:
ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
- INSN_IMM_MOVNZ);
+ AARCH64_INSN_IMM_MOVNZ);
break;
case R_AARCH64_MOVW_SABS_G1:
ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
- INSN_IMM_MOVNZ);
+ AARCH64_INSN_IMM_MOVNZ);
break;
case R_AARCH64_MOVW_SABS_G2:
ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
- INSN_IMM_MOVNZ);
+ AARCH64_INSN_IMM_MOVNZ);
break;
case R_AARCH64_MOVW_PREL_G0_NC:
overflow_check = false;
ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
- INSN_IMM_MOVK);
+ AARCH64_INSN_IMM_MOVK);
break;
case R_AARCH64_MOVW_PREL_G0:
ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
- INSN_IMM_MOVNZ);
+ AARCH64_INSN_IMM_MOVNZ);
break;
case R_AARCH64_MOVW_PREL_G1_NC:
overflow_check = false;
ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
- INSN_IMM_MOVK);
+ AARCH64_INSN_IMM_MOVK);
break;
case R_AARCH64_MOVW_PREL_G1:
ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
- INSN_IMM_MOVNZ);
+ AARCH64_INSN_IMM_MOVNZ);
break;
case R_AARCH64_MOVW_PREL_G2_NC:
overflow_check = false;
ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
- INSN_IMM_MOVK);
+ AARCH64_INSN_IMM_MOVK);
break;
case R_AARCH64_MOVW_PREL_G2:
ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
- INSN_IMM_MOVNZ);
+ AARCH64_INSN_IMM_MOVNZ);
break;
case R_AARCH64_MOVW_PREL_G3:
/* We're using the top bits so we can't overflow. */
overflow_check = false;
ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 48,
- INSN_IMM_MOVNZ);
+ AARCH64_INSN_IMM_MOVNZ);
break;
/* Immediate instruction relocations. */
case R_AARCH64_LD_PREL_LO19:
ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
- INSN_IMM_19);
+ AARCH64_INSN_IMM_19);
break;
case R_AARCH64_ADR_PREL_LO21:
ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21,
- INSN_IMM_ADR);
+ AARCH64_INSN_IMM_ADR);
break;
case R_AARCH64_ADR_PREL_PG_HI21_NC:
overflow_check = false;
case R_AARCH64_ADR_PREL_PG_HI21:
ovf = reloc_insn_imm(RELOC_OP_PAGE, loc, val, 12, 21,
- INSN_IMM_ADR);
+ AARCH64_INSN_IMM_ADR);
break;
case R_AARCH64_ADD_ABS_LO12_NC:
case R_AARCH64_LDST8_ABS_LO12_NC:
overflow_check = false;
ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 0, 12,
- INSN_IMM_12);
+ AARCH64_INSN_IMM_12);
break;
case R_AARCH64_LDST16_ABS_LO12_NC:
overflow_check = false;
ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 1, 11,
- INSN_IMM_12);
+ AARCH64_INSN_IMM_12);
break;
case R_AARCH64_LDST32_ABS_LO12_NC:
overflow_check = false;
ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 2, 10,
- INSN_IMM_12);
+ AARCH64_INSN_IMM_12);
break;
case R_AARCH64_LDST64_ABS_LO12_NC:
overflow_check = false;
ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 3, 9,
- INSN_IMM_12);
+ AARCH64_INSN_IMM_12);
break;
case R_AARCH64_LDST128_ABS_LO12_NC:
overflow_check = false;
ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 4, 8,
- INSN_IMM_12);
+ AARCH64_INSN_IMM_12);
break;
case R_AARCH64_TSTBR14:
ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 14,
- INSN_IMM_14);
+ AARCH64_INSN_IMM_14);
break;
case R_AARCH64_CONDBR19:
ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
- INSN_IMM_19);
+ AARCH64_INSN_IMM_19);
break;
case R_AARCH64_JUMP26:
case R_AARCH64_CALL26:
ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 26,
- INSN_IMM_26);
+ AARCH64_INSN_IMM_26);
break;
default:
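
A hedged worked example of the MOVW path above: for R_AARCH64_MOVW_UABS_G1
with val = 0x12345678 and lsb = 16, reloc_insn_movw() computes

	sval = 0x12345678 >> 16 = 0x1234;
	imm  = sval & 0xffff    = 0x1234;

and aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, ...) places the
16-bit field at bit 5 of the MOVZ/MOVK encoding (the value of val here is
purely illustrative).
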
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 9ba33c4..dfcd8fa 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -22,6 +22,7 @@
#include <linux/bitmap.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/perf_event.h>
@@ -107,7 +108,12 @@
static int
armpmu_map_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
{
- int mapping = (*event_map)[config];
+ int mapping;
+
+ if (config >= PERF_COUNT_HW_MAX)
+ return -EINVAL;
+
+ mapping = (*event_map)[config];
return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
}
@@ -317,7 +323,13 @@
struct hw_perf_event fake_event = event->hw;
struct pmu *leader_pmu = event->group_leader->pmu;
- if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF)
+ if (is_software_event(event))
+ return 1;
+
+ if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
+ return 1;
+
+ if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
return 1;
return armpmu->get_event_idx(hw_events, &fake_event) >= 0;
@@ -352,26 +364,53 @@
}
static void
+armpmu_disable_percpu_irq(void *data)
+{
+ unsigned int irq = *(unsigned int *)data;
+ disable_percpu_irq(irq);
+}
+
+static void
armpmu_release_hardware(struct arm_pmu *armpmu)
{
- int i, irq, irqs;
+ int irq;
+ unsigned int i, irqs;
struct platform_device *pmu_device = armpmu->plat_device;
irqs = min(pmu_device->num_resources, num_possible_cpus());
+ if (!irqs)
+ return;
- for (i = 0; i < irqs; ++i) {
- if (!cpumask_test_and_clear_cpu(i, &armpmu->active_irqs))
- continue;
- irq = platform_get_irq(pmu_device, i);
- if (irq >= 0)
- free_irq(irq, armpmu);
+ irq = platform_get_irq(pmu_device, 0);
+ if (irq <= 0)
+ return;
+
+ if (irq_is_percpu(irq)) {
+ on_each_cpu(armpmu_disable_percpu_irq, &irq, 1);
+ free_percpu_irq(irq, &cpu_hw_events);
+ } else {
+ for (i = 0; i < irqs; ++i) {
+ if (!cpumask_test_and_clear_cpu(i, &armpmu->active_irqs))
+ continue;
+ irq = platform_get_irq(pmu_device, i);
+ if (irq > 0)
+ free_irq(irq, armpmu);
+ }
}
}
+static void
+armpmu_enable_percpu_irq(void *data)
+{
+ unsigned int irq = *(unsigned int *)data;
+ enable_percpu_irq(irq, IRQ_TYPE_NONE);
+}
+
static int
armpmu_reserve_hardware(struct arm_pmu *armpmu)
{
- int i, err, irq, irqs;
+ int err, irq;
+ unsigned int i, irqs;
struct platform_device *pmu_device = armpmu->plat_device;
if (!pmu_device) {
@@ -380,39 +419,59 @@
}
irqs = min(pmu_device->num_resources, num_possible_cpus());
- if (irqs < 1) {
+ if (!irqs) {
pr_err("no irqs for PMUs defined\n");
return -ENODEV;
}
- for (i = 0; i < irqs; ++i) {
- err = 0;
- irq = platform_get_irq(pmu_device, i);
- if (irq < 0)
- continue;
+ irq = platform_get_irq(pmu_device, 0);
+ if (irq <= 0) {
+ pr_err("failed to get valid irq for PMU device\n");
+ return -ENODEV;
+ }
- /*
- * If we have a single PMU interrupt that we can't shift,
- * assume that we're running on a uniprocessor machine and
- * continue. Otherwise, continue without this interrupt.
- */
- if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
- pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
- irq, i);
- continue;
- }
+ if (irq_is_percpu(irq)) {
+ err = request_percpu_irq(irq, armpmu->handle_irq,
+ "arm-pmu", &cpu_hw_events);
- err = request_irq(irq, armpmu->handle_irq,
- IRQF_NOBALANCING,
- "arm-pmu", armpmu);
if (err) {
- pr_err("unable to request IRQ%d for ARM PMU counters\n",
- irq);
+ pr_err("unable to request percpu IRQ%d for ARM PMU counters\n",
+ irq);
armpmu_release_hardware(armpmu);
return err;
}
- cpumask_set_cpu(i, &armpmu->active_irqs);
+ on_each_cpu(armpmu_enable_percpu_irq, &irq, 1);
+ } else {
+ for (i = 0; i < irqs; ++i) {
+ err = 0;
+ irq = platform_get_irq(pmu_device, i);
+ if (irq <= 0)
+ continue;
+
+ /*
+ * If we have a single PMU interrupt that we can't shift,
+ * assume that we're running on a uniprocessor machine and
+ * continue. Otherwise, continue without this interrupt.
+ */
+ if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
+ pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
+ irq, i);
+ continue;
+ }
+
+ err = request_irq(irq, armpmu->handle_irq,
+ IRQF_NOBALANCING,
+ "arm-pmu", armpmu);
+ if (err) {
+ pr_err("unable to request IRQ%d for ARM PMU counters\n",
+ irq);
+ armpmu_release_hardware(armpmu);
+ return err;
+ }
+
+ cpumask_set_cpu(i, &armpmu->active_irqs);
+ }
}
return 0;
@@ -773,7 +832,7 @@
/*
* PMXEVTYPER: Event selection reg
*/
-#define ARMV8_EVTYPE_MASK 0xc00000ff /* Mask for writable bits */
+#define ARMV8_EVTYPE_MASK 0xc80000ff /* Mask for writable bits */
#define ARMV8_EVTYPE_EVENT 0xff /* Mask for EVENT bits */
/*
@@ -1288,8 +1347,8 @@
* Callchain handling code.
*/
struct frame_tail {
- struct frame_tail __user *fp;
- unsigned long lr;
+ struct frame_tail __user *fp;
+ unsigned long lr;
} __attribute__((packed));
/*
@@ -1326,22 +1385,84 @@
return buftail.fp;
}
+#ifdef CONFIG_COMPAT
+/*
+ * The registers we're interested in are at the end of the variable
+ * length saved register structure. The fp points at the end of this
+ * structure so the address of this struct is:
+ * (struct compat_frame_tail *)(xxx->fp)-1
+ *
+ * This code has been adapted from the ARM OProfile support.
+ */
+struct compat_frame_tail {
+ compat_uptr_t fp; /* a (struct compat_frame_tail *) in compat mode */
+ u32 sp;
+ u32 lr;
+} __attribute__((packed));
+
+static struct compat_frame_tail __user *
+compat_user_backtrace(struct compat_frame_tail __user *tail,
+ struct perf_callchain_entry *entry)
+{
+ struct compat_frame_tail buftail;
+ unsigned long err;
+
+ /* Also check accessibility of one struct frame_tail beyond */
+ if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
+ return NULL;
+
+ pagefault_disable();
+ err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
+ pagefault_enable();
+
+ if (err)
+ return NULL;
+
+ perf_callchain_store(entry, buftail.lr);
+
+ /*
+ * Frame pointers should strictly progress back up the stack
+ * (towards higher addresses).
+ */
+ if (tail + 1 >= (struct compat_frame_tail __user *)
+ compat_ptr(buftail.fp))
+ return NULL;
+
+ return (struct compat_frame_tail __user *)compat_ptr(buftail.fp) - 1;
+}
+#endif /* CONFIG_COMPAT */
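
A hedged sketch of the AArch32 frame layout assumed above (APCS-style
frames; the compat fp register points just past the saved lr, so the
struct sits immediately below it):

	/*
	 *	  ...		higher addresses
	 *	fp ------->	(one past the saved lr)
	 *	fp -  4: lr
	 *	fp -  8: sp
	 *	fp - 12: fp	<- tail = (struct compat_frame_tail *)fp - 1
	 *	  ...		lower addresses
	 */
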
+
void perf_callchain_user(struct perf_callchain_entry *entry,
struct pt_regs *regs)
{
- struct frame_tail __user *tail;
-
if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
/* We don't support guest os callchain now */
return;
}
perf_callchain_store(entry, regs->pc);
- tail = (struct frame_tail __user *)regs->regs[29];
- while (entry->nr < PERF_MAX_STACK_DEPTH &&
- tail && !((unsigned long)tail & 0xf))
- tail = user_backtrace(tail, entry);
+ if (!compat_user_mode(regs)) {
+ /* AARCH64 mode */
+ struct frame_tail __user *tail;
+
+ tail = (struct frame_tail __user *)regs->regs[29];
+
+ while (entry->nr < PERF_MAX_STACK_DEPTH &&
+ tail && !((unsigned long)tail & 0xf))
+ tail = user_backtrace(tail, entry);
+ } else {
+#ifdef CONFIG_COMPAT
+ /* AARCH32 compat mode */
+ struct compat_frame_tail __user *tail;
+
+ tail = (struct compat_frame_tail __user *)regs->compat_fp - 1;
+
+ while ((entry->nr < PERF_MAX_STACK_DEPTH) &&
+ tail && !((unsigned long)tail & 0x3))
+ tail = compat_user_backtrace(tail, entry);
+#endif
+ }
}
/*
@@ -1369,6 +1490,7 @@
frame.fp = regs->regs[29];
frame.sp = regs->sp;
frame.pc = regs->pc;
+
walk_stackframe(&frame, callchain_trace, entry);
}
diff --git a/arch/arm64/kernel/perf_regs.c b/arch/arm64/kernel/perf_regs.c
new file mode 100644
index 0000000..422ebd6
--- /dev/null
+++ b/arch/arm64/kernel/perf_regs.c
@@ -0,0 +1,46 @@
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/perf_event.h>
+#include <linux/bug.h>
+
+#include <asm/compat.h>
+#include <asm/perf_regs.h>
+#include <asm/ptrace.h>
+
+u64 perf_reg_value(struct pt_regs *regs, int idx)
+{
+ if (WARN_ON_ONCE((u32)idx >= PERF_REG_ARM64_MAX))
+ return 0;
+
+ /*
+ * Compat (i.e. 32 bit) mode:
+ * - PC has been set in the pt_regs struct in kernel_entry,
+ * - Handle SP and LR here.
+ */
+ if (compat_user_mode(regs)) {
+ if ((u32)idx == PERF_REG_ARM64_SP)
+ return regs->compat_sp;
+ if ((u32)idx == PERF_REG_ARM64_LR)
+ return regs->compat_lr;
+ }
+
+ return regs->regs[idx];
+}
+
+#define REG_RESERVED (~((1ULL << PERF_REG_ARM64_MAX) - 1))
+
+int perf_reg_validate(u64 mask)
+{
+ if (!mask || mask & REG_RESERVED)
+ return -EINVAL;
+
+ return 0;
+}
+
+u64 perf_reg_abi(struct task_struct *task)
+{
+ if (is_compat_thread(task_thread_info(task)))
+ return PERF_SAMPLE_REGS_ABI_32;
+ else
+ return PERF_SAMPLE_REGS_ABI_64;
+}
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 977ca8a..6160f5d 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -34,6 +34,7 @@
#include <linux/kallsyms.h>
#include <linux/init.h>
#include <linux/cpu.h>
+#include <linux/cpuidle.h>
#include <linux/elfcore.h>
#include <linux/pm.h>
#include <linux/tick.h>
@@ -72,8 +73,17 @@
void soft_restart(unsigned long addr)
{
+ typedef void (*phys_reset_t)(unsigned long);
+ phys_reset_t phys_reset;
+
setup_restart();
- cpu_reset(addr);
+
+ /* Switch to the identity mapping */
+ phys_reset = (phys_reset_t)virt_to_phys(cpu_reset);
+ phys_reset(addr);
+
+ /* Should never get here */
+ BUG();
}
/*
@@ -99,10 +109,19 @@
* This should do all the clock switching and wait for interrupt
* tricks
*/
- cpu_do_idle();
- local_irq_enable();
+ if (cpuidle_idle_call()) {
+ cpu_do_idle();
+ local_irq_enable();
+ }
}
+#ifdef CONFIG_HOTPLUG_CPU
+void arch_cpu_idle_dead(void)
+{
+ cpu_die();
+}
+#endif
+
void machine_shutdown(void)
{
#ifdef CONFIG_SMP
@@ -208,15 +227,26 @@
void __show_regs(struct pt_regs *regs)
{
- int i;
+ int i, top_reg;
+ u64 lr, sp;
+
+ if (compat_user_mode(regs)) {
+ lr = regs->compat_lr;
+ sp = regs->compat_sp;
+ top_reg = 12;
+ } else {
+ lr = regs->regs[30];
+ sp = regs->sp;
+ top_reg = 29;
+ }
show_regs_print_info(KERN_DEFAULT);
print_symbol("PC is at %s\n", instruction_pointer(regs));
- print_symbol("LR is at %s\n", regs->regs[30]);
+ print_symbol("LR is at %s\n", lr);
printk("pc : [<%016llx>] lr : [<%016llx>] pstate: %08llx\n",
- regs->pc, regs->regs[30], regs->pstate);
- printk("sp : %016llx\n", regs->sp);
- for (i = 29; i >= 0; i--) {
+ regs->pc, lr, regs->pstate);
+ printk("sp : %016llx\n", sp);
+ for (i = top_reg; i >= 0; i--) {
printk("x%-2d: %016llx ", i, regs->regs[i]);
if (i % 2 == 0)
printk("\n");
@@ -346,7 +376,7 @@
* Complete any pending TLB or cache maintenance on this CPU in case
* the thread migrates to a different CPU.
*/
- dsb();
+ dsb(ish);
/* the actual thread switch */
last = cpu_switch_to(prev, next);
@@ -357,6 +387,7 @@
unsigned long get_wchan(struct task_struct *p)
{
struct stackframe frame;
+ unsigned long stack_page;
int count = 0;
if (!p || p == current || p->state == TASK_RUNNING)
return 0;
@@ -364,9 +395,11 @@
frame.fp = thread_saved_fp(p);
frame.sp = thread_saved_sp(p);
frame.pc = thread_saved_pc(p);
+ stack_page = (unsigned long)task_stack_page(p);
do {
- int ret = unwind_frame(&frame);
- if (ret < 0)
+ if (frame.sp < stack_page ||
+ frame.sp >= stack_page + THREAD_SIZE ||
+ unwind_frame(&frame))
return 0;
if (!in_sched_functions(frame.pc))
return frame.pc;
diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c
index 14f73c4..ea4828a 100644
--- a/arch/arm64/kernel/psci.c
+++ b/arch/arm64/kernel/psci.c
@@ -17,12 +17,32 @@
#include <linux/init.h>
#include <linux/of.h>
+#include <linux/smp.h>
#include <asm/compiler.h>
+#include <asm/cpu_ops.h>
#include <asm/errno.h>
#include <asm/psci.h>
+#include <asm/smp_plat.h>
-struct psci_operations psci_ops;
+#define PSCI_POWER_STATE_TYPE_STANDBY 0
+#define PSCI_POWER_STATE_TYPE_POWER_DOWN 1
+
+struct psci_power_state {
+ u16 id;
+ u8 type;
+ u8 affinity_level;
+};
+
+struct psci_operations {
+ int (*cpu_suspend)(struct psci_power_state state,
+ unsigned long entry_point);
+ int (*cpu_off)(struct psci_power_state state);
+ int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
+ int (*migrate)(unsigned long cpuid);
+};
+
+static struct psci_operations psci_ops;
static int (*invoke_psci_fn)(u64, u64, u64, u64);
@@ -156,22 +176,20 @@
{},
};
-int __init psci_init(void)
+void __init psci_init(void)
{
struct device_node *np;
const char *method;
u32 id;
- int err = 0;
np = of_find_matching_node(NULL, psci_of_match);
if (!np)
- return -ENODEV;
+ return;
pr_info("probing function IDs from device-tree\n");
if (of_property_read_string(np, "method", &method)) {
pr_warning("missing \"method\" property\n");
- err = -ENXIO;
goto out_put_node;
}
@@ -181,7 +199,6 @@
invoke_psci_fn = __invoke_psci_fn_smc;
} else {
pr_warning("invalid \"method\" property: %s\n", method);
- err = -EINVAL;
goto out_put_node;
}
@@ -207,5 +224,70 @@
out_put_node:
of_node_put(np);
+ return;
+}
+
+#ifdef CONFIG_SMP
+
+static int __init cpu_psci_cpu_init(struct device_node *dn, unsigned int cpu)
+{
+ return 0;
+}
+
+static int __init cpu_psci_cpu_prepare(unsigned int cpu)
+{
+ if (!psci_ops.cpu_on) {
+ pr_err("no cpu_on method, not booting CPU%d\n", cpu);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int cpu_psci_cpu_boot(unsigned int cpu)
+{
+ int err = psci_ops.cpu_on(cpu_logical_map(cpu), __pa(secondary_entry));
+ if (err)
+ pr_err("failed to boot CPU%d (%d)\n", cpu, err);
+
return err;
}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static int cpu_psci_cpu_disable(unsigned int cpu)
+{
+ /* Fail early if we don't have CPU_OFF support */
+ if (!psci_ops.cpu_off)
+ return -EOPNOTSUPP;
+ return 0;
+}
+
+static void cpu_psci_cpu_die(unsigned int cpu)
+{
+ int ret;
+ /*
+	 * There are no known implementations of PSCI actually using the
+	 * power state field, so pass a sensible default for now.
+ */
+ struct psci_power_state state = {
+ .type = PSCI_POWER_STATE_TYPE_POWER_DOWN,
+ };
+
+ ret = psci_ops.cpu_off(state);
+
+ pr_crit("unable to power off CPU%u (%d)\n", cpu, ret);
+}
+#endif
+
+const struct cpu_operations cpu_psci_ops = {
+ .name = "psci",
+ .cpu_init = cpu_psci_cpu_init,
+ .cpu_prepare = cpu_psci_cpu_prepare,
+ .cpu_boot = cpu_psci_cpu_boot,
+#ifdef CONFIG_HOTPLUG_CPU
+ .cpu_disable = cpu_psci_cpu_disable,
+ .cpu_die = cpu_psci_cpu_die,
+#endif
+};
+
+#endif
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index f47e70f..7003d81 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -218,31 +218,29 @@
{
int err, len, type, disabled = !ctrl.enabled;
- if (disabled) {
- len = 0;
- type = HW_BREAKPOINT_EMPTY;
- } else {
- err = arch_bp_generic_fields(ctrl, &len, &type);
- if (err)
- return err;
+ attr->disabled = disabled;
+ if (disabled)
+ return 0;
- switch (note_type) {
- case NT_ARM_HW_BREAK:
- if ((type & HW_BREAKPOINT_X) != type)
- return -EINVAL;
- break;
- case NT_ARM_HW_WATCH:
- if ((type & HW_BREAKPOINT_RW) != type)
- return -EINVAL;
- break;
- default:
+ err = arch_bp_generic_fields(ctrl, &len, &type);
+ if (err)
+ return err;
+
+ switch (note_type) {
+ case NT_ARM_HW_BREAK:
+ if ((type & HW_BREAKPOINT_X) != type)
return -EINVAL;
- }
+ break;
+ case NT_ARM_HW_WATCH:
+ if ((type & HW_BREAKPOINT_RW) != type)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
}
attr->bp_len = len;
attr->bp_type = type;
- attr->disabled = disabled;
return 0;
}
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 06aeec4..f96e3aa 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -42,9 +42,11 @@
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
+#include <asm/fixmap.h>
#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/cputable.h>
+#include <asm/cpu_ops.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
@@ -108,16 +110,95 @@
printk("%s", buf);
}
+void __init smp_setup_processor_id(void)
+{
+ /*
+	 * clear __my_cpu_offset on the boot CPU to avoid a hang caused
+	 * by using percpu variables too early; for example, lockdep will
+	 * access a percpu variable inside lock_release
+ */
+ set_my_cpu_offset(0);
+}
+
+bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
+{
+ return phys_id == cpu_logical_map(cpu);
+}
+
+struct mpidr_hash mpidr_hash;
+#ifdef CONFIG_SMP
+/**
+ * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
+ * level in order to build a linear index from an
+ * MPIDR value. Resulting algorithm is a collision
+ *			  MPIDR value. The resulting algorithm is a
+ *			  collision-free hash carried out through
+ *			  shifting and ORing.
+static void __init smp_build_mpidr_hash(void)
+{
+ u32 i, affinity, fs[4], bits[4], ls;
+ u64 mask = 0;
+ /*
+ * Pre-scan the list of MPIDRS and filter out bits that do
+ * not contribute to affinity levels, ie they never toggle.
+ */
+ for_each_possible_cpu(i)
+ mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
+ pr_debug("mask of set bits %#llx\n", mask);
+ /*
+ * Find and stash the last and first bit set at all affinity levels to
+ * check how many bits are required to represent them.
+ */
+ for (i = 0; i < 4; i++) {
+ affinity = MPIDR_AFFINITY_LEVEL(mask, i);
+ /*
+ * Find the MSB bit and LSB bits position
+ * to determine how many bits are required
+ * to express the affinity level.
+ */
+ ls = fls(affinity);
+ fs[i] = affinity ? ffs(affinity) - 1 : 0;
+ bits[i] = ls - fs[i];
+ }
+ /*
+ * An index can be created from the MPIDR_EL1 by isolating the
+ * significant bits at each affinity level and by shifting
+	 * them in order to compress the 32-bit value space to a
+	 * smaller set of values. This is equivalent to hashing
+	 * the MPIDR_EL1 through shifting and ORing. It is a collision-free
+	 * hash, though not minimal, since some levels might contain a number
+	 * of CPUs that is not an exact power of 2 and their bit
+	 * representation might contain holes, e.g. MPIDR_EL1[7:0] = {0x2, 0x80}.
+ */
+ mpidr_hash.shift_aff[0] = MPIDR_LEVEL_SHIFT(0) + fs[0];
+ mpidr_hash.shift_aff[1] = MPIDR_LEVEL_SHIFT(1) + fs[1] - bits[0];
+ mpidr_hash.shift_aff[2] = MPIDR_LEVEL_SHIFT(2) + fs[2] -
+ (bits[1] + bits[0]);
+ mpidr_hash.shift_aff[3] = MPIDR_LEVEL_SHIFT(3) +
+ fs[3] - (bits[2] + bits[1] + bits[0]);
+ mpidr_hash.mask = mask;
+ mpidr_hash.bits = bits[3] + bits[2] + bits[1] + bits[0];
+ pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] aff3[%u] mask[%#llx] bits[%u]\n",
+ mpidr_hash.shift_aff[0],
+ mpidr_hash.shift_aff[1],
+ mpidr_hash.shift_aff[2],
+ mpidr_hash.shift_aff[3],
+ mpidr_hash.mask,
+ mpidr_hash.bits);
+ /*
+ * 4x is an arbitrary value used to warn on a hash table much bigger
+ * than expected on most systems.
+ */
+ if (mpidr_hash_size() > 4 * num_possible_cpus())
+ pr_warn("Large number of MPIDR hash buckets detected\n");
+ __flush_dcache_area(&mpidr_hash, sizeof(struct mpidr_hash));
+}
+#endif
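
As a hedged illustration of how the precomputed shifts are consumed (this
mirrors the compute_mpidr_hash assembler macro used on the resume path;
mpidr_hash_index() is a hypothetical helper):

	static u32 mpidr_hash_index(u64 mpidr)
	{
		u64 m = mpidr & mpidr_hash.mask;

		/* Each affinity field is shifted down next to the previous one */
		return ((m & 0xff)	   >> mpidr_hash.shift_aff[0]) |
		       ((m & 0xff00)	   >> mpidr_hash.shift_aff[1]) |
		       ((m & 0xff0000)	   >> mpidr_hash.shift_aff[2]) |
		       ((m & 0xff00000000) >> mpidr_hash.shift_aff[3]);
	}
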
+
static void __init setup_processor(void)
{
struct cpu_info *cpu_info;
u64 features, block;
- /*
- * locate processor in the list of supported processor
- * types. The linker builds this table for us from the
- * entries in arch/arm/mm/proc.S
- */
cpu_info = lookup_processor_type(read_cpuid_id());
if (!cpu_info) {
printk("CPU configuration botched (ID %08x), unable to continue.\n",
@@ -332,6 +413,8 @@
*cmdline_p = boot_command_line;
+ early_ioremap_init();
+
parse_early_param();
arm64_memblock_init();
@@ -344,8 +427,10 @@
psci_init();
cpu_logical_map(0) = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
+ cpu_read_bootcpu_ops();
#ifdef CONFIG_SMP
smp_init_cpus();
+ smp_build_mpidr_hash();
#endif
#ifdef CONFIG_VT
@@ -363,7 +448,7 @@
of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
return 0;
}
-arch_initcall(arm64_device_init);
+arch_initcall_sync(arm64_device_init);
static DEFINE_PER_CPU(struct cpu, cpu_data);
@@ -409,9 +494,6 @@
#ifdef CONFIG_SMP
seq_printf(m, "processor\t: %d\n", i);
#endif
- seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n",
- loops_per_jiffy / (500000UL/HZ),
- loops_per_jiffy / (5000UL/HZ) % 100);
}
/* dump out the processor features */
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
index 182b6fc..bbc1aad 100644
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -97,8 +97,7 @@
{
sigset_t set;
int i, err;
- struct aux_context __user *aux =
- (struct aux_context __user *)sf->uc.uc_mcontext.__reserved;
+ void *aux = sf->uc.uc_mcontext.__reserved;
err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
if (err == 0)
@@ -118,8 +117,11 @@
	err |= !valid_user_regs(&regs->user_regs);
- if (err == 0)
- err |= restore_fpsimd_context(&aux->fpsimd);
+ if (err == 0) {
+ struct fpsimd_context *fpsimd_ctx =
+ container_of(aux, struct fpsimd_context, head);
+ err |= restore_fpsimd_context(fpsimd_ctx);
+ }
return err;
}
@@ -164,8 +166,8 @@
struct pt_regs *regs, sigset_t *set)
{
int i, err = 0;
- struct aux_context __user *aux =
- (struct aux_context __user *)sf->uc.uc_mcontext.__reserved;
+ void *aux = sf->uc.uc_mcontext.__reserved;
+ struct _aarch64_ctx *end;
/* set up the stack frame for unwinding */
__put_user_error(regs->regs[29], &sf->fp, err);
@@ -182,12 +184,17 @@
err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));
- if (err == 0)
- err |= preserve_fpsimd_context(&aux->fpsimd);
+ if (err == 0) {
+ struct fpsimd_context *fpsimd_ctx =
+ container_of(aux, struct fpsimd_context, head);
+ err |= preserve_fpsimd_context(fpsimd_ctx);
+ aux += sizeof(*fpsimd_ctx);
+ }
/* set the "end" magic */
- __put_user_error(0, &aux->end.magic, err);
- __put_user_error(0, &aux->end.size, err);
+ end = aux;
+ __put_user_error(0, &end->magic, err);
+ __put_user_error(0, &end->size, err);
return err;
}
diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
index e1b8b9f..7f21c88 100644
--- a/arch/arm64/kernel/signal32.c
+++ b/arch/arm64/kernel/signal32.c
@@ -479,12 +479,13 @@
/* Check if the handler is written for ARM or Thumb */
thumb = handler & 1;
- if (thumb) {
+ if (thumb)
spsr |= COMPAT_PSR_T_BIT;
- spsr &= ~COMPAT_PSR_IT_MASK;
- } else {
+ else
spsr &= ~COMPAT_PSR_T_BIT;
- }
+
+ /* The IT state must be cleared for both ARM and Thumb-2 */
+ spsr &= ~COMPAT_PSR_IT_MASK;
if (ka->sa.sa_flags & SA_RESTORER) {
retcode = ptr_to_compat(ka->sa.sa_restorer);
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
new file mode 100644
index 0000000..b192572
--- /dev/null
+++ b/arch/arm64/kernel/sleep.S
@@ -0,0 +1,184 @@
+#include <linux/errno.h>
+#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
+#include <asm/assembler.h>
+
+ .text
+/*
+ * Implementation of MPIDR_EL1 hash algorithm through shifting
+ * and OR'ing.
+ *
+ * @dst: register containing hash result
+ * @rs0: register containing affinity level 0 bit shift
+ * @rs1: register containing affinity level 1 bit shift
+ * @rs2: register containing affinity level 2 bit shift
+ * @rs3: register containing affinity level 3 bit shift
+ * @mpidr: register containing MPIDR_EL1 value
+ * @mask: register containing MPIDR mask
+ *
+ * Pseudo C-code:
+ *
+ *u32 dst;
+ *
+ *compute_mpidr_hash(u32 rs0, u32 rs1, u32 rs2, u32 rs3, u64 mpidr, u64 mask) {
+ * u32 aff0, aff1, aff2, aff3;
+ * u64 mpidr_masked = mpidr & mask;
+ * aff0 = mpidr_masked & 0xff;
+ * aff1 = mpidr_masked & 0xff00;
+ * aff2 = mpidr_masked & 0xff0000;
+ *	aff3 = mpidr_masked & 0xff00000000;
+ * dst = (aff0 >> rs0 | aff1 >> rs1 | aff2 >> rs2 | aff3 >> rs3);
+ *}
+ * Input registers: rs0, rs1, rs2, rs3, mpidr, mask
+ * Output register: dst
+ * Note: input and output registers must be disjoint register sets
+ *	 (e.g. a macro instance with mpidr = x1 and dst = x1 is invalid)
+ */
+ .macro compute_mpidr_hash dst, rs0, rs1, rs2, rs3, mpidr, mask
+ and \mpidr, \mpidr, \mask // mask out MPIDR bits
+ and \dst, \mpidr, #0xff // mask=aff0
+	lsr	\dst, \dst, \rs0		// dst=aff0>>rs0
+	and	\mask, \mpidr, #0xff00		// mask = aff1
+	lsr	\mask, \mask, \rs1
+	orr	\dst, \dst, \mask		// dst|=(aff1>>rs1)
+	and	\mask, \mpidr, #0xff0000	// mask = aff2
+	lsr	\mask, \mask, \rs2
+	orr	\dst, \dst, \mask		// dst|=(aff2>>rs2)
+	and	\mask, \mpidr, #0xff00000000	// mask = aff3
+	lsr	\mask, \mask, \rs3
+ orr \dst, \dst, \mask // dst|=(aff3>>rs3)
+ .endm
+/*
+ * Save CPU state for a suspend. This saves callee registers, and allocates
+ * space on the kernel stack to save the CPU specific registers + some
+ * other data for resume.
+ *
+ * x0 = suspend finisher argument
+ */
+ENTRY(__cpu_suspend)
+ stp x29, lr, [sp, #-96]!
+ stp x19, x20, [sp,#16]
+ stp x21, x22, [sp,#32]
+ stp x23, x24, [sp,#48]
+ stp x25, x26, [sp,#64]
+ stp x27, x28, [sp,#80]
+ mov x2, sp
+ sub sp, sp, #CPU_SUSPEND_SZ // allocate cpu_suspend_ctx
+ mov x1, sp
+ /*
+ * x1 now points to struct cpu_suspend_ctx allocated on the stack
+ */
+ str x2, [x1, #CPU_CTX_SP]
+ ldr x2, =sleep_save_sp
+ ldr x2, [x2, #SLEEP_SAVE_SP_VIRT]
+#ifdef CONFIG_SMP
+ mrs x7, mpidr_el1
+ ldr x9, =mpidr_hash
+ ldr x10, [x9, #MPIDR_HASH_MASK]
+ /*
+ * Following code relies on the struct mpidr_hash
+ * members size.
+ */
+ ldp w3, w4, [x9, #MPIDR_HASH_SHIFTS]
+ ldp w5, w6, [x9, #(MPIDR_HASH_SHIFTS + 8)]
+ compute_mpidr_hash x8, x3, x4, x5, x6, x7, x10
+ add x2, x2, x8, lsl #3
+#endif
+ bl __cpu_suspend_finisher
+ /*
+ * Never gets here, unless suspend fails.
+ * A successful cpu_suspend should return from cpu_resume;
+ * returning through this code path is considered an error.
+ * If the return value is set to 0, force x0 = -EOPNOTSUPP
+ * to make sure a proper error condition is propagated.
+ */
+ cmp x0, #0
+ mov x3, #-EOPNOTSUPP
+ csel x0, x3, x0, eq
+ add sp, sp, #CPU_SUSPEND_SZ // rewind stack pointer
+ ldp x19, x20, [sp, #16]
+ ldp x21, x22, [sp, #32]
+ ldp x23, x24, [sp, #48]
+ ldp x25, x26, [sp, #64]
+ ldp x27, x28, [sp, #80]
+ ldp x29, lr, [sp], #96
+ ret
+ENDPROC(__cpu_suspend)
+ .ltorg
+
+/*
+ * x0 must contain the sctlr value retrieved from restored context
+ */
+ENTRY(cpu_resume_mmu)
+ ldr x3, =cpu_resume_after_mmu
+ msr sctlr_el1, x0 // restore sctlr_el1
+ isb
+ br x3 // global jump to virtual address
+ENDPROC(cpu_resume_mmu)
+cpu_resume_after_mmu:
+ mov x0, #0 // return zero on success
+ ldp x19, x20, [sp, #16]
+ ldp x21, x22, [sp, #32]
+ ldp x23, x24, [sp, #48]
+ ldp x25, x26, [sp, #64]
+ ldp x27, x28, [sp, #80]
+ ldp x29, lr, [sp], #96
+ ret
+ENDPROC(cpu_resume_after_mmu)
+
+ .data
+ENTRY(cpu_resume)
+ bl el2_setup // if in EL2 drop to EL1 cleanly
+#ifdef CONFIG_SMP
+ mrs x1, mpidr_el1
+ adr x4, mpidr_hash_ptr
+ ldr x5, [x4]
+ add x8, x4, x5 // x8 = struct mpidr_hash phys address
+ /* retrieve mpidr_hash members to compute the hash */
+ ldr x2, [x8, #MPIDR_HASH_MASK]
+ ldp w3, w4, [x8, #MPIDR_HASH_SHIFTS]
+ ldp w5, w6, [x8, #(MPIDR_HASH_SHIFTS + 8)]
+ compute_mpidr_hash x7, x3, x4, x5, x6, x1, x2
+ /* x7 contains hash index, let's use it to grab context pointer */
+#else
+ mov x7, xzr
+#endif
+ adr x0, sleep_save_sp
+ ldr x0, [x0, #SLEEP_SAVE_SP_PHYS]
+ ldr x0, [x0, x7, lsl #3]
+ /* load sp from context */
+ ldr x2, [x0, #CPU_CTX_SP]
+ adr x1, sleep_idmap_phys
+ /* load physical address of identity map page table in x1 */
+ ldr x1, [x1]
+ mov sp, x2
+ /*
+ * cpu_do_resume expects x0 to contain context physical address
+ * pointer and x1 to contain physical address of 1:1 page tables
+ */
+ bl cpu_do_resume // PC relative jump, MMU off
+ b cpu_resume_mmu // Resume MMU, never returns
+ENDPROC(cpu_resume)
+
+ .align 3
+mpidr_hash_ptr:
+ /*
+ * offset of mpidr_hash symbol from current location
+ * used to obtain run-time mpidr_hash address with MMU off
+ */
+ .quad mpidr_hash - .
+/*
+ * physical address of identity mapped page tables
+ */
+ .type sleep_idmap_phys, #object
+ENTRY(sleep_idmap_phys)
+ .quad 0
+/*
+ * struct sleep_save_sp {
+ * phys_addr_t *save_ptr_stash;
+ * phys_addr_t save_ptr_stash_phys;
+ * };
+ */
+ .type sleep_save_sp, #object
+ENTRY(sleep_save_sp)
+ .space SLEEP_SAVE_SP_SZ // struct sleep_save_sp
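For reference, the hash that compute_mpidr_hash produces can be modelled in
plain C. This is an illustrative sketch of the pseudo-code above, not kernel
code; in the kernel the shift values come from the mpidr_hash structure:

    #include <stdint.h>

    /* C model of compute_mpidr_hash: collapse the four affinity
     * fields of a masked MPIDR_EL1 value into a dense index. */
    static uint32_t mpidr_hash_model(uint32_t rs0, uint32_t rs1,
                                     uint32_t rs2, uint32_t rs3,
                                     uint64_t mpidr, uint64_t mask)
    {
            uint64_t m = mpidr & mask;
            uint64_t aff0 = m & 0xff;
            uint64_t aff1 = m & 0xff00;
            uint64_t aff2 = m & 0xff0000;
            uint64_t aff3 = m & 0xff00000000ULL;

            return (uint32_t)(aff0 >> rs0 | aff1 >> rs1 |
                              aff2 >> rs2 | aff3 >> rs3);
    }

Both __cpu_suspend and cpu_resume use the resulting index to pick the per-cpu
slot out of the sleep_save_sp array, hence the "lsl #3" scaling by pointer
size in the assembly above.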
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 5d54e37..5381e6f 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -39,6 +39,7 @@
#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
+#include <asm/cpu_ops.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
@@ -54,70 +55,25 @@
* where to place its SVC stack
*/
struct secondary_data secondary_data;
-volatile unsigned long secondary_holding_pen_release = INVALID_HWID;
enum ipi_msg_type {
IPI_RESCHEDULE,
IPI_CALL_FUNC,
IPI_CALL_FUNC_SINGLE,
IPI_CPU_STOP,
+ IPI_TIMER,
};
-static DEFINE_RAW_SPINLOCK(boot_lock);
-
-/*
- * Write secondary_holding_pen_release in a way that is guaranteed to be
- * visible to all observers, irrespective of whether they're taking part
- * in coherency or not. This is necessary for the hotplug code to work
- * reliably.
- */
-static void __cpuinit write_pen_release(u64 val)
-{
- void *start = (void *)&secondary_holding_pen_release;
- unsigned long size = sizeof(secondary_holding_pen_release);
-
- secondary_holding_pen_release = val;
- __flush_dcache_area(start, size);
-}
-
/*
* Boot a secondary CPU, and assign it the specified idle task.
* This also gives us the initial stack to use for this CPU.
*/
static int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
{
- unsigned long timeout;
+ if (cpu_ops[cpu]->cpu_boot)
+ return cpu_ops[cpu]->cpu_boot(cpu);
- /*
- * Set synchronisation state between this boot processor
- * and the secondary one
- */
- raw_spin_lock(&boot_lock);
-
- /*
- * Update the pen release flag.
- */
- write_pen_release(cpu_logical_map(cpu));
-
- /*
- * Send an event, causing the secondaries to read pen_release.
- */
- sev();
-
- timeout = jiffies + (1 * HZ);
- while (time_before(jiffies, timeout)) {
- if (secondary_holding_pen_release == INVALID_HWID)
- break;
- udelay(10);
- }
-
- /*
- * Now the secondary core is starting up let it run its
- * calibrations, then wait for it to finish
- */
- raw_spin_unlock(&boot_lock);
-
- return secondary_holding_pen_release != INVALID_HWID ? -ENOSYS : 0;
+ return -EOPNOTSUPP;
}
static DECLARE_COMPLETION(cpu_running);
@@ -158,6 +114,11 @@
return ret;
}
+static void smp_store_cpu_info(unsigned int cpuid)
+{
+ store_cpu_topology(cpuid);
+}
+
/*
* This is the secondary CPU boot entry. We're using this CPU's
* idle thread stack, but a set of temporary page tables.
@@ -167,8 +128,6 @@
struct mm_struct *mm = &init_mm;
unsigned int cpu = smp_processor_id();
- printk("CPU%u: Booted secondary processor\n", cpu);
-
/*
* All kernel threads share the same mm context; grab a
* reference and switch to it.
@@ -177,6 +136,9 @@
current->active_mm = mm;
cpumask_set_cpu(cpu, mm_cpumask(mm));
+ set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
+ printk("CPU%u: Booted secondary processor\n", cpu);
+
/*
* TTBR0 is only used for the identity mapping at this stage. Make it
* point to zero page to avoid speculatively fetching new entries.
@@ -187,24 +149,8 @@
preempt_disable();
trace_hardirqs_off();
- /*
- * Let the primary processor know we're out of the
- * pen, then head off into the C entry point
- */
- write_pen_release(INVALID_HWID);
-
- /*
- * Synchronise with the boot thread.
- */
- raw_spin_lock(&boot_lock);
- raw_spin_unlock(&boot_lock);
-
- /*
- * Enable local interrupts.
- */
- notify_cpu_starting(cpu);
- local_irq_enable();
- local_fiq_enable();
+ if (cpu_ops[cpu]->cpu_postboot)
+ cpu_ops[cpu]->cpu_postboot();
/*
* OK, now it's safe to let the boot CPU continue. Wait for
@@ -215,11 +161,118 @@
complete(&cpu_running);
/*
+ * Enable GIC and timers.
+ */
+ notify_cpu_starting(cpu);
+
+ local_dbg_enable();
+ local_irq_enable();
+ local_fiq_enable();
+
+ smp_store_cpu_info(cpu);
+
+ /*
* OK, it's off to the idle thread for us
*/
cpu_startup_entry(CPUHP_ONLINE);
}
+#ifdef CONFIG_HOTPLUG_CPU
+static int op_cpu_disable(unsigned int cpu)
+{
+ /*
+ * If we don't have a cpu_die method, abort before we reach the point
+ * of no return. CPU0 may not have cpu_ops, so test for it.
+ */
+ if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_die)
+ return -EOPNOTSUPP;
+
+ /*
+ * We may need to abort a hot unplug for some other mechanism-specific
+ * reason.
+ */
+ if (cpu_ops[cpu]->cpu_disable)
+ return cpu_ops[cpu]->cpu_disable(cpu);
+
+ return 0;
+}
+
+/*
+ * __cpu_disable runs on the processor to be shut down.
+ */
+int __cpu_disable(void)
+{
+ unsigned int cpu = smp_processor_id();
+ int ret;
+
+ ret = op_cpu_disable(cpu);
+ if (ret)
+ return ret;
+
+ /*
+ * Take this CPU offline. Once we clear this, we can't return,
+ * and we must not schedule until we're ready to give up the cpu.
+ */
+ set_cpu_online(cpu, false);
+
+ /*
+ * OK - migrate IRQs away from this CPU
+ */
+ migrate_irqs();
+
+ /*
+ * Remove this CPU from the vm mask set of all processes.
+ */
+ clear_tasks_mm_cpumask(cpu);
+
+ return 0;
+}
+
+static DECLARE_COMPLETION(cpu_died);
+
+/*
+ * called on the thread which is asking for a CPU to be shut down -
+ * waits until shutdown has completed, or the wait times out.
+ */
+void __cpu_die(unsigned int cpu)
+{
+ if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
+ pr_crit("CPU%u: cpu didn't die\n", cpu);
+ return;
+ }
+ pr_notice("CPU%u: shutdown\n", cpu);
+}
+
+/*
+ * Called from the idle thread for the CPU which has been shutdown.
+ *
+ * Note that we disable IRQs here, but do not re-enable them
+ * before returning to the caller. This is also the behaviour
+ * of the other hotplug-cpu capable cores, so presumably coming
+ * out of idle fixes this.
+ */
+void cpu_die(void)
+{
+ unsigned int cpu = smp_processor_id();
+
+ idle_task_exit();
+
+ local_irq_disable();
+
+ /* Tell __cpu_die() that this CPU is now safe to dispose of */
+ complete(&cpu_died);
+
+ /*
+ * Actually shut down the CPU. This must never fail. The specific hotplug
+ * mechanism must perform all required cache maintenance to ensure that
+ * no dirty lines are lost in the process of shutting down the CPU.
+ */
+ cpu_ops[cpu]->cpu_die(cpu);
+
+ BUG();
+}
+#endif
+
void __init smp_cpus_done(unsigned int max_cpus)
{
unsigned long bogosum = loops_per_jiffy * num_online_cpus();
@@ -231,32 +284,11 @@
void __init smp_prepare_boot_cpu(void)
{
+ set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
}
static void (*smp_cross_call)(const struct cpumask *, unsigned int);
-static const struct smp_enable_ops *enable_ops[] __initconst = {
- &smp_spin_table_ops,
- &smp_psci_ops,
- NULL,
-};
-
-static const struct smp_enable_ops *smp_enable_ops[NR_CPUS];
-
-static const struct smp_enable_ops * __init smp_get_enable_ops(const char *name)
-{
- const struct smp_enable_ops **ops = enable_ops;
-
- while (*ops) {
- if (!strcmp(name, (*ops)->name))
- return *ops;
-
- ops++;
- }
-
- return NULL;
-}
-
/*
* Enumerate the possible CPU set from the device tree and build the
* cpu logical map array containing MPIDR values related to logical
@@ -264,9 +296,8 @@
*/
void __init smp_init_cpus(void)
{
- const char *enable_method;
struct device_node *dn = NULL;
- int i, cpu = 1;
+ unsigned int i, cpu = 1;
bool bootcpu_valid = false;
while ((dn = of_find_node_by_type(dn, "cpu"))) {
@@ -335,25 +366,10 @@
if (cpu >= NR_CPUS)
goto next;
- /*
- * We currently support only the "spin-table" enable-method.
- */
- enable_method = of_get_property(dn, "enable-method", NULL);
- if (!enable_method) {
- pr_err("%s: missing enable-method property\n",
- dn->full_name);
+ if (cpu_read_ops(dn, cpu) != 0)
goto next;
- }
- smp_enable_ops[cpu] = smp_get_enable_ops(enable_method);
-
- if (!smp_enable_ops[cpu]) {
- pr_err("%s: invalid enable-method property: %s\n",
- dn->full_name, enable_method);
- goto next;
- }
-
- if (smp_enable_ops[cpu]->init_cpu(dn, cpu))
+ if (cpu_ops[cpu]->cpu_init(dn, cpu))
goto next;
pr_debug("cpu logical map 0x%llx\n", hwid);
@@ -383,8 +399,12 @@
void __init smp_prepare_cpus(unsigned int max_cpus)
{
- int cpu, err;
- unsigned int ncores = num_possible_cpus();
+ int err;
+ unsigned int cpu, ncores = num_possible_cpus();
+
+ init_cpu_topology();
+
+ smp_store_cpu_info(smp_processor_id());
/*
* are we trying to boot more cores than exist?
@@ -411,10 +431,10 @@
if (cpu == smp_processor_id())
continue;
- if (!smp_enable_ops[cpu])
+ if (!cpu_ops[cpu])
continue;
- err = smp_enable_ops[cpu]->prepare_cpu(cpu);
+ err = cpu_ops[cpu]->cpu_prepare(cpu);
if (err)
continue;
@@ -445,6 +465,7 @@
S(IPI_CALL_FUNC, "Function call interrupts"),
S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
S(IPI_CPU_STOP, "CPU stop interrupts"),
+ S(IPI_TIMER, "Timer broadcast interrupts"),
};
void show_ipi_list(struct seq_file *p, int prec)
@@ -454,7 +475,7 @@
for (i = 0; i < NR_IPI; i++) {
seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i + IPI_RESCHEDULE,
prec >= 4 ? " " : "");
- for_each_present_cpu(cpu)
+ for_each_online_cpu(cpu)
seq_printf(p, "%10u ",
__get_irq_stat(cpu, ipi_irqs[i]));
seq_printf(p, " %s\n", ipi_types[i]);
@@ -530,6 +551,14 @@
irq_exit();
break;
+#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
+ case IPI_TIMER:
+ irq_enter();
+ tick_receive_broadcast();
+ irq_exit();
+ break;
+#endif
+
default:
pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr);
break;
@@ -542,6 +571,13 @@
smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}
+#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
+void tick_broadcast(const struct cpumask *mask)
+{
+ smp_cross_call(mask, IPI_TIMER);
+}
+#endif
+
void smp_send_stop(void)
{
unsigned long timeout;
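With the spin-table and PSCI back ends unified behind cpu_ops, every
boot-time and hotplug step above becomes a simple indirect call. A
hypothetical enable-method only fills in the hooks it supports; the names
below are illustrative stand-ins (compare the converted spin-table ops later
in this patch):

    /* Sketch of a cpu_operations backend; the example_* helpers are
     * hypothetical. */
    static const struct cpu_operations example_cpu_ops = {
            .name        = "example-method",
            .cpu_init    = example_cpu_init,    /* parse the DT cpu node */
            .cpu_prepare = example_cpu_prepare, /* boot-CPU side setup */
            .cpu_boot    = example_cpu_boot,    /* kick the secondary */
            /* .cpu_disable/.cpu_die only matter for CONFIG_HOTPLUG_CPU */
    };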
diff --git a/arch/arm64/kernel/smp_psci.c b/arch/arm64/kernel/smp_psci.c
deleted file mode 100644
index 0c53330..0000000
--- a/arch/arm64/kernel/smp_psci.c
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * PSCI SMP initialisation
- *
- * Copyright (C) 2013 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/init.h>
-#include <linux/of.h>
-#include <linux/smp.h>
-
-#include <asm/psci.h>
-#include <asm/smp_plat.h>
-
-static int __init smp_psci_init_cpu(struct device_node *dn, int cpu)
-{
- return 0;
-}
-
-static int __init smp_psci_prepare_cpu(int cpu)
-{
- int err;
-
- if (!psci_ops.cpu_on) {
- pr_err("psci: no cpu_on method, not booting CPU%d\n", cpu);
- return -ENODEV;
- }
-
- err = psci_ops.cpu_on(cpu_logical_map(cpu), __pa(secondary_holding_pen));
- if (err) {
- pr_err("psci: failed to boot CPU%d (%d)\n", cpu, err);
- return err;
- }
-
- return 0;
-}
-
-const struct smp_enable_ops smp_psci_ops __initconst = {
- .name = "psci",
- .init_cpu = smp_psci_init_cpu,
- .prepare_cpu = smp_psci_prepare_cpu,
-};
diff --git a/arch/arm64/kernel/smp_spin_table.c b/arch/arm64/kernel/smp_spin_table.c
index 7c35fa6..e3e5755 100644
--- a/arch/arm64/kernel/smp_spin_table.c
+++ b/arch/arm64/kernel/smp_spin_table.c
@@ -16,15 +16,38 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/delay.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/smp.h>
#include <asm/cacheflush.h>
+#include <asm/cpu_ops.h>
+#include <asm/cputype.h>
+#include <asm/smp_plat.h>
+
+extern void secondary_holding_pen(void);
+volatile unsigned long secondary_holding_pen_release = INVALID_HWID;
static phys_addr_t cpu_release_addr[NR_CPUS];
-static int __init smp_spin_table_init_cpu(struct device_node *dn, int cpu)
+/*
+ * Write secondary_holding_pen_release in a way that is guaranteed to be
+ * visible to all observers, irrespective of whether they're taking part
+ * in coherency or not. This is necessary for the hotplug code to work
+ * reliably.
+ */
+static void write_pen_release(u64 val)
+{
+ void *start = (void *)&secondary_holding_pen_release;
+ unsigned long size = sizeof(secondary_holding_pen_release);
+
+ secondary_holding_pen_release = val;
+ __flush_dcache_area(start, size);
+}
+
+static int smp_spin_table_cpu_init(struct device_node *dn, unsigned int cpu)
{
/*
* Determine the address from which the CPU is polling.
@@ -40,7 +63,7 @@
return 0;
}
-static int __init smp_spin_table_prepare_cpu(int cpu)
+static int smp_spin_table_cpu_prepare(unsigned int cpu)
{
void **release_addr;
@@ -59,8 +82,24 @@
return 0;
}
-const struct smp_enable_ops smp_spin_table_ops __initconst = {
+static int smp_spin_table_cpu_boot(unsigned int cpu)
+{
+ /*
+ * Update the pen release flag.
+ */
+ write_pen_release(cpu_logical_map(cpu));
+
+ /*
+ * Send an event, causing the secondaries to read pen_release.
+ */
+ sev();
+
+ return 0;
+}
+
+const struct cpu_operations smp_spin_table_ops = {
.name = "spin-table",
- .init_cpu = smp_spin_table_init_cpu,
- .prepare_cpu = smp_spin_table_prepare_cpu,
+ .cpu_init = smp_spin_table_cpu_init,
+ .cpu_prepare = smp_spin_table_cpu_prepare,
+ .cpu_boot = smp_spin_table_cpu_boot,
};
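The consumer of write_pen_release() is the secondary_holding_pen loop in
head.S; a rough C model of what each parked secondary does (illustrative
only, not part of this patch):

    /* Each secondary spins until the boot CPU publishes its MPIDR
     * through write_pen_release() and wakes it with sev(). */
    static void secondary_holding_pen_model(u64 my_mpidr)
    {
            while (secondary_holding_pen_release != my_mpidr)
                    wfe();  /* cheap wait, woken by the boot CPU's sev() */
            /* ... branch to the secondary kernel entry point ... */
    }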
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index d25459f..38f0558 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -43,12 +43,16 @@
low = frame->sp;
high = ALIGN(low, THREAD_SIZE);
- if (fp < low || fp > high || fp & 0xf)
+ if (fp < low || fp > high - 0x18 || fp & 0xf)
return -EINVAL;
frame->sp = fp + 0x10;
frame->fp = *(unsigned long *)(fp);
- frame->pc = *(unsigned long *)(fp + 8);
+ /*
+ * -4 here because we care about the PC at the time of the bl,
+ * not where the return will go.
+ */
+ frame->pc = *(unsigned long *)(fp + 8) - 4;
return 0;
}
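For context, unwind_frame() walks the AAPCS64 frame record: two 64-bit words
at x29 holding the caller's frame pointer and the saved link register. A
sketch of the assumed layout (illustrative, not kernel code):

    struct frame_record_model {
            unsigned long fp;       /* at [x29]:     caller's x29 */
            unsigned long lr;       /* at [x29, #8]: return address */
    };

The saved lr points at the instruction after the bl, so subtracting 4 (one
A64 instruction) attributes the frame to the call site itself, as the new
comment explains.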
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
new file mode 100644
index 0000000..1fa9ce4
--- /dev/null
+++ b/arch/arm64/kernel/suspend.c
@@ -0,0 +1,140 @@
+#include <linux/percpu.h>
+#include <linux/slab.h>
+#include <asm/cacheflush.h>
+#include <asm/cpu_ops.h>
+#include <asm/debug-monitors.h>
+#include <asm/pgtable.h>
+#include <asm/memory.h>
+#include <asm/smp_plat.h>
+#include <asm/suspend.h>
+#include <asm/tlbflush.h>
+
+extern int __cpu_suspend(unsigned long);
+/*
+ * This is called by __cpu_suspend() to save the state, and do whatever
+ * flushing is required to ensure that when the CPU goes to sleep we have
+ * the necessary data available when the caches are not searched.
+ *
+ * @arg: Argument to pass to suspend operations
+ * @ptr: CPU context virtual address
+ * @save_ptr: address of the location where the context physical address
+ * must be saved
+ */
+int __cpu_suspend_finisher(unsigned long arg, struct cpu_suspend_ctx *ptr,
+ phys_addr_t *save_ptr)
+{
+ int cpu = smp_processor_id();
+
+ *save_ptr = virt_to_phys(ptr);
+
+ cpu_do_suspend(ptr);
+ /*
+ * Only flush the context that must be retrieved with the MMU
+ * off. VA primitives ensure the flush is applied to all
+ * cache levels so context is pushed to DRAM.
+ */
+ __flush_dcache_area(ptr, sizeof(*ptr));
+ __flush_dcache_area(save_ptr, sizeof(*save_ptr));
+
+ return cpu_ops[cpu]->cpu_suspend(arg);
+}
+
+/*
+ * This hook is provided so that cpu_suspend code can restore HW
+ * breakpoints as early as possible in the resume path, before reenabling
+ * debug exceptions. Code cannot be run from a CPU PM notifier since by the
+ * time the notifier runs debug exceptions might have been enabled already,
+ * with the HW breakpoint registers' contents still in an unknown state.
+ */
+void (*hw_breakpoint_restore)(void *);
+void __init cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *))
+{
+ /* Prevent multiple restore hook initializations */
+ if (WARN_ON(hw_breakpoint_restore))
+ return;
+ hw_breakpoint_restore = hw_bp_restore;
+}
+
+/**
+ * cpu_suspend
+ *
+ * @arg: argument to pass to the finisher function
+ */
+int cpu_suspend(unsigned long arg)
+{
+ struct mm_struct *mm = current->active_mm;
+ int ret, cpu = smp_processor_id();
+ unsigned long flags;
+
+ /*
+ * If cpu_ops have not been registered or suspend
+ * has not been initialized, cpu_suspend call fails early.
+ */
+ if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_suspend)
+ return -EOPNOTSUPP;
+
+ /*
+ * From this point debug exceptions are disabled to prevent
+ * updates to mdscr register (saved and restored along with
+ * general purpose registers) from kernel debuggers.
+ */
+ local_dbg_save(flags);
+
+ /*
+ * The mm context is saved on the stack; it will be restored
+ * when the cpu comes out of reset through the identity mapped
+ * page tables, so that the thread address space is properly
+ * set up on function return.
+ */
+ ret = __cpu_suspend(arg);
+ if (ret == 0) {
+ cpu_switch_mm(mm->pgd, mm);
+ flush_tlb_all();
+
+ /*
+ * Restore per-cpu offset before any kernel
+ * subsystem relying on it has a chance to run.
+ */
+ set_my_cpu_offset(per_cpu_offset(cpu));
+
+ /*
+ * Restore HW breakpoint registers to sane values
+ * before debug exceptions are possibly reenabled
+ * through local_dbg_restore.
+ */
+ if (hw_breakpoint_restore)
+ hw_breakpoint_restore(NULL);
+ }
+
+ /*
+ * Restore pstate flags. OS lock and mdscr have already been
+ * restored, so from this point onwards, debugging is fully
+ * re-enabled if it was enabled when the core started shutdown.
+ */
+ local_dbg_restore(flags);
+
+ return ret;
+}
+
+extern struct sleep_save_sp sleep_save_sp;
+extern phys_addr_t sleep_idmap_phys;
+
+static int cpu_suspend_init(void)
+{
+ void *ctx_ptr;
+
+ /* ctx_ptr is an array of physical addresses */
+ ctx_ptr = kcalloc(mpidr_hash_size(), sizeof(phys_addr_t), GFP_KERNEL);
+
+ if (WARN_ON(!ctx_ptr))
+ return -ENOMEM;
+
+ sleep_save_sp.save_ptr_stash = ctx_ptr;
+ sleep_save_sp.save_ptr_stash_phys = virt_to_phys(ctx_ptr);
+ sleep_idmap_phys = virt_to_phys(idmap_pg_dir);
+ __flush_dcache_area(&sleep_save_sp, sizeof(struct sleep_save_sp));
+ __flush_dcache_area(&sleep_idmap_phys, sizeof(sleep_idmap_phys));
+
+ return 0;
+}
+early_initcall(cpu_suspend_init);
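A minimal sketch of a caller of the new cpu_suspend() entry point, e.g. from
a platform idle driver (the caller is hypothetical; cpu_suspend() itself is
defined above):

    /* Hypothetical idle hook: enter a low power state and tolerate
     * an aborted suspend. */
    static int example_enter_lowpower(unsigned long state_arg)
    {
            int ret = cpu_suspend(state_arg);

            if (ret)
                    pr_warn("CPU%d: suspend aborted (%d)\n",
                            smp_processor_id(), ret);
            return ret;
    }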
diff --git a/arch/arm64/kernel/time.c b/arch/arm64/kernel/time.c
index a551f88..03dc371 100644
--- a/arch/arm64/kernel/time.c
+++ b/arch/arm64/kernel/time.c
@@ -68,12 +68,6 @@
return arch_timer_read_counter() * sched_clock_mult;
}
-int read_current_timer(unsigned long *timer_value)
-{
- *timer_value = arch_timer_read_counter();
- return 0;
-}
-
void __init time_init(void)
{
u32 arch_timer_rate;
diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c
new file mode 100644
index 0000000..d450a6d
--- /dev/null
+++ b/arch/arm64/kernel/topology.c
@@ -0,0 +1,438 @@
+/*
+ * arch/arm64/kernel/topology.c
+ *
+ * Copyright (C) 2011,2013,2014 Linaro Limited.
+ *
+ * Based on the arm32 version written by Vincent Guittot in turn based on
+ * arch/sh/kernel/topology.c
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/init.h>
+#include <linux/percpu.h>
+#include <linux/node.h>
+#include <linux/nodemask.h>
+#include <linux/of.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#include <asm/topology.h>
+
+/*
+ * cpu power table
+ * This per cpu data structure describes the relative capacity of each core.
+ * On a heterogeneous system, cores don't have the same computation capacity
+ * and we reflect that difference in the cpu_power field so the scheduler can
+ * take this difference into account during load balance. A per cpu structure
+ * is preferred because each CPU updates its own cpu_power field during the
+ * load balance except for idle cores. One idle core is selected to run the
+ * rebalance_domains for all idle cores and the cpu_power can be updated
+ * during this sequence.
+ */
+static DEFINE_PER_CPU(unsigned long, cpu_scale);
+
+unsigned long arch_scale_freq_power(struct sched_domain *sd, int cpu)
+{
+ return per_cpu(cpu_scale, cpu);
+}
+
+static void set_power_scale(unsigned int cpu, unsigned long power)
+{
+ per_cpu(cpu_scale, cpu) = power;
+}
+
+static int __init get_cpu_for_node(struct device_node *node)
+{
+ struct device_node *cpu_node;
+ int cpu;
+
+ cpu_node = of_parse_phandle(node, "cpu", 0);
+ if (!cpu_node)
+ return -1;
+
+ for_each_possible_cpu(cpu) {
+ if (of_get_cpu_node(cpu, NULL) == cpu_node) {
+ of_node_put(cpu_node);
+ return cpu;
+ }
+ }
+
+ pr_crit("Unable to find CPU node for %s\n", cpu_node->full_name);
+
+ of_node_put(cpu_node);
+ return -1;
+}
+
+static int __init parse_core(struct device_node *core, int cluster_id,
+ int core_id)
+{
+ char name[10];
+ bool leaf = true;
+ int i = 0;
+ int cpu;
+ struct device_node *t;
+
+ do {
+ snprintf(name, sizeof(name), "thread%d", i);
+ t = of_get_child_by_name(core, name);
+ if (t) {
+ leaf = false;
+ cpu = get_cpu_for_node(t);
+ if (cpu >= 0) {
+ cpu_topology[cpu].cluster_id = cluster_id;
+ cpu_topology[cpu].core_id = core_id;
+ cpu_topology[cpu].thread_id = i;
+ } else {
+ pr_err("%s: Can't get CPU for thread\n",
+ t->full_name);
+ of_node_put(t);
+ return -EINVAL;
+ }
+ of_node_put(t);
+ }
+ i++;
+ } while (t);
+
+ cpu = get_cpu_for_node(core);
+ if (cpu >= 0) {
+ if (!leaf) {
+ pr_err("%s: Core has both threads and CPU\n",
+ core->full_name);
+ return -EINVAL;
+ }
+
+ cpu_topology[cpu].cluster_id = cluster_id;
+ cpu_topology[cpu].core_id = core_id;
+ } else if (leaf) {
+ pr_err("%s: Can't get CPU for leaf core\n", core->full_name);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int __init parse_cluster(struct device_node *cluster, int depth)
+{
+ char name[10];
+ bool leaf = true;
+ bool has_cores = false;
+ struct device_node *c;
+ static int cluster_id __initdata;
+ int core_id = 0;
+ int i, ret;
+
+ /*
+ * First check for child clusters; we currently ignore any
+ * information about the nesting of clusters and present the
+ * scheduler with a flat list of them.
+ */
+ i = 0;
+ do {
+ snprintf(name, sizeof(name), "cluster%d", i);
+ c = of_get_child_by_name(cluster, name);
+ if (c) {
+ leaf = false;
+ ret = parse_cluster(c, depth + 1);
+ of_node_put(c);
+ if (ret != 0)
+ return ret;
+ }
+ i++;
+ } while (c);
+
+ /* Now check for cores */
+ i = 0;
+ do {
+ snprintf(name, sizeof(name), "core%d", i);
+ c = of_get_child_by_name(cluster, name);
+ if (c) {
+ has_cores = true;
+
+ if (depth == 0) {
+ pr_err("%s: cpu-map children should be clusters\n",
+ c->full_name);
+ of_node_put(c);
+ return -EINVAL;
+ }
+
+ if (leaf) {
+ ret = parse_core(c, cluster_id, core_id++);
+ } else {
+ pr_err("%s: Non-leaf cluster with core %s\n",
+ cluster->full_name, name);
+ ret = -EINVAL;
+ }
+
+ of_node_put(c);
+ if (ret != 0)
+ return ret;
+ }
+ i++;
+ } while (c);
+
+ if (leaf && !has_cores)
+ pr_warn("%s: empty cluster\n", cluster->full_name);
+
+ if (leaf)
+ cluster_id++;
+
+ return 0;
+}
+
+struct cpu_efficiency {
+ const char *compatible;
+ unsigned long efficiency;
+};
+
+/*
+ * Table of the relative efficiency of each processor.
+ * The efficiency value must fit in 20 bits and the final
+ * cpu_scale value must be in the range
+ * 0 < cpu_scale < 3*SCHED_POWER_SCALE/2
+ * in order to return at most 1 when DIV_ROUND_CLOSEST
+ * is used to compute the capacity of a CPU.
+ * Processors that are not defined in the table
+ * use the default SCHED_POWER_SCALE value for cpu_scale.
+ */
+static const struct cpu_efficiency table_efficiency[] = {
+ { "arm,cortex-a57", 3891 },
+ { "arm,cortex-a53", 2048 },
+ { NULL, },
+};
+
+static unsigned long *__cpu_capacity;
+#define cpu_capacity(cpu) __cpu_capacity[cpu]
+
+static unsigned long middle_capacity = 1;
+
+/*
+ * Iterate over all CPUs' descriptors in the DT and compute the efficiency
+ * (as per table_efficiency). Also calculate a middle efficiency
+ * as close as possible to (max{eff_i} + min{eff_i}) / 2
+ * This is later used to scale the cpu_power field such that an
+ * 'average' CPU is of middle power. Also see the comments near
+ * table_efficiency[] and update_cpu_power().
+ */
+static int __init parse_dt_topology(void)
+{
+ struct device_node *cn, *map;
+ int ret = 0;
+ int cpu;
+
+ cn = of_find_node_by_path("/cpus");
+ if (!cn) {
+ pr_err("No CPU information found in DT\n");
+ return 0;
+ }
+
+ /*
+ * When topology is provided cpu-map is essentially a root
+ * cluster with restricted subnodes.
+ */
+ map = of_get_child_by_name(cn, "cpu-map");
+ if (!map)
+ goto out;
+
+ ret = parse_cluster(map, 0);
+ if (ret != 0)
+ goto out_map;
+
+ /*
+ * Check that all cores are in the topology; the SMP code will
+ * only mark cores described in the DT as possible.
+ */
+ for_each_possible_cpu(cpu) {
+ if (cpu_topology[cpu].cluster_id == -1) {
+ pr_err("CPU%d: No topology information specified\n",
+ cpu);
+ ret = -EINVAL;
+ }
+ }
+
+out_map:
+ of_node_put(map);
+out:
+ of_node_put(cn);
+ return ret;
+}
+
+static void __init parse_dt_cpu_power(void)
+{
+ const struct cpu_efficiency *cpu_eff;
+ struct device_node *cn;
+ unsigned long min_capacity = ULONG_MAX;
+ unsigned long max_capacity = 0;
+ unsigned long capacity = 0;
+ int cpu;
+
+ __cpu_capacity = kcalloc(nr_cpu_ids, sizeof(*__cpu_capacity),
+ GFP_NOWAIT);
+
+ for_each_possible_cpu(cpu) {
+ const u32 *rate;
+ int len;
+
+ /* Too early to use cpu->of_node */
+ cn = of_get_cpu_node(cpu, NULL);
+ if (!cn) {
+ pr_err("Missing device node for CPU %d\n", cpu);
+ continue;
+ }
+
+ for (cpu_eff = table_efficiency; cpu_eff->compatible; cpu_eff++)
+ if (of_device_is_compatible(cn, cpu_eff->compatible))
+ break;
+
+ if (cpu_eff->compatible == NULL) {
+ pr_warn("%s: Unknown CPU type\n", cn->full_name);
+ continue;
+ }
+
+ rate = of_get_property(cn, "clock-frequency", &len);
+ if (!rate || len != 4) {
+ pr_err("%s: Missing clock-frequency property\n",
+ cn->full_name);
+ continue;
+ }
+
+ capacity = ((be32_to_cpup(rate)) >> 20) * cpu_eff->efficiency;
+
+ /* Save min capacity of the system */
+ if (capacity < min_capacity)
+ min_capacity = capacity;
+
+ /* Save max capacity of the system */
+ if (capacity > max_capacity)
+ max_capacity = capacity;
+
+ cpu_capacity(cpu) = capacity;
+ }
+
+ /* If min and max capacities are equal we bypass the update of the
+ * cpu_scale because all CPUs have the same capacity. Otherwise, we
+ * compute a middle_capacity factor that will ensure that the capacity
+ * of an 'average' CPU of the system will be as close as possible to
+ * SCHED_POWER_SCALE, which is the default value, but with the
+ * constraint explained near table_efficiency[].
+ */
+ if (min_capacity == max_capacity)
+ return;
+ else if (4 * max_capacity < (3 * (max_capacity + min_capacity)))
+ middle_capacity = (min_capacity + max_capacity)
+ >> (SCHED_POWER_SHIFT+1);
+ else
+ middle_capacity = ((max_capacity / 3)
+ >> (SCHED_POWER_SHIFT-1)) + 1;
+}
+
+/*
+ * Look for a custom capacity for a CPU in the cpu_topo_data table during
+ * boot. The update of all CPUs is O(n^2) for a heterogeneous system, but
+ * the function returns directly for a homogeneous (SMP) system.
+ */
+static void update_cpu_power(unsigned int cpu)
+{
+ if (!cpu_capacity(cpu))
+ return;
+
+ set_power_scale(cpu, cpu_capacity(cpu) / middle_capacity);
+
+ pr_info("CPU%u: update cpu_power %lu\n",
+ cpu, arch_scale_freq_power(NULL, cpu));
+}
+
+/*
+ * cpu topology table
+ */
+struct cpu_topology cpu_topology[NR_CPUS];
+EXPORT_SYMBOL_GPL(cpu_topology);
+
+const struct cpumask *cpu_coregroup_mask(int cpu)
+{
+ return &cpu_topology[cpu].core_sibling;
+}
+
+static void update_siblings_masks(unsigned int cpuid)
+{
+ struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid];
+ int cpu;
+
+ if (cpuid_topo->cluster_id == -1) {
+ /*
+ * DT does not contain topology information for this cpu.
+ */
+ pr_debug("CPU%u: No topology information configured\n", cpuid);
+ return;
+ }
+
+ /* update core and thread sibling masks */
+ for_each_possible_cpu(cpu) {
+ cpu_topo = &cpu_topology[cpu];
+
+ if (cpuid_topo->cluster_id != cpu_topo->cluster_id)
+ continue;
+
+ cpumask_set_cpu(cpuid, &cpu_topo->core_sibling);
+ if (cpu != cpuid)
+ cpumask_set_cpu(cpu, &cpuid_topo->core_sibling);
+
+ if (cpuid_topo->core_id != cpu_topo->core_id)
+ continue;
+
+ cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling);
+ if (cpu != cpuid)
+ cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling);
+ }
+}
+
+void store_cpu_topology(unsigned int cpuid)
+{
+ update_siblings_masks(cpuid);
+ update_cpu_power(cpuid);
+}
+
+static void __init reset_cpu_topology(void)
+{
+ unsigned int cpu;
+
+ for_each_possible_cpu(cpu) {
+ struct cpu_topology *cpu_topo = &cpu_topology[cpu];
+
+ cpu_topo->thread_id = -1;
+ cpu_topo->core_id = 0;
+ cpu_topo->cluster_id = -1;
+
+ cpumask_clear(&cpu_topo->core_sibling);
+ cpumask_set_cpu(cpu, &cpu_topo->core_sibling);
+ cpumask_clear(&cpu_topo->thread_sibling);
+ cpumask_set_cpu(cpu, &cpu_topo->thread_sibling);
+ }
+}
+
+static void __init reset_cpu_power(void)
+{
+ unsigned int cpu;
+
+ for_each_possible_cpu(cpu)
+ set_power_scale(cpu, SCHED_POWER_SCALE);
+}
+
+void __init init_cpu_topology(void)
+{
+ reset_cpu_topology();
+
+ /*
+ * Discard anything that was parsed if we hit an error so we
+ * don't use partial information.
+ */
+ if (parse_dt_topology())
+ reset_cpu_topology();
+
+ reset_cpu_power();
+ parse_dt_cpu_power();
+}
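A worked example of the scaling, with assumed clock rates (not taken from
this patch): one Cortex-A57 at 1.1 GHz and one Cortex-A53 at 850 MHz give

    A57: (1100000000 >> 20) * 3891 = 1049 * 3891 = 4081659
    A53: ( 850000000 >> 20) * 2048 =  810 * 2048 = 1658880

    4 * max < 3 * (max + min), so:
    middle_capacity = (1658880 + 4081659) >> (SCHED_POWER_SHIFT + 1)
                    = 5740539 >> 11 = 2802

    cpu_scale(A57) = 4081659 / 2802 = 1456
    cpu_scale(A53) = 1658880 / 2802 =  592

so the two cores average out near SCHED_POWER_SCALE (1024), which is exactly
the constraint described next to table_efficiency[].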
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index 6a389dc..10c27f5 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -103,49 +103,31 @@
static int __init vdso_init(void)
{
- struct page *pg;
- char *vbase;
- int i, ret = 0;
+ int i;
+
+ if (memcmp(&vdso_start, "\177ELF", 4)) {
+ pr_err("vDSO is not a valid ELF object!\n");
+ return -EINVAL;
+ }
vdso_pages = (&vdso_end - &vdso_start) >> PAGE_SHIFT;
pr_info("vdso: %ld pages (%ld code, %ld data) at base %p\n",
vdso_pages + 1, vdso_pages, 1L, &vdso_start);
/* Allocate the vDSO pagelist, plus a page for the data. */
- vdso_pagelist = kzalloc(sizeof(struct page *) * (vdso_pages + 1),
+ vdso_pagelist = kcalloc(vdso_pages + 1, sizeof(struct page *),
GFP_KERNEL);
- if (vdso_pagelist == NULL) {
- pr_err("Failed to allocate vDSO pagelist!\n");
+ if (vdso_pagelist == NULL)
return -ENOMEM;
- }
/* Grab the vDSO code pages. */
- for (i = 0; i < vdso_pages; i++) {
- pg = virt_to_page(&vdso_start + i*PAGE_SIZE);
- ClearPageReserved(pg);
- get_page(pg);
- vdso_pagelist[i] = pg;
- }
-
- /* Sanity check the shared object header. */
- vbase = vmap(vdso_pagelist, 1, 0, PAGE_KERNEL);
- if (vbase == NULL) {
- pr_err("Failed to map vDSO pagelist!\n");
- return -ENOMEM;
- } else if (memcmp(vbase, "\177ELF", 4)) {
- pr_err("vDSO is not a valid ELF object!\n");
- ret = -EINVAL;
- goto unmap;
- }
+ for (i = 0; i < vdso_pages; i++)
+ vdso_pagelist[i] = virt_to_page(&vdso_start + i * PAGE_SIZE);
/* Grab the vDSO data page. */
- pg = virt_to_page(vdso_data);
- get_page(pg);
- vdso_pagelist[i] = pg;
+ vdso_pagelist[i] = virt_to_page(vdso_data);
-unmap:
- vunmap(vbase);
- return ret;
+ return 0;
}
arch_initcall(vdso_init);
@@ -235,6 +217,8 @@
vdso_data->use_syscall = use_syscall;
vdso_data->xtime_coarse_sec = xtime_coarse.tv_sec;
vdso_data->xtime_coarse_nsec = xtime_coarse.tv_nsec;
+ vdso_data->wtm_clock_sec = tk->wall_to_monotonic.tv_sec;
+ vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec;
if (!use_syscall) {
vdso_data->cs_cycle_last = tk->clock->cycle_last;
@@ -242,8 +226,6 @@
vdso_data->xtime_clock_nsec = tk->xtime_nsec;
vdso_data->cs_mult = tk->mult;
vdso_data->cs_shift = tk->shift;
- vdso_data->wtm_clock_sec = tk->wall_to_monotonic.tv_sec;
- vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec;
}
smp_wmb();
diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile
index d8064af..6d20b7d 100644
--- a/arch/arm64/kernel/vdso/Makefile
+++ b/arch/arm64/kernel/vdso/Makefile
@@ -48,7 +48,7 @@
# Actual build commands
quiet_cmd_vdsold = VDSOL $@
- cmd_vdsold = $(CC) $(c_flags) -Wl,-T $^ -o $@
+ cmd_vdsold = $(CC) $(c_flags) -Wl,-n -Wl,-T $^ -o $@
quiet_cmd_vdsoas = VDSOA $@
cmd_vdsoas = $(CC) $(a_flags) -c -o $@ $<
diff --git a/arch/arm64/kernel/vdso/gettimeofday.S b/arch/arm64/kernel/vdso/gettimeofday.S
index f0a6d10..fe652ff 100644
--- a/arch/arm64/kernel/vdso/gettimeofday.S
+++ b/arch/arm64/kernel/vdso/gettimeofday.S
@@ -103,6 +103,8 @@
bl __do_get_tspec
seqcnt_check w9, 1b
+ mov x30, x2
+
cmp w0, #CLOCK_MONOTONIC
b.ne 6f
@@ -118,6 +120,9 @@
ccmp w0, #CLOCK_MONOTONIC_COARSE, #0x4, ne
b.ne 8f
+ /* xtime_coarse_nsec is already right-shifted */
+ mov x12, #0
+
/* Get coarse timespec. */
adr vdso_data, _vdso_data
3: seqcnt_acquire
@@ -156,7 +161,7 @@
lsr x11, x11, x12
stp x10, x11, [x1, #TSPEC_TV_SEC]
mov x0, xzr
- ret x2
+ ret
7:
mov x30, x2
8: /* Syscall fallback. */
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 3fae2be..18a08e1 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -41,7 +41,6 @@
}
.text : { /* Real text segment */
_stext = .; /* Text and read-only data */
- *(.smp.pen.text)
__exception_text_start = .;
*(.exception.text)
__exception_text_end = .;
@@ -56,7 +55,8 @@
}
RO_DATA(PAGE_SIZE)
-
+ EXCEPTION_TABLE(8)
+ NOTES
_etext = .; /* End of text and rodata section */
. = ALIGN(PAGE_SIZE);
@@ -82,41 +82,12 @@
PERCPU_SECTION(64)
__init_end = .;
- . = ALIGN(THREAD_SIZE);
- __data_loc = .;
- .data : AT(__data_loc) {
- _data = .; /* address in memory */
- _sdata = .;
-
- /*
- * first, the init task union, aligned
- * to an 8192 byte boundary.
- */
- INIT_TASK_DATA(THREAD_SIZE)
- NOSAVE_DATA
- CACHELINE_ALIGNED_DATA(64)
- READ_MOSTLY_DATA(64)
-
- /*
- * The exception fixup table (might need resorting at runtime)
- */
- . = ALIGN(32);
- __start___ex_table = .;
- *(__ex_table)
- __stop___ex_table = .;
-
- /*
- * and the usual data section
- */
- DATA_DATA
- CONSTRUCTORS
-
- _edata = .;
- }
- _edata_loc = __data_loc + SIZEOF(.data);
-
- NOTES
+ . = ALIGN(PAGE_SIZE);
+ _data = .;
+ _sdata = .;
+ RW_DATA_SECTION(64, PAGE_SIZE, THREAD_SIZE)
+ _edata = .;
BSS_SECTION(0, 0, 0)
_end = .;
diff --git a/arch/arm64/lib/Makefile b/arch/arm64/lib/Makefile
index 59acc0e..328ce1a9 100644
--- a/arch/arm64/lib/Makefile
+++ b/arch/arm64/lib/Makefile
@@ -1,6 +1,4 @@
-lib-y := bitops.o delay.o \
- strncpy_from_user.o strnlen_user.o clear_user.o \
- copy_from_user.o copy_to_user.o copy_in_user.o \
- copy_page.o clear_page.o \
- memchr.o memcpy.o memmove.o memset.o \
+lib-y := bitops.o clear_user.o delay.o copy_from_user.o \
+ copy_to_user.o copy_in_user.o copy_page.o \
+ clear_page.o memchr.o memcpy.o memmove.o memset.o \
strchr.o strrchr.o
diff --git a/arch/arm64/lib/strncpy_from_user.S b/arch/arm64/lib/strncpy_from_user.S
deleted file mode 100644
index 56e448a..0000000
--- a/arch/arm64/lib/strncpy_from_user.S
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Based on arch/arm/lib/strncpy_from_user.S
- *
- * Copyright (C) 1995-2000 Russell King
- * Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/linkage.h>
-#include <asm/assembler.h>
-#include <asm/errno.h>
-
- .text
- .align 5
-
-/*
- * Copy a string from user space to kernel space.
- * x0 = dst, x1 = src, x2 = byte length
- * returns the number of characters copied (strlen of copied string),
- * -EFAULT on exception, or "len" if we fill the whole buffer
- */
-ENTRY(__strncpy_from_user)
- mov x4, x1
-1: subs x2, x2, #1
- bmi 2f
-USER(9f, ldrb w3, [x1], #1 )
- strb w3, [x0], #1
- cbnz w3, 1b
- sub x1, x1, #1 // take NUL character out of count
-2: sub x0, x1, x4
- ret
-ENDPROC(__strncpy_from_user)
-
- .section .fixup,"ax"
- .align 0
-9: strb wzr, [x0] // null terminate
- mov x0, #-EFAULT
- ret
- .previous
diff --git a/arch/arm64/lib/strnlen_user.S b/arch/arm64/lib/strnlen_user.S
deleted file mode 100644
index 7f7b176..0000000
--- a/arch/arm64/lib/strnlen_user.S
+++ /dev/null
@@ -1,47 +0,0 @@
-/*
- * Based on arch/arm/lib/strnlen_user.S
- *
- * Copyright (C) 1995-2000 Russell King
- * Copyright (C) 2012 ARM Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/linkage.h>
-#include <asm/assembler.h>
-#include <asm/errno.h>
-
- .text
- .align 5
-
-/* Prototype: unsigned long __strnlen_user(const char *str, long n)
- * Purpose : get length of a string in user memory
- * Params : str - address of string in user memory
- * Returns : length of string *including terminator*
- * or zero on exception, or n if too long
- */
-ENTRY(__strnlen_user)
- mov x2, x0
-1: subs x1, x1, #1
- b.mi 2f
-USER(9f, ldrb w3, [x0], #1 )
- cbnz w3, 1b
-2: sub x0, x0, x2
- ret
-ENDPROC(__strnlen_user)
-
- .section .fixup,"ax"
- .align 0
-9: mov x0, #0
- ret
- .previous
diff --git a/arch/arm64/mm/Makefile b/arch/arm64/mm/Makefile
index 3140a2a..b51d364 100644
--- a/arch/arm64/mm/Makefile
+++ b/arch/arm64/mm/Makefile
@@ -2,3 +2,4 @@
cache.o copypage.o flush.o \
ioremap.o mmap.o pgd.o mmu.o \
context.o tlb.o proc.o
+obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index 48a3860..0e379c4 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -30,7 +30,7 @@
*
* Corrupted registers: x0-x7, x9-x11
*/
-ENTRY(__flush_dcache_all)
+__flush_dcache_all:
dsb sy // ensure ordering with previous memory accesses
mrs x0, clidr_el1 // read clidr
and x3, x0, #0x7000000 // extract loc from clidr
@@ -146,7 +146,7 @@
ENDPROC(__flush_cache_user_range)
/*
- * __flush_kern_dcache_page(kaddr)
+ * __flush_dcache_area(kaddr, size)
*
* Ensure that the data held in the page kaddr is written back to the
* page in question.
@@ -166,3 +166,88 @@
dsb sy
ret
ENDPROC(__flush_dcache_area)
+
+/*
+ * __dma_inv_range(start, end)
+ * - start - virtual start address of region
+ * - end - virtual end address of region
+ */
+__dma_inv_range:
+ dcache_line_size x2, x3
+ sub x3, x2, #1
+ tst x1, x3 // end cache line aligned?
+ bic x1, x1, x3
+ b.eq 1f
+ dc civac, x1 // clean & invalidate D / U line
+1: tst x0, x3 // start cache line aligned?
+ bic x0, x0, x3
+ b.eq 2f
+ dc civac, x0 // clean & invalidate D / U line
+ b 3f
+2: dc ivac, x0 // invalidate D / U line
+3: add x0, x0, x2
+ cmp x0, x1
+ b.lo 2b
+ dsb sy
+ ret
+ENDPROC(__dma_inv_range)
+
+/*
+ * __dma_clean_range(start, end)
+ * - start - virtual start address of region
+ * - end - virtual end address of region
+ */
+__dma_clean_range:
+ dcache_line_size x2, x3
+ sub x3, x2, #1
+ bic x0, x0, x3
+1: dc cvac, x0 // clean D / U line
+ add x0, x0, x2
+ cmp x0, x1
+ b.lo 1b
+ dsb sy
+ ret
+ENDPROC(__dma_clean_range)
+
+/*
+ * __dma_flush_range(start, end)
+ * - start - virtual start address of region
+ * - end - virtual end address of region
+ */
+ENTRY(__dma_flush_range)
+ dcache_line_size x2, x3
+ sub x3, x2, #1
+ bic x0, x0, x3
+1: dc civac, x0 // clean & invalidate D / U line
+ add x0, x0, x2
+ cmp x0, x1
+ b.lo 1b
+ dsb sy
+ ret
+ENDPROC(__dma_flush_range)
+
+/*
+ * __dma_map_area(start, size, dir)
+ * - start - kernel virtual start address
+ * - size - size of region
+ * - dir - DMA direction
+ */
+ENTRY(__dma_map_area)
+ add x1, x1, x0
+ cmp w2, #DMA_FROM_DEVICE
+ b.eq __dma_inv_range
+ b __dma_clean_range
+ENDPROC(__dma_map_area)
+
+/*
+ * __dma_unmap_area(start, size, dir)
+ * - start - kernel virtual start address
+ * - size - size of region
+ * - dir - DMA direction
+ */
+ENTRY(__dma_unmap_area)
+ add x1, x1, x0
+ cmp w2, #DMA_TO_DEVICE
+ b.ne __dma_inv_range
+ ret
+ENDPROC(__dma_unmap_area)
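The direction handling in __dma_map_area/__dma_unmap_area follows the usual
rules for non-coherent DMA. In rough C terms (a model only;
invalidate_range/clean_range are stand-ins for the assembly routines above):

    static void dma_map_area_model(void *start, size_t size,
                                   enum dma_data_direction dir)
    {
            if (dir == DMA_FROM_DEVICE)     /* device will write */
                    invalidate_range(start, start + size);
            else                            /* CPU wrote the buffer */
                    clean_range(start, start + size);
    }

    static void dma_unmap_area_model(void *start, size_t size,
                                     enum dma_data_direction dir)
    {
            if (dir != DMA_TO_DEVICE)       /* device may have written */
                    invalidate_range(start, start + size);
    }

Note that __dma_inv_range uses clean-and-invalidate (dc civac) on partial
cache lines at the buffer edges, so unrelated data sharing those lines is
not thrown away.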
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 4bd7579..f39a55d 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -21,34 +21,277 @@
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
+#include <linux/dma-contiguous.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>
+#include <linux/amba/bus.h>
#include <asm/cacheflush.h>
struct dma_map_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);
-static void *arm64_swiotlb_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flags,
- struct dma_attrs *attrs)
+static pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot,
+ bool coherent)
{
- if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
+ if (dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs))
+ return pgprot_writecombine(prot);
+ else if (!coherent)
+ return pgprot_dmacoherent(prot);
+ return prot;
+}
+
+static void *__dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flags,
+ struct dma_attrs *attrs)
+{
+ if (IS_ENABLED(CONFIG_ZONE_DMA) &&
dev->coherent_dma_mask <= DMA_BIT_MASK(32))
- flags |= GFP_DMA32;
- return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
+ flags |= GFP_DMA;
+ if (IS_ENABLED(CONFIG_DMA_CMA)) {
+ struct page *page;
+
+ size = PAGE_ALIGN(size);
+ page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
+ get_order(size));
+ if (!page)
+ return NULL;
+
+ *dma_handle = phys_to_dma(dev, page_to_phys(page));
+ return page_address(page);
+ } else {
+ return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
+ }
}
-static void arm64_swiotlb_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle,
- struct dma_attrs *attrs)
+static void __dma_free_coherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle,
+ struct dma_attrs *attrs)
{
- swiotlb_free_coherent(dev, size, vaddr, dma_handle);
+ if (dev == NULL) {
+ WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
+ return;
+ }
+
+ if (IS_ENABLED(CONFIG_DMA_CMA)) {
+ phys_addr_t paddr = dma_to_phys(dev, dma_handle);
+
+ dma_release_from_contiguous(dev,
+ phys_to_page(paddr),
+ size >> PAGE_SHIFT);
+ } else {
+ swiotlb_free_coherent(dev, size, vaddr, dma_handle);
+ }
}
-static struct dma_map_ops arm64_swiotlb_dma_ops = {
- .alloc = arm64_swiotlb_alloc_coherent,
- .free = arm64_swiotlb_free_coherent,
+static void *__dma_alloc_noncoherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flags,
+ struct dma_attrs *attrs)
+{
+ struct page *page, **map;
+ void *ptr, *coherent_ptr;
+ int order, i;
+
+ size = PAGE_ALIGN(size);
+ order = get_order(size);
+
+ ptr = __dma_alloc_coherent(dev, size, dma_handle, flags, attrs);
+ if (!ptr)
+ goto no_mem;
+ map = kmalloc(sizeof(struct page *) << order, flags & ~GFP_DMA);
+ if (!map)
+ goto no_map;
+
+ /* remove any dirty cache lines on the kernel alias */
+ __dma_flush_range(ptr, ptr + size);
+
+ /* create a coherent mapping */
+ page = virt_to_page(ptr);
+ for (i = 0; i < (size >> PAGE_SHIFT); i++)
+ map[i] = page + i;
+ coherent_ptr = vmap(map, size >> PAGE_SHIFT, VM_MAP,
+ __get_dma_pgprot(attrs, __pgprot(PROT_NORMAL_NC), false));
+ kfree(map);
+ if (!coherent_ptr)
+ goto no_map;
+
+ return coherent_ptr;
+
+no_map:
+ __dma_free_coherent(dev, size, ptr, *dma_handle, attrs);
+no_mem:
+ *dma_handle = ~0;
+ return NULL;
+}
+
+static void __dma_free_noncoherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle,
+ struct dma_attrs *attrs)
+{
+ void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle));
+
+ vunmap(vaddr);
+ __dma_free_coherent(dev, size, swiotlb_addr, dma_handle, attrs);
+}
+
+static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ dma_addr_t dev_addr;
+
+ dev_addr = swiotlb_map_page(dev, page, offset, size, dir, attrs);
+ __dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
+
+ return dev_addr;
+}
+
+static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr,
+ size_t size, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ __dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
+ swiotlb_unmap_page(dev, dev_addr, size, dir, attrs);
+}
+
+static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
+ int nelems, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ struct scatterlist *sg;
+ int i, ret;
+
+ ret = swiotlb_map_sg_attrs(dev, sgl, nelems, dir, attrs);
+ for_each_sg(sgl, sg, ret, i)
+ __dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
+ sg->length, dir);
+
+ return ret;
+}
+
+static void __swiotlb_unmap_sg_attrs(struct device *dev,
+ struct scatterlist *sgl, int nelems,
+ enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ struct scatterlist *sg;
+ int i;
+
+ for_each_sg(sgl, sg, nelems, i)
+ __dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
+ sg->length, dir);
+ swiotlb_unmap_sg_attrs(dev, sgl, nelems, dir, attrs);
+}
+
+static void __swiotlb_sync_single_for_cpu(struct device *dev,
+ dma_addr_t dev_addr, size_t size,
+ enum dma_data_direction dir)
+{
+ __dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
+ swiotlb_sync_single_for_cpu(dev, dev_addr, size, dir);
+}
+
+static void __swiotlb_sync_single_for_device(struct device *dev,
+ dma_addr_t dev_addr, size_t size,
+ enum dma_data_direction dir)
+{
+ swiotlb_sync_single_for_device(dev, dev_addr, size, dir);
+ __dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
+}
+
+static void __swiotlb_sync_sg_for_cpu(struct device *dev,
+ struct scatterlist *sgl, int nelems,
+ enum dma_data_direction dir)
+{
+ struct scatterlist *sg;
+ int i;
+
+ for_each_sg(sgl, sg, nelems, i)
+ __dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
+ sg->length, dir);
+ swiotlb_sync_sg_for_cpu(dev, sgl, nelems, dir);
+}
+
+static void __swiotlb_sync_sg_for_device(struct device *dev,
+ struct scatterlist *sgl, int nelems,
+ enum dma_data_direction dir)
+{
+ struct scatterlist *sg;
+ int i;
+
+ swiotlb_sync_sg_for_device(dev, sgl, nelems, dir);
+ for_each_sg(sgl, sg, nelems, i)
+ __dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
+ sg->length, dir);
+}
+
+/* vma->vm_page_prot must be set appropriately before calling this function */
+static int __dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size)
+{
+ int ret = -ENXIO;
+ unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >>
+ PAGE_SHIFT;
+ unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ unsigned long pfn = dma_to_phys(dev, dma_addr) >> PAGE_SHIFT;
+ unsigned long off = vma->vm_pgoff;
+
+ if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+ return ret;
+
+ if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
+ ret = remap_pfn_range(vma, vma->vm_start,
+ pfn + off,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot);
+ }
+
+ return ret;
+}
+
+static int __swiotlb_mmap_noncoherent(struct device *dev,
+ struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ struct dma_attrs *attrs)
+{
+ vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot, false);
+ return __dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
+}
+
+static int __swiotlb_mmap_coherent(struct device *dev,
+ struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ struct dma_attrs *attrs)
+{
+ /* Just use whatever page_prot attributes were specified */
+ return __dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
+}
+
+struct dma_map_ops noncoherent_swiotlb_dma_ops = {
+ .alloc = __dma_alloc_noncoherent,
+ .free = __dma_free_noncoherent,
+ .mmap = __swiotlb_mmap_noncoherent,
+ .map_page = __swiotlb_map_page,
+ .unmap_page = __swiotlb_unmap_page,
+ .map_sg = __swiotlb_map_sg_attrs,
+ .unmap_sg = __swiotlb_unmap_sg_attrs,
+ .sync_single_for_cpu = __swiotlb_sync_single_for_cpu,
+ .sync_single_for_device = __swiotlb_sync_single_for_device,
+ .sync_sg_for_cpu = __swiotlb_sync_sg_for_cpu,
+ .sync_sg_for_device = __swiotlb_sync_sg_for_device,
+ .dma_supported = swiotlb_dma_supported,
+ .mapping_error = swiotlb_dma_mapping_error,
+};
+EXPORT_SYMBOL(noncoherent_swiotlb_dma_ops);
+
+struct dma_map_ops coherent_swiotlb_dma_ops = {
+ .alloc = __dma_alloc_coherent,
+ .free = __dma_free_coherent,
+ .mmap = __swiotlb_mmap_coherent,
.map_page = swiotlb_map_page,
.unmap_page = swiotlb_unmap_page,
.map_sg = swiotlb_map_sg_attrs,
@@ -60,13 +303,48 @@
.dma_supported = swiotlb_dma_supported,
.mapping_error = swiotlb_dma_mapping_error,
};
+EXPORT_SYMBOL(coherent_swiotlb_dma_ops);
-void __init arm64_swiotlb_init(void)
+static int dma_bus_notifier(struct notifier_block *nb,
+ unsigned long event, void *_dev)
{
- dma_ops = &arm64_swiotlb_dma_ops;
- swiotlb_init(1);
+ struct device *dev = _dev;
+
+ if (event != BUS_NOTIFY_ADD_DEVICE)
+ return NOTIFY_DONE;
+
+ if (of_property_read_bool(dev->of_node, "dma-coherent"))
+ set_dma_ops(dev, &coherent_swiotlb_dma_ops);
+
+ return NOTIFY_OK;
}
+static struct notifier_block platform_bus_nb = {
+ .notifier_call = dma_bus_notifier,
+};
+
+static struct notifier_block amba_bus_nb = {
+ .notifier_call = dma_bus_notifier,
+};
+
+extern int swiotlb_late_init_with_default_size(size_t default_size);
+
+static int __init swiotlb_late_init(void)
+{
+ size_t swiotlb_size = min(SZ_64M, MAX_ORDER_NR_PAGES << PAGE_SHIFT);
+
+ /*
+ * These must be registered before of_platform_populate().
+ */
+ bus_register_notifier(&platform_bus_type, &platform_bus_nb);
+ bus_register_notifier(&amba_bustype, &amba_bus_nb);
+
+ dma_ops = &noncoherent_swiotlb_dma_ops;
+
+ return swiotlb_late_init_with_default_size(swiotlb_size);
+}
+arch_initcall(swiotlb_late_init);
+
#define PREALLOC_DMA_DEBUG_ENTRIES 4096
static int __init dma_debug_do_init(void)
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 916dbe1..2b1fc4b 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -130,7 +130,7 @@
force_sig_info(sig, &si, tsk);
}
-void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *regs)
+static void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *regs)
{
struct task_struct *tsk = current;
struct mm_struct *mm = tsk->active_mm;
@@ -152,25 +152,8 @@
#define ESR_CM (1 << 8)
#define ESR_LNX_EXEC (1 << 24)
-/*
- * Check that the permissions on the VMA allow for the fault which occurred.
- * If we encountered a write fault, we must have write permission, otherwise
- * we allow any permission.
- */
-static inline bool access_error(unsigned int esr, struct vm_area_struct *vma)
-{
- unsigned int mask = VM_READ | VM_WRITE | VM_EXEC;
-
- if (esr & ESR_WRITE)
- mask = VM_WRITE;
- if (esr & ESR_LNX_EXEC)
- mask = VM_EXEC;
-
- return vma->vm_flags & mask ? false : true;
-}
-
static int __do_page_fault(struct mm_struct *mm, unsigned long addr,
- unsigned int esr, unsigned int flags,
+ unsigned int mm_flags, unsigned long vm_flags,
struct task_struct *tsk)
{
struct vm_area_struct *vma;
@@ -188,12 +171,16 @@
* it.
*/
good_area:
- if (access_error(esr, vma)) {
+ /*
+ * Check that the permissions on the VMA allow for the fault which
+ * occurred.
+ */
+ if (!(vma->vm_flags & vm_flags)) {
fault = VM_FAULT_BADACCESS;
goto out;
}
- return handle_mm_fault(mm, vma, addr & PAGE_MASK, flags);
+ return handle_mm_fault(mm, vma, addr & PAGE_MASK, mm_flags);
check_stack:
if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr))
@@ -208,9 +195,15 @@
struct task_struct *tsk;
struct mm_struct *mm;
int fault, sig, code;
- bool write = (esr & ESR_WRITE) && !(esr & ESR_CM);
- unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
- (write ? FAULT_FLAG_WRITE : 0);
+ unsigned long vm_flags = VM_READ | VM_WRITE;
+ unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+
+ if (esr & ESR_LNX_EXEC) {
+ vm_flags = VM_EXEC;
+ } else if ((esr & ESR_WRITE) && !(esr & ESR_CM)) {
+ vm_flags = VM_WRITE;
+ mm_flags |= FAULT_FLAG_WRITE;
+ }
tsk = current;
mm = tsk->mm;
@@ -248,7 +241,7 @@
#endif
}
- fault = __do_page_fault(mm, addr, esr, flags, tsk);
+ fault = __do_page_fault(mm, addr, mm_flags, vm_flags, tsk);
/*
* If we need to retry but a fatal signal is pending, handle the
@@ -265,7 +258,7 @@
*/
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
- if (flags & FAULT_FLAG_ALLOW_RETRY) {
+ if (mm_flags & FAULT_FLAG_ALLOW_RETRY) {
if (fault & VM_FAULT_MAJOR) {
tsk->maj_flt++;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs,
@@ -280,8 +273,8 @@
* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk of
* starvation.
*/
- flags &= ~FAULT_FLAG_ALLOW_RETRY;
- flags |= FAULT_FLAG_TRIED;
+ mm_flags &= ~FAULT_FLAG_ALLOW_RETRY;
+ mm_flags |= FAULT_FLAG_TRIED;
goto retry;
}
}
@@ -366,17 +359,6 @@
}
/*
- * Some section permission faults need to be handled gracefully. They can
- * happen due to a __{get,put}_user during an oops.
- */
-static int do_sect_fault(unsigned long addr, unsigned int esr,
- struct pt_regs *regs)
-{
- do_bad_area(addr, esr, regs);
- return 0;
-}
-
-/*
* This abort handler always returns "fault".
*/
static int do_bad(unsigned long addr, unsigned int esr, struct pt_regs *regs)
@@ -399,12 +381,12 @@
{ do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 2 translation fault" },
{ do_page_fault, SIGSEGV, SEGV_MAPERR, "level 3 translation fault" },
{ do_bad, SIGBUS, 0, "reserved access flag fault" },
- { do_bad, SIGSEGV, SEGV_ACCERR, "level 1 access flag fault" },
- { do_bad, SIGSEGV, SEGV_ACCERR, "level 2 access flag fault" },
+ { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 1 access flag fault" },
+ { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 access flag fault" },
{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 access flag fault" },
{ do_bad, SIGBUS, 0, "reserved permission fault" },
- { do_bad, SIGSEGV, SEGV_ACCERR, "level 1 permission fault" },
- { do_sect_fault, SIGSEGV, SEGV_ACCERR, "level 2 permission fault" },
+ { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 1 permission fault" },
+ { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 permission fault" },
{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 permission fault" },
{ do_bad, SIGBUS, 0, "synchronous external abort" },
{ do_bad, SIGBUS, 0, "asynchronous external abort" },
diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
index 88611c3..e4193e3 100644
--- a/arch/arm64/mm/flush.c
+++ b/arch/arm64/mm/flush.c
@@ -70,23 +70,16 @@
#endif
}
-void __flush_dcache_page(struct page *page)
-{
- __flush_dcache_area(page_address(page), PAGE_SIZE);
-}
-
void __sync_icache_dcache(pte_t pte, unsigned long addr)
{
- unsigned long pfn;
- struct page *page;
+ struct page *page = pte_page(pte);
- pfn = pte_pfn(pte);
- if (!pfn_valid(pfn))
+ /* no flushing needed for anonymous pages */
+ if (!page_mapping(page))
return;
- page = pfn_to_page(pfn);
if (!test_and_set_bit(PG_dcache_clean, &page->flags)) {
- __flush_dcache_page(page);
+ __flush_dcache_area(page_address(page), PAGE_SIZE);
__flush_icache_all();
} else if (icache_is_aivivt()) {
__flush_icache_all();
@@ -94,28 +87,14 @@
}
/*
- * Ensure cache coherency between kernel mapping and userspace mapping of this
- * page.
+ * This function is called when a page has been modified by the kernel. Mark
+ * it as dirty for later flushing when mapped in user space (if executable,
+ * see __sync_icache_dcache).
*/
void flush_dcache_page(struct page *page)
{
- struct address_space *mapping;
-
- /*
- * The zero page is never written to, so never has any dirty cache
- * lines, and therefore never needs to be flushed.
- */
- if (page == ZERO_PAGE(0))
- return;
-
- mapping = page_mapping(page);
- if (mapping && mapping_mapped(mapping)) {
- __flush_dcache_page(page);
- __flush_icache_all();
- set_bit(PG_dcache_clean, &page->flags);
- } else {
+ if (test_bit(PG_dcache_clean, &page->flags))
clear_bit(PG_dcache_clean, &page->flags);
- }
}
EXPORT_SYMBOL(flush_dcache_page);
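The flush.c changes amount to a lazy maintenance protocol around PG_dcache_clean: flush_dcache_page() merely clears the bit when the kernel dirties a page, and the real flushing is deferred to __sync_icache_dcache() when the page is later mapped executable into user space (set_pte_at() is assumed to call it for executable user mappings, as the arm64 pgtable code of this era does). A sketch of the two halves of the protocol:

        /* Producer: the kernel has modified the page contents. */
        static void mark_page_dirty_for_icache(struct page *page)
        {
                if (test_bit(PG_dcache_clean, &page->flags))
                        clear_bit(PG_dcache_clean, &page->flags);
        }

        /* Consumer: the page is being mapped executable into user space;
         * flush at most once until the page is dirtied again. */
        static void sync_for_exec(struct page *page)
        {
                if (!test_and_set_bit(PG_dcache_clean, &page->flags)) {
                        __flush_dcache_area(page_address(page), PAGE_SIZE);
                        __flush_icache_all();
                }
        }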
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
new file mode 100644
index 0000000..2fc8258
--- /dev/null
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -0,0 +1,70 @@
+/*
+ * arch/arm64/mm/hugetlbpage.c
+ *
+ * Copyright (C) 2013 Linaro Ltd.
+ *
+ * Based on arch/x86/mm/hugetlbpage.c.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/hugetlb.h>
+#include <linux/pagemap.h>
+#include <linux/err.h>
+#include <linux/sysctl.h>
+#include <asm/mman.h>
+#include <asm/tlb.h>
+#include <asm/tlbflush.h>
+#include <asm/pgalloc.h>
+
+#ifndef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
+int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
+{
+ return 0;
+}
+#endif
+
+struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
+ int write)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+int pmd_huge(pmd_t pmd)
+{
+ return !(pmd_val(pmd) & PMD_TABLE_BIT);
+}
+
+int pud_huge(pud_t pud)
+{
+ return !(pud_val(pud) & PUD_TABLE_BIT);
+}
+
+static __init int setup_hugepagesz(char *opt)
+{
+ unsigned long ps = memparse(opt, &opt);
+ if (ps == PMD_SIZE) {
+ hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
+ } else if (ps == PUD_SIZE) {
+ hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
+ } else {
+ pr_err("hugepagesz: Unsupported page size %lu M\n", ps >> 20);
+ return 0;
+ }
+ return 1;
+}
+__setup("hugepagesz=", setup_hugepagesz);
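pmd_huge() and pud_huge() above rely on the ARMv8 descriptor encoding: bit 1 (the table bit) is set for a table pointer and clear for a block mapping, so a present entry without it is a huge page. With 4KB base pages, PMD_SIZE is 2MB and PUD_SIZE is 1GB, so setup_hugepagesz() would accept a command line such as the following (hugepages= is the generic hugetlb pool parameter):

        hugepagesz=2M hugepages=512     # pool of 2MB (PMD-level) huge pages
        hugepagesz=1G hugepages=4       # pool of 1GB (PUD-level) huge pages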
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 61599b5..8af694d 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -30,6 +30,8 @@
#include <linux/memblock.h>
#include <linux/sort.h>
#include <linux/of_fdt.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma-contiguous.h>
#include <asm/prom.h>
#include <asm/sections.h>
@@ -44,8 +46,7 @@
phys_addr_t memstart_addr __read_mostly = 0;
-void __init early_init_dt_setup_initrd_arch(unsigned long start,
- unsigned long end)
+void __init early_init_dt_setup_initrd_arch(u64 start, u64 end)
{
phys_initrd_start = start;
phys_initrd_size = end - start;
@@ -67,22 +68,22 @@
}
early_param("initrd", early_initrd);
-#define MAX_DMA32_PFN ((4UL * 1024 * 1024 * 1024) >> PAGE_SHIFT)
-
static void __init zone_sizes_init(unsigned long min, unsigned long max)
{
struct memblock_region *reg;
unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
- unsigned long max_dma32 = min;
+ unsigned long max_dma = min;
memset(zone_size, 0, sizeof(zone_size));
-#ifdef CONFIG_ZONE_DMA32
/* 4GB maximum for 32-bit only capable devices */
- max_dma32 = max(min, min(max, MAX_DMA32_PFN));
- zone_size[ZONE_DMA32] = max_dma32 - min;
-#endif
- zone_size[ZONE_NORMAL] = max - max_dma32;
+ if (IS_ENABLED(CONFIG_ZONE_DMA)) {
+ unsigned long max_dma_phys =
+ (unsigned long)dma_to_phys(NULL, DMA_BIT_MASK(32) + 1);
+ max_dma = max(min, min(max, max_dma_phys >> PAGE_SHIFT));
+ zone_size[ZONE_DMA] = max_dma - min;
+ }
+ zone_size[ZONE_NORMAL] = max - max_dma;
memcpy(zhole_size, zone_size, sizeof(zhole_size));
@@ -92,15 +93,15 @@
if (start >= max)
continue;
-#ifdef CONFIG_ZONE_DMA32
- if (start < max_dma32) {
- unsigned long dma_end = min(end, max_dma32);
- zhole_size[ZONE_DMA32] -= dma_end - start;
+
+ if (IS_ENABLED(CONFIG_ZONE_DMA) && start < max_dma) {
+ unsigned long dma_end = min(end, max_dma);
+ zhole_size[ZONE_DMA] -= dma_end - start;
}
-#endif
- if (end > max_dma32) {
+
+ if (end > max_dma) {
unsigned long normal_end = min(end, max);
- unsigned long normal_start = max(start, max_dma32);
+ unsigned long normal_start = max(start, max_dma);
zhole_size[ZONE_NORMAL] -= normal_end - normal_start;
}
}
@@ -175,6 +176,8 @@
memblock_reserve(base, size);
}
+ dma_contiguous_reserve(0);
+
memblock_allow_resize();
memblock_dump_all();
}
@@ -285,8 +288,6 @@
unsigned long reserved_pages, free_pages;
struct memblock_region *reg;
- arm64_swiotlb_init();
-
max_mapnr = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map;
#ifndef CONFIG_SPARSEMEM_VMEMMAP
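zone_sizes_init() above now derives the DMA zone bound at run time instead of using the fixed MAX_DMA32_PFN: the first bus address beyond a 32-bit mask is translated back to a physical address and clamped into the [min, max] pfn range. Condensed into a helper (the name is illustrative; the patch open-codes it):

        static unsigned long __init max_zone_dma_pfn(unsigned long min,
                                                     unsigned long max)
        {
                unsigned long lim = (unsigned long)
                        dma_to_phys(NULL, DMA_BIT_MASK(32) + 1) >> PAGE_SHIFT;

                return max(min, min(max, lim));
        }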
diff --git a/arch/arm64/mm/ioremap.c b/arch/arm64/mm/ioremap.c
index 1725cd6..00d315a 100644
--- a/arch/arm64/mm/ioremap.c
+++ b/arch/arm64/mm/ioremap.c
@@ -25,6 +25,10 @@
#include <linux/vmalloc.h>
#include <linux/io.h>
+#include <asm/fixmap.h>
+#include <asm/tlbflush.h>
+#include <asm/pgalloc.h>
+
static void __iomem *__ioremap_caller(phys_addr_t phys_addr, size_t size,
pgprot_t prot, void *caller)
{
@@ -82,3 +86,95 @@
vunmap(addr);
}
EXPORT_SYMBOL(__iounmap);
+
+void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size)
+{
+ /* For normal memory we already have a cacheable mapping. */
+ if (pfn_valid(__phys_to_pfn(phys_addr)))
+ return (void __iomem *)__phys_to_virt(phys_addr);
+
+ return __ioremap_caller(phys_addr, size, __pgprot(PROT_NORMAL),
+ __builtin_return_address(0));
+}
+EXPORT_SYMBOL(ioremap_cache);
+
+#ifndef CONFIG_ARM64_64K_PAGES
+static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
+#endif
+
+static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
+{
+ pgd_t *pgd;
+ pud_t *pud;
+
+ pgd = pgd_offset_k(addr);
+ BUG_ON(pgd_none(*pgd) || pgd_bad(*pgd));
+
+ pud = pud_offset(pgd, addr);
+ BUG_ON(pud_none(*pud) || pud_bad(*pud));
+
+ return pmd_offset(pud, addr);
+}
+
+static inline pte_t * __init early_ioremap_pte(unsigned long addr)
+{
+ pmd_t *pmd = early_ioremap_pmd(addr);
+
+ BUG_ON(pmd_none(*pmd) || pmd_bad(*pmd));
+
+ return pte_offset_kernel(pmd, addr);
+}
+
+void __init early_ioremap_init(void)
+{
+ pmd_t *pmd;
+
+ pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
+#ifndef CONFIG_ARM64_64K_PAGES
+ /* need to populate pmd for 4k pagesize only */
+ pmd_populate_kernel(&init_mm, pmd, bm_pte);
+#endif
+ /*
+ * The boot-ioremap range spans multiple pmds, for which
+ * we are not prepared:
+ */
+ BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
+ != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
+
+ if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
+ WARN_ON(1);
+ pr_warn("pmd %p != %p\n",
+ pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
+ pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
+ fix_to_virt(FIX_BTMAP_BEGIN));
+ pr_warn("fix_to_virt(FIX_BTMAP_END): %08lx\n",
+ fix_to_virt(FIX_BTMAP_END));
+
+ pr_warn("FIX_BTMAP_END: %d\n", FIX_BTMAP_END);
+ pr_warn("FIX_BTMAP_BEGIN: %d\n",
+ FIX_BTMAP_BEGIN);
+ }
+
+ early_ioremap_setup();
+}
+
+void __init __early_set_fixmap(enum fixed_addresses idx,
+ phys_addr_t phys, pgprot_t flags)
+{
+ unsigned long addr = __fix_to_virt(idx);
+ pte_t *pte;
+
+ if (idx >= __end_of_fixed_addresses) {
+ BUG();
+ return;
+ }
+
+ pte = early_ioremap_pte(addr);
+
+ if (pgprot_val(flags))
+ set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
+ else {
+ pte_clear(&init_mm, addr, pte);
+ flush_tlb_kernel_range(addr, addr+PAGE_SIZE);
+ }
+}
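With the fixmap plumbing above in place, the generic early_ioremap() helpers become usable before paging_init(). A hypothetical early-boot consumer (illustrative only; the function and device are not part of this patch):

        static void __init early_probe_uart(phys_addr_t uart_phys)
        {
                void __iomem *base = early_ioremap(uart_phys, PAGE_SIZE);

                if (!base)
                        return;
                /* ... access the device registers ... */
                early_iounmap(base, PAGE_SIZE);
        }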
diff --git a/arch/arm64/mm/mm.h b/arch/arm64/mm/mm.h
index 916701e..d519f4f 100644
--- a/arch/arm64/mm/mm.h
+++ b/arch/arm64/mm/mm.h
@@ -1,3 +1,2 @@
-extern void __flush_dcache_page(struct page *page);
extern void __init bootmem_init(void);
extern void __init arm64_swiotlb_init(void);
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index eeecc9c..28e5629 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -43,11 +43,6 @@
struct page *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);
-pgprot_t pgprot_default;
-EXPORT_SYMBOL(pgprot_default);
-
-static pmdval_t prot_sect_kernel;
-
struct cachepolicy {
const char policy[16];
u64 mair;
@@ -122,33 +117,6 @@
}
early_param("cachepolicy", early_cachepolicy);
-/*
- * Adjust the PMD section entries according to the CPU in use.
- */
-static void __init init_mem_pgprot(void)
-{
- pteval_t default_pgprot;
- int i;
-
- default_pgprot = PTE_ATTRINDX(MT_NORMAL);
- prot_sect_kernel = PMD_TYPE_SECT | PMD_SECT_AF | PMD_ATTRINDX(MT_NORMAL);
-
-#ifdef CONFIG_SMP
- /*
- * Mark memory with the "shared" attribute for SMP systems
- */
- default_pgprot |= PTE_SHARED;
- prot_sect_kernel |= PMD_SECT_S;
-#endif
-
- for (i = 0; i < 16; i++) {
- unsigned long v = pgprot_val(protection_map[i]);
- protection_map[i] = __pgprot(v | default_pgprot);
- }
-
- pgprot_default = __pgprot(PTE_TYPE_PAGE | PTE_AF | default_pgprot);
-}
-
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
unsigned long size, pgprot_t vma_prot)
{
@@ -203,10 +171,18 @@
do {
next = pmd_addr_end(addr, end);
/* try section mapping first */
- if (((addr | next | phys) & ~SECTION_MASK) == 0)
- set_pmd(pmd, __pmd(phys | prot_sect_kernel));
- else
+ if (((addr | next | phys) & ~SECTION_MASK) == 0) {
+ pmd_t old_pmd = *pmd;
+ set_pmd(pmd, __pmd(phys | PROT_SECT_NORMAL_EXEC));
+ /*
+ * Check for previous table entries created during
+ * boot (__create_page_tables) and flush them.
+ */
+ if (!pmd_none(old_pmd))
+ flush_tlb_all();
+ } else {
alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys));
+ }
phys += next - addr;
} while (pmd++, addr = next, addr != end);
}
@@ -252,50 +228,22 @@
} while (pgd++, addr = next, addr != end);
}
-#ifdef CONFIG_EARLY_PRINTK
-/*
- * Create an early I/O mapping using the pgd/pmd entries already populated
- * in head.S as this function is called too early to allocated any memory. The
- * mapping size is 2MB with 4KB pages or 64KB or 64KB pages.
- */
-void __iomem * __init early_io_map(phys_addr_t phys, unsigned long virt)
-{
- unsigned long size, mask;
- bool page64k = IS_ENABLED(CONFIG_ARM64_64K_PAGES);
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *pte;
-
- /*
- * No early pte entries with !ARM64_64K_PAGES configuration, so using
- * sections (pmd).
- */
- size = page64k ? PAGE_SIZE : SECTION_SIZE;
- mask = ~(size - 1);
-
- pgd = pgd_offset_k(virt);
- pud = pud_offset(pgd, virt);
- if (pud_none(*pud))
- return NULL;
- pmd = pmd_offset(pud, virt);
-
- if (page64k) {
- if (pmd_none(*pmd))
- return NULL;
- pte = pte_offset_kernel(pmd, virt);
- set_pte(pte, __pte((phys & mask) | PROT_DEVICE_nGnRE));
- } else {
- set_pmd(pmd, __pmd((phys & mask) | PROT_SECT_DEVICE_nGnRE));
- }
-
- return (void __iomem *)((virt & mask) + (phys & ~mask));
-}
-#endif
-
static void __init map_mem(void)
{
struct memblock_region *reg;
+ phys_addr_t limit;
+
+ /*
+ * Temporarily limit the memblock range. We need to do this as
+ * create_mapping requires puds, pmds and ptes to be allocated from
+ * memory addressable from the initial direct kernel mapping.
+ *
+ * The initial direct kernel mapping, located at swapper_pg_dir,
+ * gives us PGDIR_SIZE memory starting from PHYS_OFFSET (which must be
+ * aligned to 2MB as per Documentation/arm64/booting.txt).
+ */
+ limit = PHYS_OFFSET + PGDIR_SIZE;
+ memblock_set_current_limit(limit);
/* map all the memory banks */
for_each_memblock(memory, reg) {
@@ -305,8 +253,27 @@
if (start >= end)
break;
+#ifndef CONFIG_ARM64_64K_PAGES
+ /*
+ * For the first memory bank align the start address and
+ * current memblock limit to prevent create_mapping() from
+ * allocating pte page tables from unmapped memory.
+ * When 64K pages are enabled, the pte page table for the
+ * first PGDIR_SIZE is already present in swapper_pg_dir.
+ */
+ if (start < limit)
+ start = ALIGN(start, PMD_SIZE);
+ if (end < limit) {
+ limit = end & PMD_MASK;
+ memblock_set_current_limit(limit);
+ }
+#endif
+
create_mapping(start, __phys_to_virt(start), end - start);
}
+
+ /* Limit no longer required. */
+ memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
}
/*
@@ -317,13 +284,6 @@
{
void *zero_page;
- /*
- * Maximum PGDIR_SIZE addressable via the initial direct kernel
- * mapping in swapper_pg_dir.
- */
- memblock_set_current_limit((PHYS_OFFSET & PGDIR_MASK) + PGDIR_SIZE);
-
- init_mem_pgprot();
map_mem();
/*
@@ -339,7 +299,6 @@
bootmem_init();
empty_zero_page = virt_to_page(zero_page);
- __flush_dcache_page(empty_zero_page);
/*
* TTBR0 is only used for the identity mapping at this stage. Make it
@@ -423,7 +382,7 @@
if (!p)
return -ENOMEM;
- set_pmd(pmd, __pmd(__pa(p) | prot_sect_kernel));
+ set_pmd(pmd, __pmd(__pa(p) | PROT_SECT_NORMAL));
} else
vmemmap_verify((pte_t *)pmd, node, addr, next);
} while (addr = next, addr != end);
diff --git a/arch/arm64/mm/pgd.c b/arch/arm64/mm/pgd.c
index 7083cda..62c6101 100644
--- a/arch/arm64/mm/pgd.c
+++ b/arch/arm64/mm/pgd.c
@@ -32,17 +32,10 @@
pgd_t *pgd_alloc(struct mm_struct *mm)
{
- pgd_t *new_pgd;
-
if (PGD_SIZE == PAGE_SIZE)
- new_pgd = (pgd_t *)get_zeroed_page(GFP_KERNEL);
+ return (pgd_t *)get_zeroed_page(GFP_KERNEL);
else
- new_pgd = kzalloc(PGD_SIZE, GFP_KERNEL);
-
- if (!new_pgd)
- return NULL;
-
- return new_pgd;
+ return kzalloc(PGD_SIZE, GFP_KERNEL);
}
void pgd_free(struct mm_struct *mm, pgd_t *pgd)
diff --git a/arch/arm64/mm/proc-macros.S b/arch/arm64/mm/proc-macros.S
index 8957b82..005d29e 100644
--- a/arch/arm64/mm/proc-macros.S
+++ b/arch/arm64/mm/proc-macros.S
@@ -38,8 +38,7 @@
*/
.macro dcache_line_size, reg, tmp
mrs \tmp, ctr_el0 // read CTR
- lsr \tmp, \tmp, #16
- and \tmp, \tmp, #0xf // cache line size encoding
+ ubfm \tmp, \tmp, #16, #19 // cache line size encoding
mov \reg, #4 // bytes per word
lsl \reg, \reg, \tmp // actual cache line size
.endm
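The ubfm above extracts CTR_EL0 bits [19:16] (DminLine, the log2 of the number of 4-byte words in the smallest D-cache line) in a single instruction, replacing the lsr/and pair. The macro's computation, expressed in C for clarity (the helper is illustrative):

        static inline unsigned int dcache_line_size_bytes(unsigned long ctr_el0)
        {
                unsigned int dminline = (ctr_el0 >> 16) & 0xf;  /* log2(words) */

                return 4u << dminline;                          /* bytes */
        }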
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 25929f6..7358c18 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -80,8 +80,77 @@
ret
ENDPROC(cpu_do_idle)
+#ifdef CONFIG_ARM64_CPU_SUSPEND
+/**
+ * cpu_do_suspend - save CPU registers context
+ *
+ * x0: virtual address of context pointer
+ */
+ENTRY(cpu_do_suspend)
+ mrs x2, tpidr_el0
+ mrs x3, tpidrro_el0
+ mrs x4, contextidr_el1
+ mrs x5, mair_el1
+ mrs x6, cpacr_el1
+ mrs x7, ttbr1_el1
+ mrs x8, tcr_el1
+ mrs x9, vbar_el1
+ mrs x10, mdscr_el1
+ mrs x11, oslsr_el1
+ mrs x12, sctlr_el1
+ stp x2, x3, [x0]
+ stp x4, x5, [x0, #16]
+ stp x6, x7, [x0, #32]
+ stp x8, x9, [x0, #48]
+ stp x10, x11, [x0, #64]
+ str x12, [x0, #80]
+ ret
+ENDPROC(cpu_do_suspend)
+
+/**
+ * cpu_do_resume - restore CPU register context
+ *
+ * x0: Physical address of context pointer
+ * x1: ttbr0_el1 to be restored
+ *
+ * Returns:
+ * sctlr_el1 value in x0
+ */
+ENTRY(cpu_do_resume)
+ /*
+ * Invalidate local tlb entries before turning on MMU
+ */
+ tlbi vmalle1
+ ldp x2, x3, [x0]
+ ldp x4, x5, [x0, #16]
+ ldp x6, x7, [x0, #32]
+ ldp x8, x9, [x0, #48]
+ ldp x10, x11, [x0, #64]
+ ldr x12, [x0, #80]
+ msr tpidr_el0, x2
+ msr tpidrro_el0, x3
+ msr contextidr_el1, x4
+ msr mair_el1, x5
+ msr cpacr_el1, x6
+ msr ttbr0_el1, x1
+ msr ttbr1_el1, x7
+ msr tcr_el1, x8
+ msr vbar_el1, x9
+ msr mdscr_el1, x10
+ /*
+ * Restore oslsr_el1 by writing oslar_el1
+ */
+ ubfx x11, x11, #1, #1
+ msr oslar_el1, x11
+ mov x0, x12
+ dsb nsh // Make sure local tlb invalidation completed
+ isb
+ ret
+ENDPROC(cpu_do_resume)
+#endif
+
/*
- * cpu_switch_mm(pgd_phys, tsk)
+ * cpu_do_switch_mm(pgd_phys, tsk)
*
* Set the translation table base pointer to be pgd_phys.
*
@@ -104,19 +173,13 @@
* value of the SCTLR_EL1 register.
*/
ENTRY(__cpu_setup)
- /*
- * Preserve the link register across the function call.
- */
- mov x28, lr
- bl __flush_dcache_all
- mov lr, x28
ic iallu // I+BTB cache invalidate
+ tlbi vmalle1is // invalidate I + D TLBs
dsb sy
mov x0, #3 << 20
msr cpacr_el1, x0 // Enable FP/ASIMD
msr mdscr_el1, xzr // Reset mdscr_el1
- tlbi vmalle1is // invalidate I + D TLBs
/*
* Memory region attributes for LPAE:
*
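The stp/ldp offsets in cpu_do_suspend()/cpu_do_resume() above imply a fixed save area of eleven 64-bit system register values (88 bytes); the kernel stores these in struct cpu_suspend_ctx as an anonymous register array. A sketch of the layout the offsets assume (field names here are illustrative):

        struct suspend_regs_sketch {
                u64 tpidr_el0;          /* [x0, #0]  */
                u64 tpidrro_el0;        /* [x0, #8]  */
                u64 contextidr_el1;     /* [x0, #16] */
                u64 mair_el1;           /* [x0, #24] */
                u64 cpacr_el1;          /* [x0, #32] */
                u64 ttbr1_el1;          /* [x0, #40] */
                u64 tcr_el1;            /* [x0, #48] */
                u64 vbar_el1;           /* [x0, #56] */
                u64 mdscr_el1;          /* [x0, #64] */
                u64 oslsr_el1;          /* [x0, #72] */
                u64 sctlr_el1;          /* [x0, #80] */
        };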
diff --git a/arch/c6x/kernel/devicetree.c b/arch/c6x/kernel/devicetree.c
index bdb56f0..287d0e6 100644
--- a/arch/c6x/kernel/devicetree.c
+++ b/arch/c6x/kernel/devicetree.c
@@ -33,8 +33,7 @@
#ifdef CONFIG_BLK_DEV_INITRD
-void __init early_init_dt_setup_initrd_arch(unsigned long start,
- unsigned long end)
+void __init early_init_dt_setup_initrd_arch(u64 start, u64 end)
{
initrd_start = (unsigned long)__va(start);
initrd_end = (unsigned long)__va(end);
diff --git a/arch/metag/mm/init.c b/arch/metag/mm/init.c
index d05b845..bdc4811 100644
--- a/arch/metag/mm/init.c
+++ b/arch/metag/mm/init.c
@@ -419,10 +419,9 @@
#endif
#ifdef CONFIG_OF_FLATTREE
-void __init early_init_dt_setup_initrd_arch(unsigned long start,
- unsigned long end)
+void __init early_init_dt_setup_initrd_arch(u64 start, u64 end)
{
- pr_err("%s(%lx, %lx)\n",
+ pr_err("%s(%llx, %llx)\n",
__func__, start, end);
}
#endif /* CONFIG_OF_FLATTREE */
diff --git a/arch/microblaze/kernel/prom.c b/arch/microblaze/kernel/prom.c
index 0a2c68f..62e2e8f 100644
--- a/arch/microblaze/kernel/prom.c
+++ b/arch/microblaze/kernel/prom.c
@@ -136,8 +136,7 @@
}
#ifdef CONFIG_BLK_DEV_INITRD
-void __init early_init_dt_setup_initrd_arch(unsigned long start,
- unsigned long end)
+void __init early_init_dt_setup_initrd_arch(u64 start, u64 end)
{
initrd_start = (unsigned long)__va(start);
initrd_end = (unsigned long)__va(end);
diff --git a/arch/mips/Kbuild.platforms b/arch/mips/Kbuild.platforms
index 4b597d9..3a2f828 100644
--- a/arch/mips/Kbuild.platforms
+++ b/arch/mips/Kbuild.platforms
@@ -9,6 +9,7 @@
platforms += cobalt
platforms += dec
platforms += emma
+platforms += goldfish
platforms += jazz
platforms += jz4740
platforms += lantiq
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 7a58ab9..c4ebcc3 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -27,6 +27,7 @@
select HAVE_GENERIC_HARDIRQS
select GENERIC_IRQ_PROBE
select GENERIC_IRQ_SHOW
+ select GENERIC_PCI_IOMAP
select HAVE_ARCH_JUMP_LABEL
select ARCH_WANT_IPC_PARSE_VERSION
select IRQ_FORCED_THREADING
@@ -42,6 +43,8 @@
select MODULES_USE_ELF_REL if MODULES
select MODULES_USE_ELF_RELA if MODULES && 64BIT
select CLONE_BACKWARDS
+ select ARCH_BINFMT_ELF_STATE
+ select HAVE_CMPXCHG_LOCAL
menu "Machine selection"
@@ -288,6 +291,30 @@
the ICT (Institute of Computing Technology) and the Chinese Academy
of Sciences.
+config MIPS_RANCHU
+ bool "Ranchu (Virtual Platform)"
+ select BOOT_ELF32
+ select BOOT_RAW
+ select CEVT_R4K
+ select CSRC_R4K
+ select DMA_COHERENT
+ select IRQ_CPU
+ select MIPS_CPU_SCACHE
+ select SYS_HAS_CPU_MIPS32_R1
+ select SYS_HAS_CPU_MIPS32_R2
+ select SYS_HAS_CPU_MIPS32_R2_EVA
+ select SYS_HAS_CPU_MIPS32_R6
+ select SYS_HAS_EARLY_PRINTK
+ select SYS_SUPPORTS_32BIT_KERNEL
+ select SYS_SUPPORTS_64BIT_KERNEL
+ select SYS_HAS_CPU_MIPS64_R1
+ select SYS_HAS_CPU_MIPS64_R2
+ select SYS_HAS_CPU_MIPS64_R6
+ select SYS_SUPPORTS_BIG_ENDIAN
+ select SYS_SUPPORTS_LITTLE_ENDIAN
+ select SYS_SUPPORTS_HIGHMEM
+ select USE_OF
+
config MIPS_MALTA
bool "MIPS Malta board"
select ARCH_MAY_HAVE_PC_FDC
@@ -311,8 +338,11 @@
select SWAP_IO_SPACE
select SYS_HAS_CPU_MIPS32_R1
select SYS_HAS_CPU_MIPS32_R2
+ select SYS_HAS_CPU_MIPS32_R2_EVA
+ select SYS_HAS_CPU_MIPS32_R6
select SYS_HAS_CPU_MIPS64_R1
select SYS_HAS_CPU_MIPS64_R2
+ select SYS_HAS_CPU_MIPS64_R6
select SYS_HAS_CPU_NEVADA
select SYS_HAS_CPU_RM7000
select SYS_HAS_EARLY_PRINTK
@@ -324,6 +354,8 @@
select SYS_SUPPORTS_MULTITHREADING
select SYS_SUPPORTS_SMARTMIPS
select SYS_SUPPORTS_ZBOOT
+ select SYS_SUPPORTS_HIGHMEM
+ select PM_SLEEP
help
This enables support for the MIPS Technologies Malta evaluation
board.
@@ -607,7 +639,6 @@
select BOOT_ELF32
select DMA_COHERENT
select HAVE_PATA_PLATFORM
- select NR_CPUS_DEFAULT_2
select SIBYTE_SB1250
select SWAP_IO_SPACE
select SYS_HAS_CPU_SB1
@@ -621,7 +652,6 @@
select BOOT_ELF32
select DMA_COHERENT
select HAVE_PATA_PLATFORM
- select NR_CPUS_DEFAULT_2
select SIBYTE_SB1250
select SWAP_IO_SPACE
select SYS_HAS_CPU_SB1
@@ -633,7 +663,6 @@
bool "Sibyte BCM91250E-Sentosa"
select BOOT_ELF32
select DMA_COHERENT
- select NR_CPUS_DEFAULT_2
select SIBYTE_SB1250
select SWAP_IO_SPACE
select SYS_HAS_CPU_SB1
@@ -1233,6 +1262,7 @@
select CPU_HAS_PREFETCH
select CPU_SUPPORTS_32BIT_KERNEL
select CPU_SUPPORTS_HIGHMEM
+ select CPU_SUPPORTS_MSA
select HAVE_KVM
help
Choose this option to build a kernel for release 2 or later of the
@@ -1268,6 +1298,7 @@
select CPU_SUPPORTS_64BIT_KERNEL
select CPU_SUPPORTS_HIGHMEM
select CPU_SUPPORTS_HUGEPAGES
+ select CPU_SUPPORTS_MSA
help
Choose this option to build a kernel for release 2 or later of the
MIPS64 architecture. Many modern embedded systems with a 64-bit
@@ -1275,6 +1306,30 @@
specific type of processor in your system, choose that one;
otherwise CPU_MIPS64_R1 is a safe bet for any MIPS64 system.
+config CPU_MIPS32_R6
+ bool "MIPS32 Release 6"
+ depends on SYS_HAS_CPU_MIPS32_R6
+ select CPU_HAS_PREFETCH
+ select CPU_SUPPORTS_32BIT_KERNEL
+ select CPU_SUPPORTS_HIGHMEM
+ select CPU_SUPPORTS_MSA
+ help
+ Choose this option to build a kernel for release 6 or later of the
+ MIPS32 architecture.
+
+config CPU_MIPS64_R6
+ bool "MIPS64 Release 6"
+ depends on SYS_HAS_CPU_MIPS64_R6
+ select CPU_HAS_PREFETCH
+ select CPU_SUPPORTS_32BIT_KERNEL
+ select CPU_SUPPORTS_64BIT_KERNEL
+ select CPU_SUPPORTS_HIGHMEM
+ select CPU_SUPPORTS_HUGEPAGES
+ select CPU_SUPPORTS_MSA
+ help
+ Choose this option to build a kernel for release 6 or later of the
+ MIPS64 architecture.
+
config CPU_R3000
bool "R3000"
depends on SYS_HAS_CPU_R3000
@@ -1494,6 +1549,47 @@
Netlogic Microsystems XLP processors.
endchoice
+config CPU_MIPS32_R2_EVA
+ bool "MIPS32 Release 2 with EVA support"
+ depends on SYS_HAS_CPU_MIPS32_R2_EVA
+ depends on CPU_MIPS32_R2
+ select EVA
+ help
+ Choose this option to build a kernel for release 2 or later of the
+ MIPS32 architecture running in EVA mode. EVA (Extended Virtual
+ Addressing) also allows extended direct physical memory addressing
+ in the kernel (more than 512MB, i.e. 2GB or 3GB). If you know the
+ specific type of processor in your system, choose that one;
+ otherwise CPU_MIPS32_R1 is a safe bet for any MIPS32 system.
+ If unsure, select just CPU_MIPS32_R2 or even CPU_MIPS32_R1.
+
+config EVA_OLD_MALTA_MAP
+ bool "Old memory map on Malta (sys controller 1.418)"
+ depends on EVA
+ help
+ Choose this option to build an EVA kernel for the old Malta memory
+ map. All memory is located above 0x80000000 and the first 256M is
+ mirrored to the first 0x80000000. IOCU does not work with this
+ option. It is designed for systems with the RocIt system controller
+ 1.418/1.424 and is kept just for MTI testing purposes (1.424 can be
+ used with the new memory map too).
+ It may or may not work with SMP - address aliasing is crazy for YAMON.
+
+config EVA_3GB
+ bool "EVA support for 3GB memory"
+ depends on EVA
+ depends on EVA_OLD_MALTA_MAP
+ help
+ Choose this option to build an EVA kernel supporting up to 3GB of
+ physical memory. This option shifts uncacheable I/O registers from
+ KSEG1 to KSEG3, which becomes uncacheable, so KSEG1 (+KSEG0) can be
+ used for an additional 1GB of physical memory. To minimize changes
+ in drivers and code the same name (KSEG1) is still used, but its
+ address changes; the physical I/O address stays the same.
+ On a Malta board with the old memory map this does not give you 3GB
+ (because of the PCI bridge loop) but it can be used as a starting
+ point for development.
+
if CPU_LOONGSON2F
config CPU_NOP_WORKAROUNDS
bool
@@ -1574,12 +1670,21 @@
config SYS_HAS_CPU_MIPS32_R2
bool
+config SYS_HAS_CPU_MIPS32_R2_EVA
+ bool
+
config SYS_HAS_CPU_MIPS64_R1
bool
config SYS_HAS_CPU_MIPS64_R2
bool
+config SYS_HAS_CPU_MIPS32_R6
+ bool
+
+config SYS_HAS_CPU_MIPS64_R6
+ bool
+
config SYS_HAS_CPU_R3000
bool
@@ -1666,11 +1771,11 @@
#
config CPU_MIPS32
bool
- default y if CPU_MIPS32_R1 || CPU_MIPS32_R2
+ default y if CPU_MIPS32_R1 || CPU_MIPS32_R2 || CPU_MIPS32_R6
config CPU_MIPS64
bool
- default y if CPU_MIPS64_R1 || CPU_MIPS64_R2
+ default y if CPU_MIPS64_R1 || CPU_MIPS64_R2 || CPU_MIPS64_R6
#
# These two indicate the revision of the architecture, either Release 1 or Release 2
@@ -1683,6 +1788,17 @@
bool
default y if CPU_MIPS32_R2 || CPU_MIPS64_R2 || CPU_CAVIUM_OCTEON
+config GENERIC_CSUM
+ bool
+
+config CPU_MIPSR6
+ bool
+ default y if CPU_MIPS64_R6 || CPU_MIPS32_R6
+ select GENERIC_CSUM
+
+config EVA
+ bool
+
config SYS_SUPPORTS_32BIT_KERNEL
bool
config SYS_SUPPORTS_64BIT_KERNEL
@@ -1708,7 +1824,7 @@
#
config HARDWARE_WATCHPOINTS
bool
- default y if CPU_MIPSR1 || CPU_MIPSR2
+ default y if CPU_MIPSR1 || CPU_MIPSR2 || CPU_MIPSR6
menu "Kernel type"
@@ -1748,6 +1864,23 @@
RTC emulation when determining guest CPU Frequency. Instead, the guest
processor frequency is automatically derived from the host frequency.
+config 64BIT_PHYS_ADDR
+ bool "Kernel supports 64 bit physical addresses" if EXPERIMENTAL
+ depends on 64BIT
+ help
+ Use 64-bit physical addresses in the kernel.
+ This increases page table sizes.
+
+ It is an alternative to HIGHMEM for using large amounts of physical
+ memory. It requires a 64-bit capable CPU and the 64-bit kernel
+ code model.
+
+ Note: without this option the kernel can support up to 4GB of
+ physical memory with 4KB pages and up to 64GB with 64KB pages.
+
+config ARCH_PHYS_ADDR_T_64BIT
+ def_bool 64BIT_PHYS_ADDR
+
+
choice
prompt "Kernel page size"
default PAGE_SIZE_4KB
@@ -1799,6 +1932,17 @@
endchoice
+config 48VMBITS
+ bool "48 bits virtual memory"
+ depends on PAGE_SIZE_16KB || PAGE_SIZE_64KB
+ depends on 64BIT
+ help
+ Allow at least 48 bits of application virtual address space.
+ The default is 40 bits or less, depending on the CPU.
+ For a typical (small) application this only adds a small number of
+ pages to the page tables.
+ If unsure, say N.
+
config FORCE_MAX_ZONEORDER
int "Maximum zone order"
range 14 64 if HUGETLB_PAGE && PAGE_SIZE_64KB
@@ -1881,60 +2025,50 @@
prompt "MIPS MT options"
config MIPS_MT_DISABLED
- bool "Disable multithreading support."
+ bool "Disable multithreading support"
help
- Use this option if your workload can't take advantage of
- MIPS hardware multithreading support. On systems that don't have
- the option of an MT-enabled processor this option will be the only
- option in this menu.
+ Use this option if your platform does not support the MT ASE
+ (hardware multithreading). On systems without an MT-enabled
+ processor, this will be the only option available in this
+ menu.
config MIPS_MT_SMP
bool "Use 1 TC on each available VPE for SMP"
depends on SYS_SUPPORTS_MULTITHREADING
select CPU_MIPSR2_IRQ_VI
select CPU_MIPSR2_IRQ_EI
+ select SYNC_R4K
select MIPS_MT
- select NR_CPUS_DEFAULT_2
select SMP
- select SYS_SUPPORTS_SCHED_SMT if SMP
- select SYS_SUPPORTS_SMP
select SMP_UP
+ select SYS_SUPPORTS_SMP
+ select SYS_SUPPORTS_SCHED_SMT
select MIPS_PERF_SHARED_TC_COUNTERS
help
- This is a kernel model which is known a VSMP but lately has been
- marketesed into SMVP.
- Virtual SMP uses the processor's VPEs to implement virtual
- processors. In currently available configuration of the 34K processor
- this allows for a dual processor. Both processors will share the same
- primary caches; each will obtain the half of the TLB for it's own
- exclusive use. For a layman this model can be described as similar to
- what Intel calls Hyperthreading.
-
- For further information see http://www.linux-mips.org/wiki/34K#VSMP
+ This is a kernel model which is known as SMVP. It is supported
+ on cores with the MT ASE and uses the available VPEs to implement
+ virtual processors that support SMP. This is equivalent to the
+ Intel Hyperthreading feature. For further information go to
+ <http://www.imgtec.com/mips/mips-multithreading.asp>.
config MIPS_MT_SMTC
- bool "SMTC: Use all TCs on all VPEs for SMP"
+ bool "Use all TCs on all VPEs for SMP (DEPRECATED)"
depends on CPU_MIPS32_R2
#depends on CPU_MIPS64_R2 # once there is hardware ...
depends on SYS_SUPPORTS_MULTITHREADING
+ depends on !CPU_MIPSR6
select CPU_MIPSR2_IRQ_VI
select CPU_MIPSR2_IRQ_EI
select MIPS_MT
- select NR_CPUS_DEFAULT_8
select SMP
- select SYS_SUPPORTS_SMP
select SMP_UP
+ select SYS_SUPPORTS_SMP
+ select NR_CPUS_DEFAULT_8
help
- This is a kernel model which is known a SMTC or lately has been
- marketesed into SMVP.
- is presenting the available TC's of the core as processors to Linux.
- On currently available 34K processors this means a Linux system will
- see up to 5 processors. The implementation of the SMTC kernel differs
- significantly from VSMP and cannot efficiently coexist in the same
- kernel binary so the choice between VSMP and SMTC is a compile time
- decision.
-
- For further information see http://www.linux-mips.org/wiki/34K#SMTC
+ This is a kernel model which is known as SMTC. This is
+ supported on cores with the MT ASE and presents all TCs
+ available on all VPEs to support SMP. For further
+ information see <http://www.linux-mips.org/wiki/34K#SMTC>.
endchoice
@@ -1961,6 +2095,23 @@
default y
depends on MIPS_MT_SMP || MIPS_MT_SMTC
+config MIPS_INCOMPATIBLE_ARCH_EMULATION
+ bool "Emulation of incompatible architecture"
+ default y if CPU_MIPSR6
+ help
+ Emulate 32x32-bit or 32x64-bit FPU ELF binaries on an incompatible
+ FPU. The CP0_Status.FR bit controls switching between the two
+ models, but some CPUs may not have this capability.
+ It can also emulate the MIPS32/64 R2 architecture on MIPS R6.
+ If unsure, say N here.
+
+config MIPS_GOLDFISH_SWITCH
+ bool "MIPS Goldfish switch class driver"
+ default n
+ depends on MIPS_RANCHU
+ help
+ Goldfish switch driver implementation for MIPS.
+
config MIPS_VPE_LOADER
bool "VPE loader support."
depends on SYS_SUPPORTS_MULTITHREADING
@@ -2011,16 +2162,13 @@
help
config MIPS_CMP
- bool "MIPS CMP framework support"
- depends on SYS_SUPPORTS_MIPS_CMP
+ bool "MIPS CMP support"
+ depends on SYS_SUPPORTS_MIPS_CMP && !MIPS_MT_SMTC && !CPU_MIPSR1
select SYNC_R4K
- select SYS_SUPPORTS_SMP
- select SYS_SUPPORTS_SCHED_SMT if SMP
select WEAK_ORDERING
default n
help
- This is a placeholder option for the GCMP work. It will need to
- be handled differently...
+ Enable Coherency Manager processor (CMP) support.
config SB1_PASS_1_WORKAROUNDS
bool
@@ -2038,12 +2186,6 @@
default y
-config 64BIT_PHYS_ADDR
- bool
-
-config ARCH_PHYS_ADDR_T_64BIT
- def_bool 64BIT_PHYS_ADDR
-
config CPU_HAS_SMARTMIPS
depends on SYS_SUPPORTS_SMARTMIPS
bool "Support for the SmartMIPS ASE"
@@ -2063,6 +2205,21 @@
When this option is enabled the kernel will be built using the
microMIPS ISA
+config CPU_HAS_MSA
+ bool "Support for the MIPS SIMD Architecture (EXPERIMENTAL)"
+ depends on CPU_SUPPORTS_MSA
+ default y
+ help
+ MIPS SIMD Architecture (MSA) introduces 128 bit wide vector registers
+ and a set of SIMD instructions to operate on them. When this option
+ is enabled the kernel will support allocating & switching MSA
+ vector register contexts. If you know that your kernel will only be
+ running on CPUs which do not support MSA or that your userland will
+ not be making use of it then you may wish to say N here to reduce
+ the size & complexity of your kernel.
+
+ If unsure, say Y.
+
config CPU_HAS_WB
bool
@@ -2115,6 +2272,8 @@
config HIGHMEM
bool "High Memory Support"
depends on 32BIT && CPU_SUPPORTS_HIGHMEM && SYS_SUPPORTS_HIGHMEM
+ depends on ( !SMP || NR_CPUS = 1 || NR_CPUS = 2 || NR_CPUS = 3 || NR_CPUS = 4 || NR_CPUS = 5 || NR_CPUS = 6 || NR_CPUS = 7 || NR_CPUS = 8 )
+ depends on !CPU_MIPS32_R2_EVA
config CPU_SUPPORTS_HIGHMEM
bool
@@ -2128,6 +2287,9 @@
config SYS_SUPPORTS_MICROMIPS
bool
+config CPU_SUPPORTS_MSA
+ bool
+
config ARCH_FLATMEM_ENABLE
def_bool y
depends on !NUMA && !CPU_LOONGSON2
@@ -2205,12 +2367,6 @@
config SYS_SUPPORTS_SMP
bool
-config NR_CPUS_DEFAULT_1
- bool
-
-config NR_CPUS_DEFAULT_2
- bool
-
config NR_CPUS_DEFAULT_4
bool
@@ -2228,10 +2384,8 @@
config NR_CPUS
int "Maximum number of CPUs (2-64)"
- range 1 64 if NR_CPUS_DEFAULT_1
+ range 2 64
depends on SMP
- default "1" if NR_CPUS_DEFAULT_1
- default "2" if NR_CPUS_DEFAULT_2
default "4" if NR_CPUS_DEFAULT_4
default "8" if NR_CPUS_DEFAULT_8
default "16" if NR_CPUS_DEFAULT_16
@@ -2387,6 +2541,20 @@
select OF_EARLY_FLATTREE
select IRQ_DOMAIN
+config MIPS_APPENDED_DTB
+ bool "Use appended device tree blob to vmlinux.bin (EXPERIMENTAL)"
+ depends on OF && MIPS_RANCHU
+ help
+ With this option, the boot code will look for a device tree binary
+ (DTB) appended to the raw vmlinux.bin (without decompressor),
+ e.g. cat vmlinux.bin <filename>.dtb > vmlinux_w_dtb.
+
+ Beware that there is very little in terms of protection against
+ this option being confused by leftover garbage in memory that might
+ look like a DTB header after a reboot if no actual DTB is appended
+ to vmlinux.bin. Do not leave this option active in a production kernel
+ if you don't intend to always append a DTB.
+
endmenu
config LOCKDEP_SUPPORT
@@ -2412,7 +2580,6 @@
bool "Support for PCI controller"
depends on HW_HAS_PCI
select PCI_DOMAINS
- select GENERIC_PCI_IOMAP
select NO_GENERIC_PCI_IOPORT_MAP
help
Find out whether you have a PCI motherboard. PCI is the name of a
@@ -2507,6 +2674,7 @@
config MIPS32_COMPAT
bool "Kernel support for Linux/MIPS 32-bit binary compatibility"
depends on 64BIT
+ default y if CPU_SUPPORTS_32BIT_KERNEL && SYS_SUPPORTS_32BIT_KERNEL
help
Select this option if you want Linux/MIPS 32-bit binary
compatibility. Since all software available for Linux/MIPS is
@@ -2526,6 +2694,7 @@
config MIPS32_O32
bool "Kernel support for o32 binaries"
depends on MIPS32_COMPAT
+ default y if CPU_SUPPORTS_32BIT_KERNEL && SYS_SUPPORTS_32BIT_KERNEL
help
Select this option if you want to run o32 binaries. These are pure
32-bit binaries as used by the 32-bit Linux/MIPS port. Most of
@@ -2544,6 +2713,10 @@
If unsure, say N.
+comment "64bit kernel, but support of 32bit applications is disabled!"
+ depends on 64BIT && !MIPS32_O32 && !MIPS32_N32
+ depends on CPU_SUPPORTS_32BIT_KERNEL && SYS_SUPPORTS_32BIT_KERNEL
+
config BINFMT_ELF32
bool
default y if MIPS32_O32 || MIPS32_N32
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index dd58a04..65bc810 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -88,11 +88,20 @@
# crossformat linking we rely on the elf2ecoff tool for format conversion.
#
cflags-y += -G 0 -mno-abicalls -fno-pic -pipe
-cflags-y += -msoft-float
+cflags-y += -msoft-float -Wa,-msoft-float
LDFLAGS_vmlinux += -G 0 -static -n -nostdlib
KBUILD_AFLAGS_MODULE += -mlong-calls
KBUILD_CFLAGS_MODULE += -mlong-calls
+#
+# pass -msoft-float to GAS if it supports it. However on newer binutils
+# (specifically newer than 2.24.51.20140728) we then also need to explicitly
+# set ".set hardfloat" in all files which manipulate floating point registers.
+#
+ifneq ($(call as-option,-Wa$(comma)-msoft-float,),)
+ cflags-y += -DGAS_HAS_SET_HARDFLOAT -Wa,-msoft-float
+endif
+
cflags-y += -ffreestanding
#
@@ -119,6 +128,11 @@
cflags-$(CONFIG_SB1XXX_CORELIS) += $(call cc-option,-mno-sched-prolog) \
-fno-omit-frame-pointer
+ifeq ($(CONFIG_CPU_HAS_MSA),y)
+toolchain-msa := $(call cc-option-yn,-mhard-float -mfp64 -Wa$(comma)-mmsa)
+cflags-$(toolchain-msa) += -DTOOLCHAIN_SUPPORTS_MSA
+endif
+
#
# CPU-dependent compiler/assembler options for optimization.
#
@@ -133,10 +147,14 @@
-Wa,-mips32 -Wa,--trap
cflags-$(CONFIG_CPU_MIPS32_R2) += $(call cc-option,-march=mips32r2,-mips32r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS32) \
-Wa,-mips32r2 -Wa,--trap
+cflags-$(CONFIG_CPU_MIPS32_R6) += $(call cc-option,-march=mips32r6,-mips32r6 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS32) \
+ -Wa,-mips32r6 -Wa,--trap
cflags-$(CONFIG_CPU_MIPS64_R1) += $(call cc-option,-march=mips64,-mips64 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64) \
-Wa,-mips64 -Wa,--trap
cflags-$(CONFIG_CPU_MIPS64_R2) += $(call cc-option,-march=mips64r2,-mips64r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64) \
-Wa,-mips64r2 -Wa,--trap
+cflags-$(CONFIG_CPU_MIPS64_R6) += $(call cc-option,-march=mips64r6,-mips64r6 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64) \
+ -Wa,-mips64r6 -Wa,--trap
cflags-$(CONFIG_CPU_R5000) += -march=r5000 -Wa,--trap
cflags-$(CONFIG_CPU_R5432) += $(call cc-option,-march=r5400,-march=r5000) \
-Wa,--trap
@@ -194,6 +212,8 @@
ifdef CONFIG_PHYSICAL_START
load-y = $(CONFIG_PHYSICAL_START)
endif
+entry-y = 0x$(shell $(NM) vmlinux 2>/dev/null \
+ | grep "\bkernel_entry\b" | cut -f1 -d \ )
cflags-y += -I$(srctree)/arch/mips/include/asm/mach-generic
drivers-$(CONFIG_PCI) += arch/mips/pci/
@@ -225,6 +245,9 @@
KBUILD_CPPFLAGS += -DVMLINUX_LOAD_ADDRESS=$(load-y)
KBUILD_CPPFLAGS += -DDATAOFFSET=$(if $(dataoffset-y),$(dataoffset-y),0)
+bootvars-y = VMLINUX_LOAD_ADDRESS=$(load-y) \
+ VMLINUX_ENTRY_ADDRESS=$(entry-y)
+
LDFLAGS += -m $(ld-emul)
ifdef CONFIG_MIPS
@@ -250,9 +273,25 @@
# suspend and hibernation support
drivers-$(CONFIG_PM) += arch/mips/power/
+# boot image targets (arch/mips/boot/)
+boot-y := vmlinux.bin
+boot-y += vmlinux.ecoff
+boot-y += vmlinux.srec
+ifeq ($(shell expr $(load-y) \< 0xffffffff80000000 2> /dev/null), 0)
+boot-y += uImage
+boot-y += uImage.gz
+endif
+
+# compressed boot image targets (arch/mips/boot/compressed/)
+bootz-y := vmlinuz
+bootz-y += vmlinuz.bin
+bootz-y += vmlinuz.ecoff
+bootz-y += vmlinuz.srec
+
ifdef CONFIG_LASAT
rom.bin rom.sw: vmlinux
- $(Q)$(MAKE) $(build)=arch/mips/lasat/image $@
+ $(Q)$(MAKE) $(build)=arch/mips/lasat/image \
+ $(bootvars-y) $@
endif
#
@@ -276,13 +315,14 @@
all: $(all-y)
# boot
-vmlinux.bin vmlinux.ecoff vmlinux.srec: $(vmlinux-32) FORCE
- $(Q)$(MAKE) $(build)=arch/mips/boot VMLINUX=$(vmlinux-32) arch/mips/boot/$@
+$(boot-y): $(vmlinux-32) FORCE
+ $(Q)$(MAKE) $(build)=arch/mips/boot VMLINUX=$(vmlinux-32) \
+ $(bootvars-y) arch/mips/boot/$@
# boot/compressed
-vmlinuz vmlinuz.bin vmlinuz.ecoff vmlinuz.srec: $(vmlinux-32) FORCE
+$(bootz-y): $(vmlinux-32) FORCE
$(Q)$(MAKE) $(build)=arch/mips/boot/compressed \
- VMLINUX_LOAD_ADDRESS=$(load-y) 32bit-bfd=$(32bit-bfd) $@
+ $(bootvars-y) 32bit-bfd=$(32bit-bfd) $@
CLEAN_FILES += vmlinux.32 vmlinux.64
@@ -319,6 +359,8 @@
echo ' vmlinuz.ecoff - ECOFF zboot image'
echo ' vmlinuz.bin - Raw binary zboot image'
echo ' vmlinuz.srec - SREC zboot image'
+ echo ' uImage - U-Boot image'
+ echo ' uImage.gz - U-Boot image (gzip)'
echo
echo ' These will be default as appropriate for a configured platform.'
endef
diff --git a/arch/mips/boot/.gitignore b/arch/mips/boot/.gitignore
index f210b09..a73d6e2 100644
--- a/arch/mips/boot/.gitignore
+++ b/arch/mips/boot/.gitignore
@@ -4,3 +4,4 @@
zImage
zImage.tmp
calc_vmlinuz_load_addr
+uImage
diff --git a/arch/mips/boot/Makefile b/arch/mips/boot/Makefile
index 851261e..1466c00 100644
--- a/arch/mips/boot/Makefile
+++ b/arch/mips/boot/Makefile
@@ -40,3 +40,18 @@
cmd_srec = $(OBJCOPY) -S -O srec $(strip-flags) $(VMLINUX) $@
$(obj)/vmlinux.srec: $(VMLINUX) FORCE
$(call if_changed,srec)
+
+UIMAGE_LOADADDR = $(VMLINUX_LOAD_ADDRESS)
+UIMAGE_ENTRYADDR = $(VMLINUX_ENTRY_ADDRESS)
+
+$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
+ $(call if_changed,gzip)
+
+targets += uImage.gz
+$(obj)/uImage.gz: $(obj)/vmlinux.bin.gz FORCE
+ $(call if_changed,uimage,gzip)
+
+targets += uImage
+$(obj)/uImage: $(obj)/uImage.gz FORCE
+ @ln -sf $(notdir $<) $@
+ @echo ' Image $@ is ready'
diff --git a/arch/mips/boot/compressed/Makefile b/arch/mips/boot/compressed/Makefile
index bbaa1d4..0048c08 100644
--- a/arch/mips/boot/compressed/Makefile
+++ b/arch/mips/boot/compressed/Makefile
@@ -18,12 +18,14 @@
# Disable Function Tracer
KBUILD_CFLAGS := $(shell echo $(KBUILD_CFLAGS) | sed -e "s/-pg//")
+KBUILD_CFLAGS := $(filter-out -fstack-protector, $(KBUILD_CFLAGS))
+
KBUILD_CFLAGS := $(LINUXINCLUDE) $(KBUILD_CFLAGS) -D__KERNEL__ \
-DBOOT_HEAP_SIZE=$(BOOT_HEAP_SIZE) -D"VMLINUX_LOAD_ADDRESS_ULL=$(VMLINUX_LOAD_ADDRESS)ull"
KBUILD_AFLAGS := $(LINUXINCLUDE) $(KBUILD_AFLAGS) -D__ASSEMBLY__ \
-DBOOT_HEAP_SIZE=$(BOOT_HEAP_SIZE) \
- -DKERNEL_ENTRY=0x$(shell $(NM) $(objtree)/$(KBUILD_IMAGE) 2>/dev/null | grep " kernel_entry" | cut -f1 -d \ )
+ -DKERNEL_ENTRY=$(VMLINUX_ENTRY_ADDRESS)
targets := head.o decompress.o dbg.o uart-16550.o uart-alchemy.o
diff --git a/arch/mips/boot/dts/include/dt-bindings b/arch/mips/boot/dts/include/dt-bindings
new file mode 120000
index 0000000..08c00e4
--- /dev/null
+++ b/arch/mips/boot/dts/include/dt-bindings
@@ -0,0 +1 @@
+../../../../../include/dt-bindings
\ No newline at end of file
diff --git a/arch/mips/configs/malta_android_defconfig b/arch/mips/configs/malta_android_defconfig
new file mode 100644
index 0000000..ec3635d
--- /dev/null
+++ b/arch/mips/configs/malta_android_defconfig
@@ -0,0 +1,2914 @@
+#
+# Automatically generated file; DO NOT EDIT.
+# Linux/mips 3.10.14 Kernel Configuration
+#
+CONFIG_MIPS=y
+
+#
+# Machine selection
+#
+CONFIG_ZONE_DMA=y
+# CONFIG_MIPS_ALCHEMY is not set
+# CONFIG_AR7 is not set
+# CONFIG_ATH79 is not set
+# CONFIG_BCM47XX is not set
+# CONFIG_BCM63XX is not set
+# CONFIG_MIPS_COBALT is not set
+# CONFIG_MACH_DECSTATION is not set
+# CONFIG_MACH_JAZZ is not set
+# CONFIG_MACH_JZ4740 is not set
+# CONFIG_LANTIQ is not set
+# CONFIG_LASAT is not set
+# CONFIG_MACH_LOONGSON is not set
+# CONFIG_MACH_LOONGSON1 is not set
+CONFIG_MIPS_MALTA=y
+# CONFIG_MIPS_SEAD3 is not set
+# CONFIG_NEC_MARKEINS is not set
+# CONFIG_MACH_VR41XX is not set
+# CONFIG_NXP_STB220 is not set
+# CONFIG_NXP_STB225 is not set
+# CONFIG_PMC_MSP is not set
+# CONFIG_POWERTV is not set
+# CONFIG_RALINK is not set
+# CONFIG_SGI_IP22 is not set
+# CONFIG_SGI_IP27 is not set
+# CONFIG_SGI_IP28 is not set
+# CONFIG_SGI_IP32 is not set
+# CONFIG_SIBYTE_CRHINE is not set
+# CONFIG_SIBYTE_CARMEL is not set
+# CONFIG_SIBYTE_CRHONE is not set
+# CONFIG_SIBYTE_RHONE is not set
+# CONFIG_SIBYTE_SWARM is not set
+# CONFIG_SIBYTE_LITTLESUR is not set
+# CONFIG_SIBYTE_SENTOSA is not set
+# CONFIG_SIBYTE_BIGSUR is not set
+# CONFIG_SNI_RM is not set
+# CONFIG_MACH_TX39XX is not set
+# CONFIG_MACH_TX49XX is not set
+# CONFIG_MIKROTIK_RB532 is not set
+# CONFIG_WR_PPMC is not set
+# CONFIG_CAVIUM_OCTEON_SIMULATOR is not set
+# CONFIG_CAVIUM_OCTEON_REFERENCE_BOARD is not set
+# CONFIG_NLM_XLR_BOARD is not set
+# CONFIG_NLM_XLP_BOARD is not set
+# CONFIG_ALCHEMY_GPIO_INDIRECT is not set
+CONFIG_RWSEM_GENERIC_SPINLOCK=y
+# CONFIG_ARCH_HAS_ILOG2_U32 is not set
+# CONFIG_ARCH_HAS_ILOG2_U64 is not set
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_SCHED_OMIT_FRAME_POINTER=y
+CONFIG_ARCH_MAY_HAVE_PC_FDC=y
+CONFIG_BOOT_RAW=y
+CONFIG_CEVT_R4K=y
+# CONFIG_CEVT_GIC is not set
+CONFIG_CSRC_R4K=y
+CONFIG_CSRC_GIC=y
+# CONFIG_ARCH_DMA_ADDR_T_64BIT is not set
+CONFIG_DMA_NONCOHERENT=y
+CONFIG_NEED_DMA_MAP_STATE=y
+CONFIG_SYS_HAS_EARLY_PRINTK=y
+CONFIG_I8259=y
+CONFIG_MIPS_BONITO64=y
+CONFIG_MIPS_MSC=y
+CONFIG_SYNC_R4K=y
+# CONFIG_MIPS_MACHINE is not set
+# CONFIG_NO_IOPORT is not set
+CONFIG_GENERIC_ISA_DMA=y
+CONFIG_ISA_DMA_API=y
+# CONFIG_CPU_BIG_ENDIAN is not set
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_SYS_SUPPORTS_BIG_ENDIAN=y
+CONFIG_SYS_SUPPORTS_LITTLE_ENDIAN=y
+# CONFIG_MIPS_HUGE_TLB_SUPPORT is not set
+CONFIG_IRQ_CPU=y
+CONFIG_IRQ_GIC=y
+CONFIG_PCI_GT64XXX_PCI0=y
+CONFIG_SWAP_IO_SPACE=y
+CONFIG_BOOT_ELF32=y
+CONFIG_MIPS_L1_CACHE_SHIFT=6
+
+#
+# CPU selection
+#
+# CONFIG_CPU_MIPS32_R1 is not set
+CONFIG_CPU_MIPS32_R2=y
+# CONFIG_CPU_MIPS64_R1 is not set
+# CONFIG_CPU_MIPS64_R2 is not set
+# CONFIG_CPU_NEVADA is not set
+# CONFIG_CPU_RM7000 is not set
+# CONFIG_CPU_MIPS32_R2_EVA is not set
+CONFIG_SYS_SUPPORTS_ZBOOT=y
+CONFIG_SYS_HAS_CPU_MIPS32_R1=y
+CONFIG_SYS_HAS_CPU_MIPS32_R2=y
+CONFIG_SYS_HAS_CPU_MIPS32_R2_EVA=y
+CONFIG_SYS_HAS_CPU_MIPS64_R1=y
+CONFIG_SYS_HAS_CPU_MIPS64_R2=y
+CONFIG_SYS_HAS_CPU_NEVADA=y
+CONFIG_SYS_HAS_CPU_RM7000=y
+CONFIG_WEAK_ORDERING=y
+CONFIG_CPU_MIPS32=y
+CONFIG_CPU_MIPSR2=y
+CONFIG_SYS_SUPPORTS_32BIT_KERNEL=y
+CONFIG_SYS_SUPPORTS_64BIT_KERNEL=y
+CONFIG_CPU_SUPPORTS_32BIT_KERNEL=y
+CONFIG_HARDWARE_WATCHPOINTS=y
+
+#
+# Kernel type
+#
+CONFIG_32BIT=y
+CONFIG_PAGE_SIZE_4KB=y
+# CONFIG_PAGE_SIZE_16KB is not set
+# CONFIG_PAGE_SIZE_64KB is not set
+CONFIG_FORCE_MAX_ZONEORDER=11
+CONFIG_BOARD_SCACHE=y
+CONFIG_MIPS_CPU_SCACHE=y
+CONFIG_CPU_HAS_PREFETCH=y
+CONFIG_CPU_GENERIC_DUMP_TLB=y
+CONFIG_CPU_R4K_FPU=y
+CONFIG_CPU_R4K_CACHE_TLB=y
+# CONFIG_MIPS_MT_DISABLED is not set
+CONFIG_MIPS_MT_SMP=y
+# CONFIG_MIPS_MT_SMTC is not set
+CONFIG_MIPS_MT=y
+# CONFIG_SCHED_SMT is not set
+CONFIG_SYS_SUPPORTS_SCHED_SMT=y
+CONFIG_SYS_SUPPORTS_MULTITHREADING=y
+CONFIG_MIPS_MT_FPAFF=y
+CONFIG_MIPS_CMP=y
+# CONFIG_ARCH_PHYS_ADDR_T_64BIT is not set
+# CONFIG_CPU_HAS_SMARTMIPS is not set
+CONFIG_CPU_MIPSR2_IRQ_VI=y
+CONFIG_CPU_MIPSR2_IRQ_EI=y
+CONFIG_CPU_HAS_SYNC=y
+# CONFIG_HIGHMEM is not set
+CONFIG_CPU_SUPPORTS_HIGHMEM=y
+CONFIG_SYS_SUPPORTS_HIGHMEM=y
+CONFIG_SYS_SUPPORTS_SMARTMIPS=y
+CONFIG_ARCH_FLATMEM_ENABLE=y
+CONFIG_HW_PERF_EVENTS=y
+CONFIG_FLATMEM=y
+CONFIG_FLAT_NODE_MEM_MAP=y
+CONFIG_HAVE_MEMBLOCK=y
+CONFIG_HAVE_MEMBLOCK_NODE_MAP=y
+CONFIG_ARCH_DISCARD_MEMBLOCK=y
+# CONFIG_HAVE_BOOTMEM_INFO_NODE is not set
+CONFIG_PAGEFLAGS_EXTENDED=y
+CONFIG_SPLIT_PTLOCK_CPUS=4
+CONFIG_COMPACTION=y
+CONFIG_MIGRATION=y
+# CONFIG_PHYS_ADDR_T_64BIT is not set
+CONFIG_ZONE_DMA_FLAG=1
+CONFIG_BOUNCE=y
+CONFIG_VIRT_TO_BUS=y
+CONFIG_KSM=y
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
+CONFIG_CROSS_MEMORY_ATTACH=y
+# CONFIG_CLEANCACHE is not set
+# CONFIG_FRONTSWAP is not set
+CONFIG_SMP=y
+CONFIG_SMP_UP=y
+CONFIG_SYS_SUPPORTS_MIPS_CMP=y
+CONFIG_SYS_SUPPORTS_SMP=y
+CONFIG_NR_CPUS=8
+CONFIG_MIPS_PERF_SHARED_TC_COUNTERS=y
+# CONFIG_HZ_48 is not set
+CONFIG_HZ_100=y
+# CONFIG_HZ_128 is not set
+# CONFIG_HZ_250 is not set
+# CONFIG_HZ_256 is not set
+# CONFIG_HZ_1000 is not set
+# CONFIG_HZ_1024 is not set
+CONFIG_SYS_SUPPORTS_ARBIT_HZ=y
+CONFIG_HZ=100
+# CONFIG_PREEMPT_NONE is not set
+# CONFIG_PREEMPT_VOLUNTARY is not set
+CONFIG_PREEMPT=y
+CONFIG_PREEMPT_COUNT=y
+# CONFIG_KEXEC is not set
+# CONFIG_CRASH_DUMP is not set
+CONFIG_SECCOMP=y
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
+CONFIG_IRQ_WORK=y
+CONFIG_BUILDTIME_EXTABLE_SORT=y
+
+#
+# General setup
+#
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_CROSS_COMPILE=""
+CONFIG_LOCALVERSION=""
+CONFIG_LOCALVERSION_AUTO=y
+CONFIG_HAVE_KERNEL_GZIP=y
+CONFIG_HAVE_KERNEL_BZIP2=y
+CONFIG_HAVE_KERNEL_LZMA=y
+CONFIG_HAVE_KERNEL_LZO=y
+CONFIG_KERNEL_GZIP=y
+# CONFIG_KERNEL_BZIP2 is not set
+# CONFIG_KERNEL_LZMA is not set
+# CONFIG_KERNEL_LZO is not set
+CONFIG_DEFAULT_HOSTNAME="(none)"
+CONFIG_SWAP=y
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+# CONFIG_POSIX_MQUEUE is not set
+# CONFIG_FHANDLE is not set
+# CONFIG_AUDIT is not set
+CONFIG_HAVE_GENERIC_HARDIRQS=y
+
+#
+# IRQ subsystem
+#
+CONFIG_GENERIC_HARDIRQS=y
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_GENERIC_IRQ_SHOW=y
+CONFIG_IRQ_FORCED_THREADING=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
+CONFIG_GENERIC_CMOS_UPDATE=y
+
+#
+# Timers subsystem
+#
+CONFIG_TICK_ONESHOT=y
+CONFIG_NO_HZ_COMMON=y
+# CONFIG_HZ_PERIODIC is not set
+CONFIG_NO_HZ_IDLE=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+
+#
+# CPU/Task time and stats accounting
+#
+CONFIG_TICK_CPU_ACCOUNTING=y
+# CONFIG_BSD_PROCESS_ACCT is not set
+# CONFIG_TASKSTATS is not set
+
+#
+# RCU Subsystem
+#
+CONFIG_TREE_PREEMPT_RCU=y
+CONFIG_PREEMPT_RCU=y
+CONFIG_RCU_STALL_COMMON=y
+CONFIG_RCU_FANOUT=32
+CONFIG_RCU_FANOUT_LEAF=16
+# CONFIG_RCU_FANOUT_EXACT is not set
+# CONFIG_RCU_FAST_NO_HZ is not set
+# CONFIG_TREE_RCU_TRACE is not set
+# CONFIG_RCU_BOOST is not set
+# CONFIG_RCU_NOCB_CPU is not set
+# CONFIG_IKCONFIG is not set
+CONFIG_LOG_BUF_SHIFT=15
+CONFIG_CGROUPS=y
+CONFIG_CGROUP_DEBUG=y
+CONFIG_CGROUP_FREEZER=y
+# CONFIG_CGROUP_DEVICE is not set
+# CONFIG_CPUSETS is not set
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_RESOURCE_COUNTERS=y
+# CONFIG_MEMCG is not set
+# CONFIG_CGROUP_PERF is not set
+CONFIG_CGROUP_SCHED=y
+CONFIG_FAIR_GROUP_SCHED=y
+# CONFIG_CFS_BANDWIDTH is not set
+CONFIG_RT_GROUP_SCHED=y
+# CONFIG_BLK_CGROUP is not set
+# CONFIG_CHECKPOINT_RESTORE is not set
+CONFIG_NAMESPACES=y
+CONFIG_UTS_NS=y
+CONFIG_IPC_NS=y
+CONFIG_PID_NS=y
+CONFIG_NET_NS=y
+# CONFIG_SCHED_AUTOGROUP is not set
+# CONFIG_SYSFS_DEPRECATED is not set
+CONFIG_RELAY=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+CONFIG_RD_GZIP=y
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZO is not set
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_SYSCTL=y
+CONFIG_ANON_INODES=y
+CONFIG_HOTPLUG=y
+CONFIG_HAVE_PCSPKR_PLATFORM=y
+CONFIG_PANIC_TIMEOUT=5
+CONFIG_EXPERT=y
+# CONFIG_SYSCTL_SYSCALL is not set
+CONFIG_KALLSYMS=y
+CONFIG_KALLSYMS_ALL=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+CONFIG_PCSPKR_PLATFORM=y
+CONFIG_BASE_FULL=y
+CONFIG_FUTEX=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_AIO=y
+CONFIG_PCI_QUIRKS=y
+CONFIG_EMBEDDED=y
+CONFIG_HAVE_PERF_EVENTS=y
+CONFIG_PERF_USE_VMALLOC=y
+
+#
+# Kernel Performance Events And Counters
+#
+CONFIG_PERF_EVENTS=y
+# CONFIG_DEBUG_PERF_USE_VMALLOC is not set
+CONFIG_VM_EVENT_COUNTERS=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLAB=y
+# CONFIG_SLUB is not set
+# CONFIG_SLOB is not set
+# CONFIG_PROFILING is not set
+CONFIG_HAVE_OPROFILE=y
+# CONFIG_JUMP_LABEL is not set
+# CONFIG_HAVE_64BIT_ALIGNED_ACCESS is not set
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KRETPROBES=y
+CONFIG_HAVE_DMA_ATTRS=y
+CONFIG_USE_GENERIC_SMP_HELPERS=y
+CONFIG_GENERIC_SMP_IDLE_THREAD=y
+CONFIG_HAVE_DMA_API_DEBUG=y
+CONFIG_HAVE_ARCH_JUMP_LABEL=y
+CONFIG_ARCH_WANT_IPC_PARSE_VERSION=y
+CONFIG_HAVE_MOD_ARCH_SPECIFIC=y
+CONFIG_CLONE_BACKWARDS=y
+
+#
+# GCOV-based kernel profiling
+#
+CONFIG_HAVE_GENERIC_DMA_COHERENT=y
+CONFIG_SLABINFO=y
+CONFIG_RT_MUTEXES=y
+CONFIG_BASE_SMALL=0
+# CONFIG_MODULES is not set
+CONFIG_BLOCK=y
+CONFIG_LBDAF=y
+CONFIG_BLK_DEV_BSG=y
+CONFIG_BLK_DEV_BSGLIB=y
+# CONFIG_BLK_DEV_INTEGRITY is not set
+
+#
+# Partition Types
+#
+# CONFIG_PARTITION_ADVANCED is not set
+CONFIG_AMIGA_PARTITION=y
+CONFIG_MSDOS_PARTITION=y
+CONFIG_EFI_PARTITION=y
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+# CONFIG_DEFAULT_DEADLINE is not set
+CONFIG_DEFAULT_CFQ=y
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="cfq"
+CONFIG_UNINLINE_SPIN_UNLOCK=y
+CONFIG_MUTEX_SPIN_ON_OWNER=y
+CONFIG_FREEZER=y
+
+#
+# Bus options (PCI, PCMCIA, EISA, ISA, TC)
+#
+CONFIG_HW_HAS_PCI=y
+CONFIG_PCI=y
+CONFIG_PCI_DOMAINS=y
+# CONFIG_PCI_DEBUG is not set
+# CONFIG_PCI_REALLOC_ENABLE_AUTO is not set
+# CONFIG_PCI_STUB is not set
+# CONFIG_PCI_IOV is not set
+# CONFIG_PCI_PRI is not set
+# CONFIG_PCI_PASID is not set
+# CONFIG_PCIEPORTBUS is not set
+CONFIG_MMU=y
+CONFIG_I8253=y
+# CONFIG_PCCARD is not set
+# CONFIG_HOTPLUG_PCI is not set
+# CONFIG_RAPIDIO is not set
+
+#
+# Executable file formats
+#
+CONFIG_BINFMT_ELF=y
+CONFIG_ARCH_BINFMT_ELF_RANDOMIZE_PIE=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_BINFMT_SCRIPT=y
+# CONFIG_HAVE_AOUT is not set
+# CONFIG_BINFMT_MISC is not set
+CONFIG_COREDUMP=y
+CONFIG_TRAD_SIGNALS=y
+
+#
+# Power management options
+#
+CONFIG_HAS_WAKELOCK=y
+CONFIG_WAKELOCK=y
+CONFIG_PM_SLEEP=y
+# CONFIG_PM_AUTOSLEEP is not set
+# CONFIG_PM_WAKELOCKS is not set
+CONFIG_PM_RUNTIME=y
+CONFIG_PM=y
+CONFIG_PM_DEBUG=y
+# CONFIG_PM_ADVANCED_DEBUG is not set
+CONFIG_PM_SLEEP_DEBUG=y
+CONFIG_SUSPEND_TIME=y
+CONFIG_MIPS_EXTERNAL_TIMER=y
+CONFIG_NET=y
+
+#
+# Networking options
+#
+CONFIG_PACKET=y
+# CONFIG_PACKET_DIAG is not set
+CONFIG_UNIX=y
+# CONFIG_UNIX_DIAG is not set
+CONFIG_XFRM=y
+CONFIG_XFRM_ALGO=y
+CONFIG_XFRM_USER=y
+# CONFIG_XFRM_SUB_POLICY is not set
+CONFIG_XFRM_MIGRATE=y
+# CONFIG_XFRM_STATISTICS is not set
+CONFIG_XFRM_IPCOMP=y
+CONFIG_NET_KEY=y
+CONFIG_NET_KEY_MIGRATE=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+# CONFIG_IP_FIB_TRIE_STATS is not set
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_ROUTE_CLASSID=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+# CONFIG_IP_PNP_RARP is not set
+CONFIG_NET_IPIP=y
+# CONFIG_NET_IPGRE_DEMUX is not set
+CONFIG_NET_IP_TUNNEL=y
+CONFIG_IP_MROUTE=y
+# CONFIG_IP_MROUTE_MULTIPLE_TABLES is not set
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+# CONFIG_ARPD is not set
+CONFIG_SYN_COOKIES=y
+# CONFIG_NET_IPVTI is not set
+CONFIG_INET_AH=y
+CONFIG_INET_ESP=y
+CONFIG_INET_IPCOMP=y
+CONFIG_INET_XFRM_TUNNEL=y
+CONFIG_INET_TUNNEL=y
+CONFIG_INET_XFRM_MODE_TRANSPORT=y
+CONFIG_INET_XFRM_MODE_TUNNEL=y
+CONFIG_INET_XFRM_MODE_BEET=y
+# CONFIG_INET_LRO is not set
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_INET_UDP_DIAG is not set
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+CONFIG_TCP_MD5SIG=y
+CONFIG_IPV6=y
+CONFIG_IPV6_PRIVACY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_IPV6_MIP6=y
+CONFIG_INET6_XFRM_TUNNEL=y
+CONFIG_INET6_TUNNEL=y
+CONFIG_INET6_XFRM_MODE_TRANSPORT=y
+CONFIG_INET6_XFRM_MODE_TUNNEL=y
+CONFIG_INET6_XFRM_MODE_BEET=y
+# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set
+CONFIG_IPV6_SIT=y
+# CONFIG_IPV6_SIT_6RD is not set
+CONFIG_IPV6_NDISC_NODETYPE=y
+CONFIG_IPV6_TUNNEL=y
+# CONFIG_IPV6_GRE is not set
+CONFIG_IPV6_MULTIPLE_TABLES=y
+# CONFIG_IPV6_SUBTREES is not set
+CONFIG_IPV6_MROUTE=y
+# CONFIG_IPV6_MROUTE_MULTIPLE_TABLES is not set
+CONFIG_IPV6_PIMSM_V2=y
+CONFIG_ANDROID_PARANOID_NETWORK=y
+CONFIG_NET_ACTIVITY_STATS=y
+CONFIG_NETWORK_SECMARK=y
+# CONFIG_NETWORK_PHY_TIMESTAMPING is not set
+CONFIG_NETFILTER=y
+# CONFIG_NETFILTER_DEBUG is not set
+CONFIG_NETFILTER_ADVANCED=y
+CONFIG_BRIDGE_NETFILTER=y
+
+#
+# Core Netfilter Configuration
+#
+CONFIG_NETFILTER_NETLINK=y
+# CONFIG_NETFILTER_NETLINK_ACCT is not set
+CONFIG_NETFILTER_NETLINK_QUEUE=y
+CONFIG_NETFILTER_NETLINK_LOG=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_MARK=y
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_PROCFS=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+# CONFIG_NF_CONNTRACK_TIMEOUT is not set
+# CONFIG_NF_CONNTRACK_TIMESTAMP is not set
+CONFIG_NF_CT_PROTO_DCCP=y
+CONFIG_NF_CT_PROTO_GRE=y
+CONFIG_NF_CT_PROTO_SCTP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_BROADCAST=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+# CONFIG_NF_CONNTRACK_SNMP is not set
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SANE=y
+# CONFIG_NF_CONNTRACK_SIP is not set
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+# CONFIG_NF_CT_NETLINK_TIMEOUT is not set
+# CONFIG_NETFILTER_NETLINK_QUEUE_CT is not set
+CONFIG_NETFILTER_TPROXY=y
+CONFIG_NETFILTER_XTABLES=y
+
+#
+# Xtables combined modules
+#
+CONFIG_NETFILTER_XT_MARK=y
+CONFIG_NETFILTER_XT_CONNMARK=y
+
+#
+# Xtables targets
+#
+# CONFIG_NETFILTER_XT_TARGET_CHECKSUM is not set
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+# CONFIG_NETFILTER_XT_TARGET_CONNSECMARK is not set
+# CONFIG_NETFILTER_XT_TARGET_CT is not set
+# CONFIG_NETFILTER_XT_TARGET_DSCP is not set
+CONFIG_NETFILTER_XT_TARGET_HL=y
+# CONFIG_NETFILTER_XT_TARGET_HMARK is not set
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
+# CONFIG_NETFILTER_XT_TARGET_LOG is not set
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFLOG=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+# CONFIG_NETFILTER_XT_TARGET_NOTRACK is not set
+CONFIG_NETFILTER_XT_TARGET_RATEEST=y
+# CONFIG_NETFILTER_XT_TARGET_TEE is not set
+CONFIG_NETFILTER_XT_TARGET_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_TRACE=y
+CONFIG_NETFILTER_XT_TARGET_SECMARK=y
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=y
+
+#
+# Xtables matches
+#
+# CONFIG_NETFILTER_XT_MATCH_ADDRTYPE is not set
+# CONFIG_NETFILTER_XT_MATCH_BPF is not set
+# CONFIG_NETFILTER_XT_MATCH_CLUSTER is not set
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y
+CONFIG_NETFILTER_XT_MATCH_CONNBYTES=y
+# CONFIG_NETFILTER_XT_MATCH_CONNLABEL is not set
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+# CONFIG_NETFILTER_XT_MATCH_CPU is not set
+CONFIG_NETFILTER_XT_MATCH_DCCP=y
+# CONFIG_NETFILTER_XT_MATCH_DEVGROUP is not set
+# CONFIG_NETFILTER_XT_MATCH_DSCP is not set
+CONFIG_NETFILTER_XT_MATCH_ECN=y
+CONFIG_NETFILTER_XT_MATCH_ESP=y
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_HL=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+# CONFIG_NETFILTER_XT_MATCH_IPVS is not set
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+# CONFIG_NETFILTER_XT_MATCH_NFACCT is not set
+# CONFIG_NETFILTER_XT_MATCH_OSF is not set
+CONFIG_NETFILTER_XT_MATCH_OWNER=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+# CONFIG_NETFILTER_XT_MATCH_PHYSDEV is not set
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+CONFIG_NETFILTER_XT_MATCH_RATEEST=y
+CONFIG_NETFILTER_XT_MATCH_REALM=y
+CONFIG_NETFILTER_XT_MATCH_RECENT=y
+CONFIG_NETFILTER_XT_MATCH_SCTP=y
+CONFIG_NETFILTER_XT_MATCH_SOCKET=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETFILTER_XT_MATCH_STRING=y
+CONFIG_NETFILTER_XT_MATCH_TCPMSS=y
+CONFIG_NETFILTER_XT_MATCH_TIME=y
+CONFIG_NETFILTER_XT_MATCH_U32=y
+# CONFIG_IP_SET is not set
+CONFIG_IP_VS=y
+CONFIG_IP_VS_IPV6=y
+# CONFIG_IP_VS_DEBUG is not set
+CONFIG_IP_VS_TAB_BITS=12
+
+#
+# IPVS transport protocol load balancing support
+#
+CONFIG_IP_VS_PROTO_TCP=y
+CONFIG_IP_VS_PROTO_UDP=y
+CONFIG_IP_VS_PROTO_AH_ESP=y
+CONFIG_IP_VS_PROTO_ESP=y
+CONFIG_IP_VS_PROTO_AH=y
+# CONFIG_IP_VS_PROTO_SCTP is not set
+
+#
+# IPVS scheduler
+#
+CONFIG_IP_VS_RR=y
+CONFIG_IP_VS_WRR=y
+CONFIG_IP_VS_LC=y
+CONFIG_IP_VS_WLC=y
+CONFIG_IP_VS_LBLC=y
+CONFIG_IP_VS_LBLCR=y
+CONFIG_IP_VS_DH=y
+CONFIG_IP_VS_SH=y
+CONFIG_IP_VS_SED=y
+CONFIG_IP_VS_NQ=y
+
+#
+# IPVS SH scheduler
+#
+CONFIG_IP_VS_SH_TAB_BITS=8
+
+#
+# IPVS application helper
+#
+# CONFIG_IP_VS_NFCT is not set
+
+#
+# IP: Netfilter Configuration
+#
+CONFIG_NF_DEFRAG_IPV4=y
+CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_NF_CONNTRACK_PROC_COMPAT=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+# CONFIG_IP_NF_MATCH_RPFILTER is not set
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_TARGET_REJECT_SKERR=y
+CONFIG_IP_NF_TARGET_ULOG=y
+# CONFIG_NF_NAT_IPV4 is not set
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_TARGET_CLUSTERIP=y
+CONFIG_IP_NF_TARGET_ECN=y
+CONFIG_IP_NF_TARGET_TTL=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+
+#
+# IPv6: Netfilter Configuration
+#
+CONFIG_NF_DEFRAG_IPV6=y
+CONFIG_NF_CONNTRACK_IPV6=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_MATCH_AH=y
+CONFIG_IP6_NF_MATCH_EUI64=y
+CONFIG_IP6_NF_MATCH_FRAG=y
+CONFIG_IP6_NF_MATCH_OPTS=y
+CONFIG_IP6_NF_MATCH_HL=y
+CONFIG_IP6_NF_MATCH_IPV6HEADER=y
+CONFIG_IP6_NF_MATCH_MH=y
+# CONFIG_IP6_NF_MATCH_RPFILTER is not set
+CONFIG_IP6_NF_MATCH_RT=y
+CONFIG_IP6_NF_TARGET_HL=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_TARGET_REJECT_SKERR=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+# CONFIG_NF_NAT_IPV6 is not set
+CONFIG_BRIDGE_NF_EBTABLES=y
+CONFIG_BRIDGE_EBT_BROUTE=y
+CONFIG_BRIDGE_EBT_T_FILTER=y
+CONFIG_BRIDGE_EBT_T_NAT=y
+CONFIG_BRIDGE_EBT_802_3=y
+CONFIG_BRIDGE_EBT_AMONG=y
+CONFIG_BRIDGE_EBT_ARP=y
+CONFIG_BRIDGE_EBT_IP=y
+CONFIG_BRIDGE_EBT_IP6=y
+CONFIG_BRIDGE_EBT_LIMIT=y
+CONFIG_BRIDGE_EBT_MARK=y
+CONFIG_BRIDGE_EBT_PKTTYPE=y
+CONFIG_BRIDGE_EBT_STP=y
+CONFIG_BRIDGE_EBT_VLAN=y
+CONFIG_BRIDGE_EBT_ARPREPLY=y
+CONFIG_BRIDGE_EBT_DNAT=y
+CONFIG_BRIDGE_EBT_MARK_T=y
+CONFIG_BRIDGE_EBT_REDIRECT=y
+CONFIG_BRIDGE_EBT_SNAT=y
+CONFIG_BRIDGE_EBT_LOG=y
+CONFIG_BRIDGE_EBT_ULOG=y
+CONFIG_BRIDGE_EBT_NFLOG=y
+# CONFIG_IP_DCCP is not set
+CONFIG_IP_SCTP=y
+# CONFIG_SCTP_DBG_MSG is not set
+# CONFIG_SCTP_DBG_OBJCNT is not set
+CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5=y
+# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1 is not set
+# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_NONE is not set
+CONFIG_SCTP_COOKIE_HMAC_MD5=y
+# CONFIG_SCTP_COOKIE_HMAC_SHA1 is not set
+# CONFIG_RDS is not set
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_L2TP is not set
+CONFIG_STP=y
+CONFIG_GARP=y
+CONFIG_BRIDGE=y
+CONFIG_BRIDGE_IGMP_SNOOPING=y
+# CONFIG_BRIDGE_VLAN_FILTERING is not set
+CONFIG_HAVE_NET_DSA=y
+CONFIG_VLAN_8021Q=y
+CONFIG_VLAN_8021Q_GVRP=y
+# CONFIG_VLAN_8021Q_MVRP is not set
+# CONFIG_DECNET is not set
+CONFIG_LLC=y
+# CONFIG_LLC2 is not set
+# CONFIG_IPX is not set
+CONFIG_ATALK=y
+CONFIG_DEV_APPLETALK=y
+CONFIG_IPDDP=y
+CONFIG_IPDDP_ENCAP=y
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+CONFIG_PHONET=y
+# CONFIG_IEEE802154 is not set
+CONFIG_NET_SCHED=y
+
+#
+# Queueing/Scheduling
+#
+CONFIG_NET_SCH_CBQ=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NET_SCH_HFSC=y
+CONFIG_NET_SCH_PRIO=y
+# CONFIG_NET_SCH_MULTIQ is not set
+CONFIG_NET_SCH_RED=y
+# CONFIG_NET_SCH_SFB is not set
+CONFIG_NET_SCH_SFQ=y
+CONFIG_NET_SCH_TEQL=y
+CONFIG_NET_SCH_TBF=y
+CONFIG_NET_SCH_GRED=y
+CONFIG_NET_SCH_DSMARK=y
+CONFIG_NET_SCH_NETEM=y
+# CONFIG_NET_SCH_DRR is not set
+# CONFIG_NET_SCH_MQPRIO is not set
+# CONFIG_NET_SCH_CHOKE is not set
+# CONFIG_NET_SCH_QFQ is not set
+# CONFIG_NET_SCH_CODEL is not set
+# CONFIG_NET_SCH_FQ_CODEL is not set
+CONFIG_NET_SCH_INGRESS=y
+# CONFIG_NET_SCH_PLUG is not set
+
+#
+# Classification
+#
+CONFIG_NET_CLS=y
+CONFIG_NET_CLS_BASIC=y
+CONFIG_NET_CLS_TCINDEX=y
+CONFIG_NET_CLS_ROUTE4=y
+CONFIG_NET_CLS_FW=y
+CONFIG_NET_CLS_U32=y
+# CONFIG_CLS_U32_PERF is not set
+# CONFIG_CLS_U32_MARK is not set
+CONFIG_NET_CLS_RSVP=y
+CONFIG_NET_CLS_RSVP6=y
+CONFIG_NET_CLS_FLOW=y
+# CONFIG_NET_CLS_CGROUP is not set
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_STACK=32
+# CONFIG_NET_EMATCH_CMP is not set
+# CONFIG_NET_EMATCH_NBYTE is not set
+CONFIG_NET_EMATCH_U32=y
+# CONFIG_NET_EMATCH_META is not set
+# CONFIG_NET_EMATCH_TEXT is not set
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=y
+CONFIG_NET_ACT_GACT=y
+CONFIG_GACT_PROB=y
+CONFIG_NET_ACT_MIRRED=y
+CONFIG_NET_ACT_IPT=y
+CONFIG_NET_ACT_NAT=y
+CONFIG_NET_ACT_PEDIT=y
+CONFIG_NET_ACT_SIMP=y
+CONFIG_NET_ACT_SKBEDIT=y
+# CONFIG_NET_ACT_CSUM is not set
+CONFIG_NET_CLS_IND=y
+CONFIG_NET_SCH_FIFO=y
+# CONFIG_DCB is not set
+# CONFIG_BATMAN_ADV is not set
+# CONFIG_OPENVSWITCH is not set
+# CONFIG_VSOCKETS is not set
+# CONFIG_NETLINK_MMAP is not set
+# CONFIG_NETLINK_DIAG is not set
+CONFIG_RPS=y
+CONFIG_RFS_ACCEL=y
+CONFIG_XPS=y
+# CONFIG_NETPRIO_CGROUP is not set
+CONFIG_BQL=y
+
+#
+# Network testing
+#
+# CONFIG_NET_PKTGEN is not set
+# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
+# CONFIG_IRDA is not set
+# CONFIG_BT is not set
+# CONFIG_AF_RXRPC is not set
+CONFIG_FIB_RULES=y
+CONFIG_WIRELESS=y
+CONFIG_WIRELESS_EXT=y
+CONFIG_WEXT_CORE=y
+CONFIG_WEXT_PROC=y
+CONFIG_WEXT_SPY=y
+CONFIG_WEXT_PRIV=y
+CONFIG_CFG80211=y
+# CONFIG_NL80211_TESTMODE is not set
+# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set
+# CONFIG_CFG80211_REG_DEBUG is not set
+# CONFIG_CFG80211_CERTIFICATION_ONUS is not set
+CONFIG_CFG80211_DEFAULT_PS=y
+# CONFIG_CFG80211_INTERNAL_REGDB is not set
+# CONFIG_CFG80211_WEXT is not set
+CONFIG_LIB80211=y
+CONFIG_LIB80211_CRYPT_WEP=y
+CONFIG_LIB80211_CRYPT_CCMP=y
+CONFIG_LIB80211_CRYPT_TKIP=y
+# CONFIG_LIB80211_DEBUG is not set
+# CONFIG_CFG80211_ALLOW_RECONNECT is not set
+CONFIG_MAC80211=y
+CONFIG_MAC80211_HAS_RC=y
+CONFIG_MAC80211_RC_PID=y
+CONFIG_MAC80211_RC_MINSTREL=y
+CONFIG_MAC80211_RC_MINSTREL_HT=y
+CONFIG_MAC80211_RC_DEFAULT_PID=y
+# CONFIG_MAC80211_RC_DEFAULT_MINSTREL is not set
+CONFIG_MAC80211_RC_DEFAULT="pid"
+CONFIG_MAC80211_MESH=y
+# CONFIG_MAC80211_LEDS is not set
+# CONFIG_MAC80211_MESSAGE_TRACING is not set
+# CONFIG_MAC80211_DEBUG_MENU is not set
+# CONFIG_WIMAX is not set
+CONFIG_RFKILL=y
+CONFIG_RFKILL_PM=y
+# CONFIG_RFKILL_INPUT is not set
+# CONFIG_NET_9P is not set
+# CONFIG_CAIF is not set
+# CONFIG_CEPH_LIB is not set
+# CONFIG_NFC is not set
+
+#
+# Device Drivers
+#
+
+#
+# Generic Driver Options
+#
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+# CONFIG_DEVTMPFS_MOUNT is not set
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+CONFIG_FW_LOADER=y
+CONFIG_FIRMWARE_IN_KERNEL=y
+CONFIG_EXTRA_FIRMWARE=""
+CONFIG_FW_LOADER_USER_HELPER=y
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
+# CONFIG_SYS_HYPERVISOR is not set
+# CONFIG_GENERIC_CPU_DEVICES is not set
+CONFIG_DMA_SHARED_BUFFER=y
+
+#
+# Bus devices
+#
+CONFIG_CONNECTOR=y
+CONFIG_PROC_EVENTS=y
+CONFIG_MTD=y
+# CONFIG_MTD_REDBOOT_PARTS is not set
+# CONFIG_MTD_CMDLINE_PARTS is not set
+# CONFIG_MTD_AR7_PARTS is not set
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_BLKDEVS=y
+CONFIG_MTD_BLOCK=y
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+# CONFIG_SSFDC is not set
+# CONFIG_SM_FTL is not set
+CONFIG_MTD_OOPS=y
+# CONFIG_MTD_SWAP is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+CONFIG_MTD_CFI=y
+# CONFIG_MTD_JEDECPROBE is not set
+CONFIG_MTD_GEN_PROBE=y
+# CONFIG_MTD_CFI_ADV_OPTIONS is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_CFI_I4 is not set
+# CONFIG_MTD_CFI_I8 is not set
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_CFI_STAA=y
+CONFIG_MTD_CFI_UTIL=y
+# CONFIG_MTD_RAM is not set
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+CONFIG_MTD_PHYSMAP=y
+# CONFIG_MTD_PHYSMAP_COMPAT is not set
+# CONFIG_MTD_INTEL_VR_NOR is not set
+# CONFIG_MTD_PLATRAM is not set
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_PMC551 is not set
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOCG3 is not set
+# CONFIG_MTD_NAND_IDS is not set
+# CONFIG_MTD_NAND is not set
+# CONFIG_MTD_ONENAND is not set
+
+#
+# LPDDR flash memory drivers
+#
+# CONFIG_MTD_LPDDR is not set
+CONFIG_MTD_UBI=y
+CONFIG_MTD_UBI_WL_THRESHOLD=4096
+CONFIG_MTD_UBI_BEB_LIMIT=20
+# CONFIG_MTD_UBI_FASTMAP is not set
+CONFIG_MTD_UBI_GLUEBI=y
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
+CONFIG_BLK_DEV_FD=y
+# CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set
+# CONFIG_BLK_CPQ_DA is not set
+# CONFIG_BLK_CPQ_CISS_DA is not set
+# CONFIG_BLK_DEV_DAC960 is not set
+CONFIG_BLK_DEV_UMEM=y
+# CONFIG_BLK_DEV_COW_COMMON is not set
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_LOOP_MIN_COUNT=8
+CONFIG_BLK_DEV_CRYPTOLOOP=y
+# CONFIG_BLK_DEV_DRBD is not set
+CONFIG_BLK_DEV_NBD=y
+# CONFIG_BLK_DEV_NVME is not set
+# CONFIG_BLK_DEV_SX8 is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=16
+CONFIG_BLK_DEV_RAM_SIZE=8192
+# CONFIG_BLK_DEV_XIP is not set
+CONFIG_CDROM_PKTCDVD=y
+CONFIG_CDROM_PKTCDVD_BUFFERS=8
+# CONFIG_CDROM_PKTCDVD_WCACHE is not set
+CONFIG_ATA_OVER_ETH=y
+# CONFIG_BLK_DEV_HD is not set
+# CONFIG_BLK_DEV_RBD is not set
+# CONFIG_BLK_DEV_RSXX is not set
+
+#
+# Misc devices
+#
+# CONFIG_SENSORS_LIS3LV02D is not set
+# CONFIG_DUMMY_IRQ is not set
+# CONFIG_PHANTOM is not set
+# CONFIG_INTEL_MID_PTI is not set
+# CONFIG_SGI_IOC4 is not set
+# CONFIG_TIFM_CORE is not set
+# CONFIG_ATMEL_SSC is not set
+# CONFIG_ENCLOSURE_SERVICES is not set
+# CONFIG_HP_ILO is not set
+CONFIG_UID_STAT=y
+# CONFIG_PCH_PHUB is not set
+# CONFIG_SRAM is not set
+# CONFIG_C2PORT is not set
+
+#
+# EEPROM support
+#
+# CONFIG_EEPROM_93CX6 is not set
+# CONFIG_CB710_CORE is not set
+
+#
+# Texas Instruments shared transport line discipline
+#
+
+#
+# Altera FPGA firmware download module
+#
+CONFIG_HAVE_IDE=y
+CONFIG_IDE=y
+
+#
+# Please see Documentation/ide/ide.txt for help/info on IDE drives
+#
+CONFIG_IDE_XFER_MODE=y
+CONFIG_IDE_ATAPI=y
+# CONFIG_BLK_DEV_IDE_SATA is not set
+CONFIG_IDE_GD=y
+CONFIG_IDE_GD_ATA=y
+# CONFIG_IDE_GD_ATAPI is not set
+CONFIG_BLK_DEV_IDECD=y
+CONFIG_BLK_DEV_IDECD_VERBOSE_ERRORS=y
+# CONFIG_BLK_DEV_IDETAPE is not set
+# CONFIG_IDE_TASK_IOCTL is not set
+CONFIG_IDE_PROC_FS=y
+
+#
+# IDE chipset support/bugfixes
+#
+CONFIG_IDE_GENERIC=y
+# CONFIG_BLK_DEV_PLATFORM is not set
+CONFIG_BLK_DEV_IDEDMA_SFF=y
+
+#
+# PCI IDE chipsets support
+#
+CONFIG_BLK_DEV_IDEPCI=y
+CONFIG_IDEPCI_PCIBUS_ORDER=y
+# CONFIG_BLK_DEV_OFFBOARD is not set
+CONFIG_BLK_DEV_GENERIC=y
+# CONFIG_BLK_DEV_OPTI621 is not set
+CONFIG_BLK_DEV_IDEDMA_PCI=y
+# CONFIG_BLK_DEV_AEC62XX is not set
+# CONFIG_BLK_DEV_ALI15X3 is not set
+# CONFIG_BLK_DEV_AMD74XX is not set
+# CONFIG_BLK_DEV_CMD64X is not set
+# CONFIG_BLK_DEV_TRIFLEX is not set
+# CONFIG_BLK_DEV_CS5520 is not set
+# CONFIG_BLK_DEV_CS5530 is not set
+# CONFIG_BLK_DEV_HPT366 is not set
+# CONFIG_BLK_DEV_JMICRON is not set
+# CONFIG_BLK_DEV_SC1200 is not set
+CONFIG_BLK_DEV_PIIX=y
+# CONFIG_BLK_DEV_IT8172 is not set
+CONFIG_BLK_DEV_IT8213=y
+# CONFIG_BLK_DEV_IT821X is not set
+# CONFIG_BLK_DEV_NS87415 is not set
+# CONFIG_BLK_DEV_PDC202XX_OLD is not set
+# CONFIG_BLK_DEV_PDC202XX_NEW is not set
+# CONFIG_BLK_DEV_SVWKS is not set
+# CONFIG_BLK_DEV_SIIMAGE is not set
+# CONFIG_BLK_DEV_SLC90E66 is not set
+# CONFIG_BLK_DEV_TRM290 is not set
+# CONFIG_BLK_DEV_VIA82CXXX is not set
+CONFIG_BLK_DEV_TC86C001=y
+CONFIG_BLK_DEV_IDEDMA=y
+
+#
+# SCSI device support
+#
+CONFIG_SCSI_MOD=y
+CONFIG_RAID_ATTRS=y
+CONFIG_SCSI=y
+CONFIG_SCSI_DMA=y
+CONFIG_SCSI_TGT=y
+CONFIG_SCSI_NETLINK=y
+CONFIG_SCSI_PROC_FS=y
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_ST=y
+CONFIG_CHR_DEV_OSST=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_BLK_DEV_SR_VENDOR=y
+CONFIG_CHR_DEV_SG=y
+# CONFIG_CHR_DEV_SCH is not set
+CONFIG_SCSI_MULTI_LUN=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+
+#
+# SCSI Transports
+#
+CONFIG_SCSI_SPI_ATTRS=y
+CONFIG_SCSI_FC_ATTRS=y
+# CONFIG_SCSI_FC_TGT_ATTRS is not set
+CONFIG_SCSI_ISCSI_ATTRS=y
+# CONFIG_SCSI_SAS_ATTRS is not set
+# CONFIG_SCSI_SAS_LIBSAS is not set
+# CONFIG_SCSI_SRP_ATTRS is not set
+CONFIG_SCSI_LOWLEVEL=y
+CONFIG_ISCSI_TCP=y
+# CONFIG_ISCSI_BOOT_SYSFS is not set
+# CONFIG_SCSI_CXGB3_ISCSI is not set
+# CONFIG_SCSI_CXGB4_ISCSI is not set
+# CONFIG_SCSI_BNX2_ISCSI is not set
+# CONFIG_SCSI_BNX2X_FCOE is not set
+# CONFIG_BE2ISCSI is not set
+CONFIG_BLK_DEV_3W_XXXX_RAID=y
+# CONFIG_SCSI_HPSA is not set
+CONFIG_SCSI_3W_9XXX=y
+# CONFIG_SCSI_3W_SAS is not set
+CONFIG_SCSI_ACARD=y
+CONFIG_SCSI_AACRAID=y
+CONFIG_SCSI_AIC7XXX=y
+CONFIG_AIC7XXX_CMDS_PER_DEVICE=32
+CONFIG_AIC7XXX_RESET_DELAY_MS=15000
+# CONFIG_AIC7XXX_DEBUG_ENABLE is not set
+CONFIG_AIC7XXX_DEBUG_MASK=0
+CONFIG_AIC7XXX_REG_PRETTY_PRINT=y
+# CONFIG_SCSI_AIC7XXX_OLD is not set
+# CONFIG_SCSI_AIC79XX is not set
+# CONFIG_SCSI_AIC94XX is not set
+# CONFIG_SCSI_MVSAS is not set
+# CONFIG_SCSI_MVUMI is not set
+# CONFIG_SCSI_DPT_I2O is not set
+# CONFIG_SCSI_ADVANSYS is not set
+# CONFIG_SCSI_ARCMSR is not set
+# CONFIG_MEGARAID_NEWGEN is not set
+# CONFIG_MEGARAID_LEGACY is not set
+# CONFIG_MEGARAID_SAS is not set
+# CONFIG_SCSI_MPT2SAS is not set
+# CONFIG_SCSI_MPT3SAS is not set
+# CONFIG_SCSI_UFSHCD is not set
+# CONFIG_SCSI_HPTIOP is not set
+# CONFIG_SCSI_BUSLOGIC is not set
+# CONFIG_LIBFC is not set
+# CONFIG_LIBFCOE is not set
+# CONFIG_FCOE is not set
+# CONFIG_SCSI_DMX3191D is not set
+# CONFIG_SCSI_EATA is not set
+# CONFIG_SCSI_FUTURE_DOMAIN is not set
+# CONFIG_SCSI_GDTH is not set
+# CONFIG_SCSI_IPS is not set
+# CONFIG_SCSI_INITIO is not set
+# CONFIG_SCSI_INIA100 is not set
+# CONFIG_SCSI_STEX is not set
+# CONFIG_SCSI_SYM53C8XX_2 is not set
+# CONFIG_SCSI_QLOGIC_1280 is not set
+# CONFIG_SCSI_QLA_FC is not set
+# CONFIG_SCSI_QLA_ISCSI is not set
+# CONFIG_SCSI_LPFC is not set
+# CONFIG_SCSI_DC395x is not set
+# CONFIG_SCSI_DC390T is not set
+# CONFIG_SCSI_NSP32 is not set
+# CONFIG_SCSI_DEBUG is not set
+# CONFIG_SCSI_PMCRAID is not set
+# CONFIG_SCSI_PM8001 is not set
+# CONFIG_SCSI_SRP is not set
+# CONFIG_SCSI_BFA_FC is not set
+# CONFIG_SCSI_CHELSIO_FCOE is not set
+# CONFIG_SCSI_DH is not set
+# CONFIG_SCSI_OSD_INITIATOR is not set
+# CONFIG_ATA is not set
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=y
+CONFIG_MD_AUTODETECT=y
+CONFIG_MD_LINEAR=y
+CONFIG_MD_RAID0=y
+CONFIG_MD_RAID1=y
+CONFIG_MD_RAID10=y
+CONFIG_MD_RAID456=y
+CONFIG_MD_MULTIPATH=y
+CONFIG_MD_FAULTY=y
+# CONFIG_BCACHE is not set
+CONFIG_BLK_DEV_DM=y
+# CONFIG_DM_DEBUG is not set
+CONFIG_DM_CRYPT=y
+CONFIG_DM_SNAPSHOT=y
+# CONFIG_DM_THIN_PROVISIONING is not set
+# CONFIG_DM_CACHE is not set
+CONFIG_DM_MIRROR=y
+# CONFIG_DM_RAID is not set
+# CONFIG_DM_LOG_USERSPACE is not set
+CONFIG_DM_ZERO=y
+CONFIG_DM_MULTIPATH=y
+# CONFIG_DM_MULTIPATH_QL is not set
+# CONFIG_DM_MULTIPATH_ST is not set
+# CONFIG_DM_DELAY is not set
+CONFIG_DM_UEVENT=y
+# CONFIG_DM_FLAKEY is not set
+# CONFIG_DM_VERITY is not set
+# CONFIG_TARGET_CORE is not set
+# CONFIG_FUSION is not set
+
+#
+# IEEE 1394 (FireWire) support
+#
+# CONFIG_FIREWIRE is not set
+# CONFIG_FIREWIRE_NOSY is not set
+# CONFIG_I2O is not set
+CONFIG_NETDEVICES=y
+CONFIG_NET_CORE=y
+CONFIG_BONDING=y
+CONFIG_DUMMY=y
+CONFIG_EQUALIZER=y
+# CONFIG_NET_FC is not set
+CONFIG_MII=y
+CONFIG_IFB=y
+# CONFIG_NET_TEAM is not set
+CONFIG_MACVLAN=y
+# CONFIG_MACVTAP is not set
+# CONFIG_VXLAN is not set
+# CONFIG_NETCONSOLE is not set
+# CONFIG_NETPOLL is not set
+# CONFIG_NET_POLL_CONTROLLER is not set
+CONFIG_TUN=y
+CONFIG_VETH=y
+# CONFIG_ARCNET is not set
+
+#
+# CAIF transport drivers
+#
+
+#
+# Distributed Switch Architecture drivers
+#
+# CONFIG_NET_DSA_MV88E6XXX is not set
+# CONFIG_NET_DSA_MV88E6060 is not set
+# CONFIG_NET_DSA_MV88E6XXX_NEED_PPU is not set
+# CONFIG_NET_DSA_MV88E6131 is not set
+# CONFIG_NET_DSA_MV88E6123_61_65 is not set
+CONFIG_ETHERNET=y
+CONFIG_MDIO=y
+# CONFIG_NET_VENDOR_3COM is not set
+CONFIG_NET_VENDOR_ADAPTEC=y
+# CONFIG_ADAPTEC_STARFIRE is not set
+CONFIG_NET_VENDOR_ALTEON=y
+# CONFIG_ACENIC is not set
+CONFIG_NET_VENDOR_AMD=y
+# CONFIG_AMD8111_ETH is not set
+CONFIG_PCNET32=y
+CONFIG_NET_VENDOR_ATHEROS=y
+# CONFIG_ATL2 is not set
+# CONFIG_ATL1 is not set
+# CONFIG_ATL1E is not set
+# CONFIG_ATL1C is not set
+# CONFIG_ALX is not set
+CONFIG_NET_CADENCE=y
+# CONFIG_ARM_AT91_ETHER is not set
+# CONFIG_MACB is not set
+CONFIG_NET_VENDOR_BROADCOM=y
+# CONFIG_B44 is not set
+# CONFIG_BNX2 is not set
+# CONFIG_CNIC is not set
+# CONFIG_TIGON3 is not set
+# CONFIG_BNX2X is not set
+CONFIG_NET_VENDOR_BROCADE=y
+# CONFIG_BNA is not set
+# CONFIG_NET_CALXEDA_XGMAC is not set
+CONFIG_NET_VENDOR_CHELSIO=y
+# CONFIG_CHELSIO_T1 is not set
+CONFIG_CHELSIO_T3=y
+# CONFIG_CHELSIO_T4 is not set
+# CONFIG_CHELSIO_T4VF is not set
+CONFIG_NET_VENDOR_CISCO=y
+# CONFIG_ENIC is not set
+# CONFIG_DM9000 is not set
+# CONFIG_DNET is not set
+CONFIG_NET_VENDOR_DEC=y
+# CONFIG_NET_TULIP is not set
+CONFIG_NET_VENDOR_DLINK=y
+# CONFIG_DL2K is not set
+# CONFIG_SUNDANCE is not set
+CONFIG_NET_VENDOR_EMULEX=y
+# CONFIG_BE2NET is not set
+CONFIG_NET_VENDOR_EXAR=y
+# CONFIG_S2IO is not set
+# CONFIG_VXGE is not set
+CONFIG_NET_VENDOR_HP=y
+# CONFIG_HP100 is not set
+CONFIG_NET_VENDOR_INTEL=y
+# CONFIG_E100 is not set
+# CONFIG_E1000 is not set
+# CONFIG_E1000E is not set
+# CONFIG_IGB is not set
+# CONFIG_IGBVF is not set
+# CONFIG_IXGB is not set
+# CONFIG_IXGBE is not set
+CONFIG_NET_VENDOR_I825XX=y
+# CONFIG_IP1000 is not set
+# CONFIG_JME is not set
+CONFIG_NET_VENDOR_MARVELL=y
+# CONFIG_MVMDIO is not set
+# CONFIG_SKGE is not set
+# CONFIG_SKY2 is not set
+CONFIG_NET_VENDOR_MELLANOX=y
+# CONFIG_MLX4_EN is not set
+# CONFIG_MLX4_CORE is not set
+CONFIG_NET_VENDOR_MICREL=y
+# CONFIG_KS8851_MLL is not set
+# CONFIG_KSZ884X_PCI is not set
+CONFIG_NET_VENDOR_MYRI=y
+# CONFIG_MYRI10GE is not set
+# CONFIG_FEALNX is not set
+CONFIG_NET_VENDOR_NATSEMI=y
+# CONFIG_NATSEMI is not set
+# CONFIG_NS83820 is not set
+CONFIG_NET_VENDOR_8390=y
+CONFIG_AX88796=y
+# CONFIG_AX88796_93CX6 is not set
+# CONFIG_NE2K_PCI is not set
+CONFIG_NET_VENDOR_NVIDIA=y
+# CONFIG_FORCEDETH is not set
+CONFIG_NET_VENDOR_OKI=y
+# CONFIG_PCH_GBE is not set
+# CONFIG_ETHOC is not set
+CONFIG_NET_PACKET_ENGINE=y
+# CONFIG_HAMACHI is not set
+# CONFIG_YELLOWFIN is not set
+CONFIG_NET_VENDOR_QLOGIC=y
+# CONFIG_QLA3XXX is not set
+# CONFIG_QLCNIC is not set
+# CONFIG_QLGE is not set
+CONFIG_NETXEN_NIC=y
+CONFIG_NET_VENDOR_REALTEK=y
+# CONFIG_8139CP is not set
+# CONFIG_8139TOO is not set
+# CONFIG_R8169 is not set
+CONFIG_NET_VENDOR_RDC=y
+# CONFIG_R6040 is not set
+CONFIG_NET_VENDOR_SEEQ=y
+CONFIG_NET_VENDOR_SILAN=y
+# CONFIG_SC92031 is not set
+CONFIG_NET_VENDOR_SIS=y
+# CONFIG_SIS900 is not set
+# CONFIG_SIS190 is not set
+# CONFIG_SFC is not set
+CONFIG_NET_VENDOR_SMSC=y
+# CONFIG_SMC91X is not set
+# CONFIG_EPIC100 is not set
+# CONFIG_SMSC911X is not set
+# CONFIG_SMSC9420 is not set
+CONFIG_NET_VENDOR_STMICRO=y
+# CONFIG_STMMAC_ETH is not set
+CONFIG_NET_VENDOR_SUN=y
+# CONFIG_HAPPYMEAL is not set
+# CONFIG_SUNGEM is not set
+# CONFIG_CASSINI is not set
+# CONFIG_NIU is not set
+CONFIG_NET_VENDOR_TEHUTI=y
+# CONFIG_TEHUTI is not set
+CONFIG_NET_VENDOR_TI=y
+# CONFIG_TLAN is not set
+CONFIG_NET_VENDOR_TOSHIBA=y
+CONFIG_TC35815=y
+CONFIG_NET_VENDOR_VIA=y
+# CONFIG_VIA_RHINE is not set
+# CONFIG_VIA_VELOCITY is not set
+CONFIG_NET_VENDOR_WIZNET=y
+# CONFIG_WIZNET_W5100 is not set
+# CONFIG_WIZNET_W5300 is not set
+# CONFIG_FDDI is not set
+# CONFIG_HIPPI is not set
+CONFIG_PHYLIB=y
+
+#
+# MII PHY device drivers
+#
+# CONFIG_AT803X_PHY is not set
+# CONFIG_AMD_PHY is not set
+CONFIG_MARVELL_PHY=y
+CONFIG_DAVICOM_PHY=y
+CONFIG_QSEMI_PHY=y
+CONFIG_LXT_PHY=y
+CONFIG_CICADA_PHY=y
+CONFIG_VITESSE_PHY=y
+CONFIG_SMSC_PHY=y
+CONFIG_BROADCOM_PHY=y
+# CONFIG_BCM87XX_PHY is not set
+CONFIG_ICPLUS_PHY=y
+CONFIG_REALTEK_PHY=y
+# CONFIG_NATIONAL_PHY is not set
+# CONFIG_STE10XP is not set
+# CONFIG_LSI_ET1011C_PHY is not set
+# CONFIG_MICREL_PHY is not set
+# CONFIG_FIXED_PHY is not set
+CONFIG_MDIO_BITBANG=y
+CONFIG_PPP=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_DEFLATE=y
+# CONFIG_PPP_FILTER is not set
+CONFIG_PPP_MPPE=y
+# CONFIG_PPP_MULTILINK is not set
+# CONFIG_PPPOE is not set
+CONFIG_PPPOLAC=y
+CONFIG_PPPOPNS=y
+# CONFIG_PPP_ASYNC is not set
+# CONFIG_PPP_SYNC_TTY is not set
+# CONFIG_SLIP is not set
+CONFIG_SLHC=y
+
+#
+# USB Network Adapters
+#
+# CONFIG_USB_CATC is not set
+# CONFIG_USB_KAWETH is not set
+# CONFIG_USB_PEGASUS is not set
+# CONFIG_USB_RTL8150 is not set
+# CONFIG_USB_RTL8152 is not set
+CONFIG_USB_USBNET=y
+CONFIG_USB_NET_AX8817X=y
+CONFIG_USB_NET_AX88179_178A=y
+CONFIG_USB_NET_CDCETHER=y
+# CONFIG_USB_NET_CDC_EEM is not set
+CONFIG_USB_NET_CDC_NCM=y
+# CONFIG_USB_NET_CDC_MBIM is not set
+# CONFIG_USB_NET_DM9601 is not set
+# CONFIG_USB_NET_SMSC75XX is not set
+# CONFIG_USB_NET_SMSC95XX is not set
+# CONFIG_USB_NET_GL620A is not set
+CONFIG_USB_NET_NET1080=y
+# CONFIG_USB_NET_PLUSB is not set
+# CONFIG_USB_NET_MCS7830 is not set
+# CONFIG_USB_NET_RNDIS_HOST is not set
+CONFIG_USB_NET_CDC_SUBSET=y
+# CONFIG_USB_ALI_M5632 is not set
+# CONFIG_USB_AN2720 is not set
+CONFIG_USB_BELKIN=y
+CONFIG_USB_ARMLINUX=y
+# CONFIG_USB_EPSON2888 is not set
+# CONFIG_USB_KC2190 is not set
+CONFIG_USB_NET_ZAURUS=y
+# CONFIG_USB_NET_CX82310_ETH is not set
+# CONFIG_USB_NET_KALMIA is not set
+# CONFIG_USB_NET_QMI_WWAN is not set
+# CONFIG_USB_HSO is not set
+# CONFIG_USB_NET_INT51X1 is not set
+# CONFIG_USB_CDC_PHONET is not set
+# CONFIG_USB_IPHETH is not set
+# CONFIG_USB_SIERRA_NET is not set
+# CONFIG_USB_VL600 is not set
+CONFIG_WLAN=y
+# CONFIG_LIBERTAS_THINFIRM is not set
+# CONFIG_AIRO is not set
+CONFIG_ATMEL=y
+CONFIG_PCI_ATMEL=y
+# CONFIG_AT76C50X_USB is not set
+CONFIG_PRISM54=y
+# CONFIG_USB_ZD1201 is not set
+# CONFIG_USB_NET_RNDIS_WLAN is not set
+# CONFIG_RTL8180 is not set
+# CONFIG_RTL8187 is not set
+# CONFIG_ADM8211 is not set
+# CONFIG_MAC80211_HWSIM is not set
+# CONFIG_MWL8K is not set
+# CONFIG_WIFI_CONTROL_FUNC is not set
+# CONFIG_ATH_CARDS is not set
+# CONFIG_B43 is not set
+# CONFIG_B43LEGACY is not set
+# CONFIG_BRCMFMAC is not set
+CONFIG_HOSTAP=y
+CONFIG_HOSTAP_FIRMWARE=y
+CONFIG_HOSTAP_FIRMWARE_NVRAM=y
+CONFIG_HOSTAP_PLX=y
+CONFIG_HOSTAP_PCI=y
+CONFIG_IPW2100=y
+CONFIG_IPW2100_MONITOR=y
+# CONFIG_IPW2100_DEBUG is not set
+CONFIG_LIBIPW=y
+# CONFIG_LIBIPW_DEBUG is not set
+# CONFIG_IWLWIFI is not set
+# CONFIG_IWL4965 is not set
+# CONFIG_IWL3945 is not set
+CONFIG_LIBERTAS=y
+# CONFIG_LIBERTAS_USB is not set
+# CONFIG_LIBERTAS_DEBUG is not set
+# CONFIG_LIBERTAS_MESH is not set
+# CONFIG_P54_COMMON is not set
+# CONFIG_RT2X00 is not set
+# CONFIG_RTLWIFI is not set
+# CONFIG_WL_TI is not set
+# CONFIG_ZD1211RW is not set
+# CONFIG_MWIFIEX is not set
+
+#
+# Enable WiMAX (Networking options) to see the WiMAX drivers
+#
+# CONFIG_WAN is not set
+# CONFIG_VMXNET3 is not set
+# CONFIG_ISDN is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+CONFIG_INPUT_FF_MEMLESS=y
+# CONFIG_INPUT_POLLDEV is not set
+# CONFIG_INPUT_SPARSEKMAP is not set
+# CONFIG_INPUT_MATRIXKMAP is not set
+
+#
+# Userland interfaces
+#
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_INPUT_MOUSEDEV_PSAUX=y
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768
+# CONFIG_INPUT_JOYDEV is not set
+CONFIG_INPUT_EVDEV=y
+# CONFIG_INPUT_EVBUG is not set
+CONFIG_INPUT_KEYRESET=y
+
+#
+# Input Device Drivers
+#
+CONFIG_INPUT_KEYBOARD=y
+CONFIG_KEYBOARD_ATKBD=y
+# CONFIG_KEYBOARD_LKKBD is not set
+# CONFIG_KEYBOARD_NEWTON is not set
+# CONFIG_KEYBOARD_OPENCORES is not set
+# CONFIG_KEYBOARD_STOWAWAY is not set
+# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_KEYBOARD_XTKBD is not set
+CONFIG_INPUT_MOUSE=y
+CONFIG_MOUSE_PS2=y
+CONFIG_MOUSE_PS2_ALPS=y
+CONFIG_MOUSE_PS2_LOGIPS2PP=y
+CONFIG_MOUSE_PS2_SYNAPTICS=y
+CONFIG_MOUSE_PS2_CYPRESS=y
+CONFIG_MOUSE_PS2_TRACKPOINT=y
+# CONFIG_MOUSE_PS2_ELANTECH is not set
+# CONFIG_MOUSE_PS2_SENTELIC is not set
+# CONFIG_MOUSE_PS2_TOUCHKIT is not set
+# CONFIG_MOUSE_SERIAL is not set
+# CONFIG_MOUSE_APPLETOUCH is not set
+# CONFIG_MOUSE_BCM5974 is not set
+# CONFIG_MOUSE_VSXXXAA is not set
+# CONFIG_MOUSE_SYNAPTICS_USB is not set
+CONFIG_INPUT_JOYSTICK=y
+# CONFIG_JOYSTICK_ANALOG is not set
+# CONFIG_JOYSTICK_A3D is not set
+# CONFIG_JOYSTICK_ADI is not set
+# CONFIG_JOYSTICK_COBRA is not set
+# CONFIG_JOYSTICK_GF2K is not set
+# CONFIG_JOYSTICK_GRIP is not set
+# CONFIG_JOYSTICK_GRIP_MP is not set
+# CONFIG_JOYSTICK_GUILLEMOT is not set
+# CONFIG_JOYSTICK_INTERACT is not set
+# CONFIG_JOYSTICK_SIDEWINDER is not set
+# CONFIG_JOYSTICK_TMDC is not set
+# CONFIG_JOYSTICK_IFORCE is not set
+# CONFIG_JOYSTICK_WARRIOR is not set
+# CONFIG_JOYSTICK_MAGELLAN is not set
+# CONFIG_JOYSTICK_SPACEORB is not set
+# CONFIG_JOYSTICK_SPACEBALL is not set
+# CONFIG_JOYSTICK_STINGER is not set
+# CONFIG_JOYSTICK_TWIDJOY is not set
+# CONFIG_JOYSTICK_ZHENHUA is not set
+# CONFIG_JOYSTICK_JOYDUMP is not set
+CONFIG_JOYSTICK_XPAD=y
+CONFIG_JOYSTICK_XPAD_FF=y
+CONFIG_JOYSTICK_XPAD_LEDS=y
+CONFIG_INPUT_TABLET=y
+CONFIG_TABLET_USB_ACECAD=y
+CONFIG_TABLET_USB_AIPTEK=y
+CONFIG_TABLET_USB_GTCO=y
+CONFIG_TABLET_USB_HANWANG=y
+CONFIG_TABLET_USB_KBTAB=y
+CONFIG_TABLET_USB_WACOM=y
+CONFIG_INPUT_TOUCHSCREEN=y
+# CONFIG_TOUCHSCREEN_AD7879 is not set
+# CONFIG_TOUCHSCREEN_CYTTSP_CORE is not set
+# CONFIG_TOUCHSCREEN_DYNAPRO is not set
+# CONFIG_TOUCHSCREEN_HAMPSHIRE is not set
+# CONFIG_TOUCHSCREEN_FUJITSU is not set
+# CONFIG_TOUCHSCREEN_GUNZE is not set
+# CONFIG_TOUCHSCREEN_ELO is not set
+# CONFIG_TOUCHSCREEN_WACOM_W8001 is not set
+# CONFIG_TOUCHSCREEN_MTOUCH is not set
+# CONFIG_TOUCHSCREEN_INEXIO is not set
+# CONFIG_TOUCHSCREEN_MK712 is not set
+# CONFIG_TOUCHSCREEN_PENMOUNT is not set
+# CONFIG_TOUCHSCREEN_TOUCHRIGHT is not set
+# CONFIG_TOUCHSCREEN_TOUCHWIN is not set
+# CONFIG_TOUCHSCREEN_USB_COMPOSITE is not set
+# CONFIG_TOUCHSCREEN_TOUCHIT213 is not set
+# CONFIG_TOUCHSCREEN_TSC_SERIO is not set
+CONFIG_INPUT_MISC=y
+# CONFIG_INPUT_AD714X is not set
+# CONFIG_INPUT_PCSPKR is not set
+# CONFIG_INPUT_ATI_REMOTE2 is not set
+CONFIG_INPUT_KEYCHORD=y
+# CONFIG_INPUT_KEYSPAN_REMOTE is not set
+# CONFIG_INPUT_POWERMATE is not set
+# CONFIG_INPUT_YEALINK is not set
+# CONFIG_INPUT_CM109 is not set
+CONFIG_INPUT_UINPUT=y
+CONFIG_INPUT_GPIO=y
+# CONFIG_INPUT_ADXL34X is not set
+# CONFIG_INPUT_IMS_PCU is not set
+# CONFIG_INPUT_CMA3000 is not set
+
+#
+# Hardware I/O ports
+#
+CONFIG_SERIO=y
+CONFIG_SERIO_I8042=y
+CONFIG_SERIO_SERPORT=y
+# CONFIG_SERIO_PCIPS2 is not set
+CONFIG_SERIO_LIBPS2=y
+# CONFIG_SERIO_RAW is not set
+# CONFIG_SERIO_ALTERA_PS2 is not set
+# CONFIG_SERIO_PS2MULT is not set
+# CONFIG_SERIO_ARC_PS2 is not set
+# CONFIG_GAMEPORT is not set
+
+#
+# Character devices
+#
+CONFIG_TTY=y
+# CONFIG_VT is not set
+CONFIG_UNIX98_PTYS=y
+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_SERIAL_NONSTANDARD is not set
+# CONFIG_NOZOMI is not set
+# CONFIG_N_GSM is not set
+# CONFIG_TRACE_SINK is not set
+CONFIG_DEVMEM=y
+CONFIG_DEVKMEM=y
+
+#
+# Serial drivers
+#
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_DEPRECATED_OPTIONS=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_PCI=y
+CONFIG_SERIAL_8250_NR_UARTS=4
+CONFIG_SERIAL_8250_RUNTIME_UARTS=4
+# CONFIG_SERIAL_8250_EXTENDED is not set
+# CONFIG_SERIAL_8250_DW is not set
+
+#
+# Non-8250 serial port support
+#
+# CONFIG_SERIAL_MFD_HSU is not set
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_JSM is not set
+# CONFIG_SERIAL_SCCNXP is not set
+# CONFIG_SERIAL_TIMBERDALE is not set
+# CONFIG_SERIAL_ALTERA_JTAGUART is not set
+# CONFIG_SERIAL_ALTERA_UART is not set
+# CONFIG_SERIAL_PCH_UART is not set
+# CONFIG_SERIAL_ARC is not set
+# CONFIG_SERIAL_RP2 is not set
+# CONFIG_TTY_PRINTK is not set
+# CONFIG_IPMI_HANDLER is not set
+CONFIG_HW_RANDOM=y
+# CONFIG_HW_RANDOM_TIMERIOMEM is not set
+# CONFIG_R3964 is not set
+# CONFIG_APPLICOM is not set
+# CONFIG_RAW_DRIVER is not set
+# CONFIG_TCG_TPM is not set
+CONFIG_DEVPORT=y
+# CONFIG_I2C is not set
+# CONFIG_SPI is not set
+
+#
+# Qualcomm MSM SSBI bus support
+#
+# CONFIG_SSBI is not set
+# CONFIG_HSI is not set
+
+#
+# PPS support
+#
+# CONFIG_PPS is not set
+
+#
+# PPS generators support
+#
+
+#
+# PTP clock support
+#
+# CONFIG_PTP_1588_CLOCK is not set
+
+#
+# Enable PHYLIB and NETWORK_PHY_TIMESTAMPING to see the additional clocks.
+#
+# CONFIG_PTP_1588_CLOCK_PCH is not set
+CONFIG_ARCH_HAVE_CUSTOM_GPIO_H=y
+CONFIG_GPIO_DEVRES=y
+# CONFIG_W1 is not set
+CONFIG_POWER_SUPPLY=y
+# CONFIG_POWER_SUPPLY_DEBUG is not set
+# CONFIG_PDA_POWER is not set
+# CONFIG_TEST_POWER is not set
+# CONFIG_BATTERY_DS2780 is not set
+# CONFIG_BATTERY_DS2781 is not set
+# CONFIG_BATTERY_BQ27x00 is not set
+# CONFIG_CHARGER_MAX8903 is not set
+# CONFIG_BATTERY_GOLDFISH is not set
+# CONFIG_POWER_RESET is not set
+# CONFIG_POWER_AVS is not set
+# CONFIG_HWMON is not set
+# CONFIG_THERMAL is not set
+# CONFIG_WATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
+
+#
+# Sonics Silicon Backplane
+#
+# CONFIG_SSB is not set
+CONFIG_BCMA_POSSIBLE=y
+
+#
+# Broadcom specific AMBA
+#
+# CONFIG_BCMA is not set
+
+#
+# Multifunction device drivers
+#
+# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_CROS_EC is not set
+# CONFIG_HTC_PASIC3 is not set
+# CONFIG_LPC_ICH is not set
+# CONFIG_LPC_SCH is not set
+# CONFIG_MFD_JANZ_CMODIO is not set
+# CONFIG_MFD_VIPERBOARD is not set
+# CONFIG_MFD_RDC321X is not set
+# CONFIG_MFD_RTSX_PCI is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_ABX500_CORE is not set
+# CONFIG_MFD_SYSCON is not set
+# CONFIG_MFD_TI_AM335X_TSCADC is not set
+# CONFIG_MFD_TMIO is not set
+# CONFIG_MFD_VX855 is not set
+# CONFIG_REGULATOR is not set
+CONFIG_MEDIA_SUPPORT=y
+
+#
+# Multimedia core support
+#
+# CONFIG_MEDIA_CAMERA_SUPPORT is not set
+# CONFIG_MEDIA_ANALOG_TV_SUPPORT is not set
+# CONFIG_MEDIA_DIGITAL_TV_SUPPORT is not set
+# CONFIG_MEDIA_RADIO_SUPPORT is not set
+# CONFIG_MEDIA_RC_SUPPORT is not set
+# CONFIG_VIDEO_ADV_DEBUG is not set
+# CONFIG_VIDEO_FIXED_MINOR_RANGES is not set
+
+#
+# Media drivers
+#
+# CONFIG_MEDIA_USB_SUPPORT is not set
+# CONFIG_MEDIA_PCI_SUPPORT is not set
+
+#
+# Supported MMC/SDIO adapters
+#
+# CONFIG_CYPRESS_FIRMWARE is not set
+
+#
+# Media ancillary drivers (tuners, sensors, i2c, frontends)
+#
+
+#
+# Customise DVB Frontends
+#
+
+#
+# Tools to develop new frontends
+#
+# CONFIG_DVB_DUMMY_FE is not set
+
+#
+# Graphics support
+#
+CONFIG_VGA_ARB=y
+CONFIG_VGA_ARB_MAX_GPUS=16
+# CONFIG_DRM is not set
+# CONFIG_VGASTATE is not set
+# CONFIG_VIDEO_OUTPUT_CONTROL is not set
+CONFIG_FB=y
+# CONFIG_FIRMWARE_EDID is not set
+# CONFIG_FB_DDC is not set
+# CONFIG_FB_BOOT_VESA_SUPPORT is not set
+CONFIG_FB_CFB_FILLRECT=y
+CONFIG_FB_CFB_COPYAREA=y
+CONFIG_FB_CFB_IMAGEBLIT=y
+# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set
+# CONFIG_FB_SYS_FILLRECT is not set
+# CONFIG_FB_SYS_COPYAREA is not set
+# CONFIG_FB_SYS_IMAGEBLIT is not set
+# CONFIG_FB_FOREIGN_ENDIAN is not set
+# CONFIG_FB_SYS_FOPS is not set
+# CONFIG_FB_SVGALIB is not set
+# CONFIG_FB_MACMODES is not set
+# CONFIG_FB_BACKLIGHT is not set
+# CONFIG_FB_MODE_HELPERS is not set
+CONFIG_FB_TILEBLITTING=y
+
+#
+# Frame buffer hardware drivers
+#
+CONFIG_FB_CIRRUS=y
+# CONFIG_FB_PM2 is not set
+# CONFIG_FB_CYBER2000 is not set
+# CONFIG_FB_ASILIANT is not set
+# CONFIG_FB_IMSTT is not set
+# CONFIG_FB_UVESA is not set
+# CONFIG_FB_S1D13XXX is not set
+# CONFIG_FB_NVIDIA is not set
+# CONFIG_FB_RIVA is not set
+# CONFIG_FB_I740 is not set
+CONFIG_FB_MATROX=y
+# CONFIG_FB_MATROX_MILLENIUM is not set
+# CONFIG_FB_MATROX_MYSTIQUE is not set
+CONFIG_FB_MATROX_G=y
+# CONFIG_FB_MATROX_I2C is not set
+# CONFIG_FB_RADEON is not set
+# CONFIG_FB_ATY128 is not set
+# CONFIG_FB_ATY is not set
+# CONFIG_FB_S3 is not set
+# CONFIG_FB_SAVAGE is not set
+# CONFIG_FB_SIS is not set
+# CONFIG_FB_NEOMAGIC is not set
+# CONFIG_FB_KYRO is not set
+# CONFIG_FB_3DFX is not set
+# CONFIG_FB_VOODOO1 is not set
+# CONFIG_FB_VT8623 is not set
+# CONFIG_FB_TRIDENT is not set
+# CONFIG_FB_ARK is not set
+# CONFIG_FB_PM3 is not set
+# CONFIG_FB_CARMINE is not set
+# CONFIG_FB_SMSCUFX is not set
+# CONFIG_FB_UDL is not set
+# CONFIG_FB_GOLDFISH is not set
+# CONFIG_FB_VIRTUAL is not set
+# CONFIG_FB_METRONOME is not set
+# CONFIG_FB_MB862XX is not set
+# CONFIG_FB_BROADSHEET is not set
+# CONFIG_FB_AUO_K190X is not set
+# CONFIG_EXYNOS_VIDEO is not set
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_LCD_CLASS_DEVICE=y
+# CONFIG_LCD_PLATFORM is not set
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+CONFIG_BACKLIGHT_GENERIC=y
+# CONFIG_ADF is not set
+# CONFIG_LOGO is not set
+CONFIG_SOUND=y
+# CONFIG_SOUND_OSS_CORE is not set
+CONFIG_SND=y
+CONFIG_SND_TIMER=y
+CONFIG_SND_PCM=y
+CONFIG_SND_RAWMIDI=y
+CONFIG_SND_COMPRESS_OFFLOAD=y
+CONFIG_SND_JACK=y
+# CONFIG_SND_SEQUENCER is not set
+# CONFIG_SND_MIXER_OSS is not set
+# CONFIG_SND_PCM_OSS is not set
+# CONFIG_SND_HRTIMER is not set
+# CONFIG_SND_DYNAMIC_MINORS is not set
+CONFIG_SND_SUPPORT_OLD_API=y
+CONFIG_SND_VERBOSE_PROCFS=y
+# CONFIG_SND_VERBOSE_PRINTK is not set
+# CONFIG_SND_DEBUG is not set
+# CONFIG_SND_RAWMIDI_SEQ is not set
+# CONFIG_SND_OPL3_LIB_SEQ is not set
+# CONFIG_SND_OPL4_LIB_SEQ is not set
+# CONFIG_SND_SBAWE_SEQ is not set
+# CONFIG_SND_EMU10K1_SEQ is not set
+CONFIG_SND_DRIVERS=y
+# CONFIG_SND_DUMMY is not set
+# CONFIG_SND_ALOOP is not set
+# CONFIG_SND_MTPAV is not set
+# CONFIG_SND_SERIAL_U16550 is not set
+# CONFIG_SND_MPU401 is not set
+CONFIG_SND_PCI=y
+# CONFIG_SND_AD1889 is not set
+# CONFIG_SND_ALS300 is not set
+# CONFIG_SND_ALS4000 is not set
+# CONFIG_SND_ALI5451 is not set
+# CONFIG_SND_ATIIXP is not set
+# CONFIG_SND_ATIIXP_MODEM is not set
+# CONFIG_SND_AU8810 is not set
+# CONFIG_SND_AU8820 is not set
+# CONFIG_SND_AU8830 is not set
+# CONFIG_SND_AW2 is not set
+# CONFIG_SND_AZT3328 is not set
+# CONFIG_SND_BT87X is not set
+# CONFIG_SND_CA0106 is not set
+# CONFIG_SND_CMIPCI is not set
+# CONFIG_SND_OXYGEN is not set
+# CONFIG_SND_CS4281 is not set
+# CONFIG_SND_CS46XX is not set
+# CONFIG_SND_CS5530 is not set
+# CONFIG_SND_CS5535AUDIO is not set
+# CONFIG_SND_CTXFI is not set
+# CONFIG_SND_DARLA20 is not set
+# CONFIG_SND_GINA20 is not set
+# CONFIG_SND_LAYLA20 is not set
+# CONFIG_SND_DARLA24 is not set
+# CONFIG_SND_GINA24 is not set
+# CONFIG_SND_LAYLA24 is not set
+# CONFIG_SND_MONA is not set
+# CONFIG_SND_MIA is not set
+# CONFIG_SND_ECHO3G is not set
+# CONFIG_SND_INDIGO is not set
+# CONFIG_SND_INDIGOIO is not set
+# CONFIG_SND_INDIGODJ is not set
+# CONFIG_SND_INDIGOIOX is not set
+# CONFIG_SND_INDIGODJX is not set
+# CONFIG_SND_EMU10K1 is not set
+# CONFIG_SND_EMU10K1X is not set
+# CONFIG_SND_ENS1370 is not set
+# CONFIG_SND_ENS1371 is not set
+# CONFIG_SND_ES1938 is not set
+# CONFIG_SND_ES1968 is not set
+# CONFIG_SND_FM801 is not set
+# CONFIG_SND_HDA_INTEL is not set
+# CONFIG_SND_HDSP is not set
+# CONFIG_SND_HDSPM is not set
+# CONFIG_SND_ICE1712 is not set
+# CONFIG_SND_ICE1724 is not set
+# CONFIG_SND_INTEL8X0 is not set
+# CONFIG_SND_INTEL8X0M is not set
+# CONFIG_SND_KORG1212 is not set
+# CONFIG_SND_LOLA is not set
+# CONFIG_SND_LX6464ES is not set
+# CONFIG_SND_MAESTRO3 is not set
+# CONFIG_SND_MIXART is not set
+# CONFIG_SND_NM256 is not set
+# CONFIG_SND_PCXHR is not set
+# CONFIG_SND_RIPTIDE is not set
+# CONFIG_SND_RME32 is not set
+# CONFIG_SND_RME96 is not set
+# CONFIG_SND_RME9652 is not set
+# CONFIG_SND_SONICVIBES is not set
+# CONFIG_SND_TRIDENT is not set
+# CONFIG_SND_VIA82XX is not set
+# CONFIG_SND_VIA82XX_MODEM is not set
+# CONFIG_SND_VIRTUOSO is not set
+# CONFIG_SND_VX222 is not set
+# CONFIG_SND_YMFPCI is not set
+CONFIG_SND_MIPS=y
+CONFIG_SND_USB=y
+# CONFIG_SND_USB_AUDIO is not set
+# CONFIG_SND_USB_UA101 is not set
+# CONFIG_SND_USB_CAIAQ is not set
+# CONFIG_SND_USB_6FIRE is not set
+CONFIG_SND_SOC=y
+# CONFIG_SND_ATMEL_SOC is not set
+# CONFIG_SND_SOC_ALL_CODECS is not set
+# CONFIG_SND_SIMPLE_CARD is not set
+# CONFIG_SOUND_PRIME is not set
+
+#
+# HID support
+#
+CONFIG_HID=y
+# CONFIG_HID_BATTERY_STRENGTH is not set
+CONFIG_HIDRAW=y
+CONFIG_UHID=y
+CONFIG_HID_GENERIC=y
+
+#
+# Special HID drivers
+#
+CONFIG_HID_A4TECH=y
+CONFIG_HID_ACRUX=y
+CONFIG_HID_ACRUX_FF=y
+CONFIG_HID_APPLE=y
+# CONFIG_HID_APPLEIR is not set
+# CONFIG_HID_AUREAL is not set
+CONFIG_HID_BELKIN=y
+CONFIG_HID_CHERRY=y
+CONFIG_HID_CHICONY=y
+CONFIG_HID_PRODIKEYS=y
+CONFIG_HID_CYPRESS=y
+CONFIG_HID_DRAGONRISE=y
+CONFIG_DRAGONRISE_FF=y
+CONFIG_HID_EMS_FF=y
+CONFIG_HID_ELECOM=y
+CONFIG_HID_EZKEY=y
+CONFIG_HID_HOLTEK=y
+# CONFIG_HOLTEK_FF is not set
+CONFIG_HID_KEYTOUCH=y
+CONFIG_HID_KYE=y
+CONFIG_HID_UCLOGIC=y
+CONFIG_HID_WALTOP=y
+CONFIG_HID_GYRATION=y
+# CONFIG_HID_ICADE is not set
+CONFIG_HID_TWINHAN=y
+CONFIG_HID_KENSINGTON=y
+CONFIG_HID_LCPOWER=y
+# CONFIG_HID_LENOVO_TPKBD is not set
+CONFIG_HID_LOGITECH=y
+CONFIG_HID_LOGITECH_DJ=y
+CONFIG_LOGITECH_FF=y
+CONFIG_LOGIRUMBLEPAD2_FF=y
+CONFIG_LOGIG940_FF=y
+CONFIG_LOGIWHEELS_FF=y
+CONFIG_HID_MAGICMOUSE=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MONTEREY=y
+CONFIG_HID_MULTITOUCH=y
+CONFIG_HID_NTRIG=y
+CONFIG_HID_ORTEK=y
+CONFIG_HID_PANTHERLORD=y
+CONFIG_PANTHERLORD_FF=y
+CONFIG_HID_PETALYNX=y
+CONFIG_HID_PICOLCD=y
+# CONFIG_HID_PICOLCD_FB is not set
+# CONFIG_HID_PICOLCD_BACKLIGHT is not set
+# CONFIG_HID_PICOLCD_LCD is not set
+# CONFIG_HID_PICOLCD_LEDS is not set
+CONFIG_HID_PRIMAX=y
+# CONFIG_HID_PS3REMOTE is not set
+CONFIG_HID_ROCCAT=y
+CONFIG_HID_SAITEK=y
+CONFIG_HID_SAMSUNG=y
+CONFIG_HID_SONY=y
+CONFIG_HID_SPEEDLINK=y
+# CONFIG_HID_STEELSERIES is not set
+CONFIG_HID_SUNPLUS=y
+CONFIG_HID_GREENASIA=y
+CONFIG_GREENASIA_FF=y
+CONFIG_HID_SMARTJOYPLUS=y
+CONFIG_SMARTJOYPLUS_FF=y
+CONFIG_HID_TIVO=y
+CONFIG_HID_TOPSEED=y
+# CONFIG_HID_THINGM is not set
+CONFIG_HID_THRUSTMASTER=y
+# CONFIG_THRUSTMASTER_FF is not set
+CONFIG_HID_WACOM=y
+CONFIG_HID_WIIMOTE=y
+CONFIG_HID_WIIMOTE_EXT=y
+CONFIG_HID_ZEROPLUS=y
+# CONFIG_ZEROPLUS_FF is not set
+CONFIG_HID_ZYDACRON=y
+# CONFIG_HID_SENSOR_HUB is not set
+
+#
+# USB HID support
+#
+CONFIG_USB_HID=y
+# CONFIG_HID_PID is not set
+CONFIG_USB_HIDDEV=y
+CONFIG_USB_ARCH_HAS_OHCI=y
+CONFIG_USB_ARCH_HAS_EHCI=y
+CONFIG_USB_ARCH_HAS_XHCI=y
+CONFIG_USB_SUPPORT=y
+CONFIG_USB_COMMON=y
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB=y
+# CONFIG_USB_DEBUG is not set
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+
+#
+# Miscellaneous USB options
+#
+CONFIG_USB_DEFAULT_PERSIST=y
+# CONFIG_USB_DYNAMIC_MINORS is not set
+# CONFIG_USB_OTG is not set
+# CONFIG_USB_OTG_WHITELIST is not set
+# CONFIG_USB_OTG_BLACKLIST_HUB is not set
+# CONFIG_USB_MON is not set
+# CONFIG_USB_WUSB_CBAF is not set
+
+#
+# USB Host Controller Drivers
+#
+# CONFIG_USB_C67X00_HCD is not set
+# CONFIG_USB_XHCI_HCD is not set
+CONFIG_USB_EHCI_HCD=y
+# CONFIG_USB_EHCI_ROOT_HUB_TT is not set
+CONFIG_USB_EHCI_TT_NEWSCHED=y
+CONFIG_USB_EHCI_PCI=y
+# CONFIG_USB_EHCI_HCD_PLATFORM is not set
+# CONFIG_USB_OXU210HP_HCD is not set
+# CONFIG_USB_ISP116X_HCD is not set
+# CONFIG_USB_ISP1760_HCD is not set
+# CONFIG_USB_ISP1362_HCD is not set
+# CONFIG_USB_OHCI_HCD is not set
+# CONFIG_USB_UHCI_HCD is not set
+# CONFIG_USB_SL811_HCD is not set
+# CONFIG_USB_R8A66597_HCD is not set
+# CONFIG_USB_MUSB_HDRC is not set
+# CONFIG_USB_RENESAS_USBHS is not set
+
+#
+# USB Device Class drivers
+#
+# CONFIG_USB_ACM is not set
+# CONFIG_USB_PRINTER is not set
+# CONFIG_USB_WDM is not set
+# CONFIG_USB_TMC is not set
+
+#
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
+#
+
+#
+# also be needed; see USB_STORAGE Help for more info
+#
+# CONFIG_USB_STORAGE is not set
+
+#
+# USB Imaging devices
+#
+# CONFIG_USB_MDC800 is not set
+# CONFIG_USB_MICROTEK is not set
+# CONFIG_USB_DWC3 is not set
+# CONFIG_USB_CHIPIDEA is not set
+
+#
+# USB port drivers
+#
+# CONFIG_USB_SERIAL is not set
+
+#
+# USB Miscellaneous drivers
+#
+# CONFIG_USB_EMI62 is not set
+# CONFIG_USB_EMI26 is not set
+# CONFIG_USB_ADUTUX is not set
+# CONFIG_USB_SEVSEG is not set
+# CONFIG_USB_RIO500 is not set
+# CONFIG_USB_LEGOTOWER is not set
+# CONFIG_USB_LCD is not set
+# CONFIG_USB_LED is not set
+# CONFIG_USB_CYPRESS_CY7C63 is not set
+# CONFIG_USB_CYTHERM is not set
+# CONFIG_USB_IDMOUSE is not set
+# CONFIG_USB_FTDI_ELAN is not set
+# CONFIG_USB_APPLEDISPLAY is not set
+# CONFIG_USB_SISUSBVGA is not set
+# CONFIG_USB_LD is not set
+# CONFIG_USB_TRANCEVIBRATOR is not set
+# CONFIG_USB_IOWARRIOR is not set
+# CONFIG_USB_TEST is not set
+# CONFIG_USB_ISIGHTFW is not set
+# CONFIG_USB_YUREX is not set
+# CONFIG_USB_EZUSB_FX2 is not set
+# CONFIG_USB_PHY is not set
+CONFIG_USB_OTG_WAKELOCK=y
+CONFIG_USB_GADGET=y
+# CONFIG_USB_GADGET_DEBUG is not set
+# CONFIG_USB_GADGET_DEBUG_FILES is not set
+CONFIG_USB_GADGET_VBUS_DRAW=2
+CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS=2
+
+#
+# USB Peripheral Controller
+#
+# CONFIG_USB_FUSB300 is not set
+# CONFIG_USB_R8A66597 is not set
+# CONFIG_USB_PXA27X is not set
+# CONFIG_USB_MV_UDC is not set
+# CONFIG_USB_MV_U3D is not set
+# CONFIG_USB_M66592 is not set
+# CONFIG_USB_AMD5536UDC is not set
+# CONFIG_USB_NET2272 is not set
+# CONFIG_USB_NET2280 is not set
+# CONFIG_USB_GOKU is not set
+# CONFIG_USB_EG20T is not set
+# CONFIG_USB_DUMMY_HCD is not set
+CONFIG_USB_LIBCOMPOSITE=y
+CONFIG_USB_F_ACM=y
+CONFIG_USB_U_SERIAL=y
+# CONFIG_USB_ZERO is not set
+# CONFIG_USB_AUDIO is not set
+# CONFIG_USB_ETH is not set
+# CONFIG_USB_G_NCM is not set
+# CONFIG_USB_GADGETFS is not set
+# CONFIG_USB_FUNCTIONFS is not set
+# CONFIG_USB_MASS_STORAGE is not set
+# CONFIG_USB_G_SERIAL is not set
+# CONFIG_USB_MIDI_GADGET is not set
+# CONFIG_USB_G_PRINTER is not set
+CONFIG_USB_G_ANDROID=y
+# CONFIG_USB_ANDROID_RNDIS_DWORD_ALIGNED is not set
+# CONFIG_USB_CDC_COMPOSITE is not set
+# CONFIG_USB_G_NOKIA is not set
+# CONFIG_USB_G_ACM_MS is not set
+# CONFIG_USB_G_MULTI is not set
+# CONFIG_USB_G_HID is not set
+# CONFIG_USB_G_DBGP is not set
+# CONFIG_UWB is not set
+# CONFIG_MMC is not set
+# CONFIG_MEMSTICK is not set
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+
+#
+# LED drivers
+#
+# CONFIG_LEDS_OT200 is not set
+
+#
+# LED Triggers
+#
+# CONFIG_LEDS_TRIGGERS is not set
+CONFIG_SWITCH=y
+# CONFIG_ACCESSIBILITY is not set
+# CONFIG_INFINIBAND is not set
+CONFIG_RTC_LIB=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_HCTOSYS=y
+CONFIG_RTC_SYSTOHC=y
+CONFIG_RTC_HCTOSYS_DEVICE="rtc0"
+# CONFIG_RTC_DEBUG is not set
+
+#
+# RTC interfaces
+#
+CONFIG_RTC_INTF_SYSFS=y
+CONFIG_RTC_INTF_PROC=y
+CONFIG_RTC_INTF_DEV=y
+# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set
+# CONFIG_RTC_DRV_TEST is not set
+
+#
+# SPI RTC drivers
+#
+
+#
+# Platform RTC drivers
+#
+CONFIG_RTC_DRV_CMOS=y
+# CONFIG_RTC_DRV_DS1286 is not set
+# CONFIG_RTC_DRV_DS1511 is not set
+# CONFIG_RTC_DRV_DS1553 is not set
+# CONFIG_RTC_DRV_DS1742 is not set
+# CONFIG_RTC_DRV_STK17TA8 is not set
+# CONFIG_RTC_DRV_M48T86 is not set
+# CONFIG_RTC_DRV_M48T35 is not set
+# CONFIG_RTC_DRV_M48T59 is not set
+# CONFIG_RTC_DRV_MSM6242 is not set
+# CONFIG_RTC_DRV_BQ4802 is not set
+# CONFIG_RTC_DRV_RP5C01 is not set
+# CONFIG_RTC_DRV_V3020 is not set
+# CONFIG_RTC_DRV_DS2404 is not set
+
+#
+# on-CPU RTC drivers
+#
+
+#
+# HID Sensor RTC drivers
+#
+# CONFIG_RTC_DRV_HID_SENSOR_TIME is not set
+# CONFIG_DMADEVICES is not set
+# CONFIG_AUXDISPLAY is not set
+CONFIG_UIO=y
+CONFIG_UIO_CIF=y
+# CONFIG_UIO_PDRV is not set
+# CONFIG_UIO_PDRV_GENIRQ is not set
+# CONFIG_UIO_DMEM_GENIRQ is not set
+# CONFIG_UIO_AEC is not set
+# CONFIG_UIO_SERCOS3 is not set
+# CONFIG_UIO_PCI_GENERIC is not set
+# CONFIG_UIO_NETX is not set
+# CONFIG_VIRT_DRIVERS is not set
+
+#
+# Virtio drivers
+#
+# CONFIG_VIRTIO_PCI is not set
+# CONFIG_VIRTIO_MMIO is not set
+
+#
+# Microsoft Hyper-V guest support
+#
+CONFIG_STAGING=y
+# CONFIG_ET131X is not set
+# CONFIG_USBIP_CORE is not set
+# CONFIG_W35UND is not set
+# CONFIG_PRISM2_USB is not set
+# CONFIG_ECHO is not set
+# CONFIG_ASUS_OLED is not set
+# CONFIG_R8712U is not set
+# CONFIG_RTS5139 is not set
+# CONFIG_TRANZPORT is not set
+# CONFIG_LINE6_USB is not set
+# CONFIG_DX_SEP is not set
+# CONFIG_ZSMALLOC is not set
+# CONFIG_FB_SM7XX is not set
+# CONFIG_CRYSTALHD is not set
+# CONFIG_FB_XGI is not set
+# CONFIG_BCM_WIMAX is not set
+# CONFIG_FT1000 is not set
+
+#
+# Speakup console speech
+#
+# CONFIG_STAGING_MEDIA is not set
+
+#
+# Android
+#
+CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_ASHMEM=y
+CONFIG_ANDROID_LOGGER=y
+CONFIG_ANDROID_TIMED_OUTPUT=y
+CONFIG_ANDROID_LOW_MEMORY_KILLER=y
+CONFIG_ANDROID_LOW_MEMORY_KILLER_AUTODETECT_OOM_ADJ_VALUES=y
+CONFIG_ANDROID_INTF_ALARM_DEV=y
+CONFIG_SYNC=y
+# CONFIG_SW_SYNC is not set
+CONFIG_ION=y
+# CONFIG_ION_TEST is not set
+# CONFIG_USB_WPAN_HCD is not set
+# CONFIG_WIMAX_GDM72XX is not set
+CONFIG_NET_VENDOR_SILICOM=y
+# CONFIG_CED1401 is not set
+# CONFIG_DGRP is not set
+# CONFIG_USB_DWC2 is not set
+
+#
+# Hardware Spinlock drivers
+#
+CONFIG_CLKSRC_I8253=y
+CONFIG_CLKEVT_I8253=y
+CONFIG_I8253_LOCK=y
+CONFIG_CLKBLD_I8253=y
+# CONFIG_MAILBOX is not set
+CONFIG_IOMMU_SUPPORT=y
+
+#
+# Remoteproc drivers
+#
+# CONFIG_STE_MODEM_RPROC is not set
+
+#
+# Rpmsg drivers
+#
+# CONFIG_PM_DEVFREQ is not set
+# CONFIG_EXTCON is not set
+# CONFIG_MEMORY is not set
+# CONFIG_IIO is not set
+# CONFIG_VME_BUS is not set
+# CONFIG_PWM is not set
+# CONFIG_IPACK_BUS is not set
+# CONFIG_RESET_CONTROLLER is not set
+
+#
+# Firmware Drivers
+#
+# CONFIG_FIRMWARE_MEMMAP is not set
+
+#
+# File systems
+#
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+# CONFIG_EXT2_FS_XIP is not set
+CONFIG_EXT3_FS=y
+CONFIG_EXT3_DEFAULTS_TO_ORDERED=y
+CONFIG_EXT3_FS_XATTR=y
+# CONFIG_EXT3_FS_POSIX_ACL is not set
+# CONFIG_EXT3_FS_SECURITY is not set
+CONFIG_EXT4_FS=y
+# CONFIG_EXT4_FS_POSIX_ACL is not set
+CONFIG_EXT4_FS_SECURITY=y
+# CONFIG_EXT4_DEBUG is not set
+CONFIG_JBD=y
+CONFIG_JBD2=y
+CONFIG_FS_MBCACHE=y
+CONFIG_REISERFS_FS=y
+# CONFIG_REISERFS_CHECK is not set
+CONFIG_REISERFS_PROC_INFO=y
+CONFIG_REISERFS_FS_XATTR=y
+CONFIG_REISERFS_FS_POSIX_ACL=y
+CONFIG_REISERFS_FS_SECURITY=y
+CONFIG_JFS_FS=y
+CONFIG_JFS_POSIX_ACL=y
+CONFIG_JFS_SECURITY=y
+# CONFIG_JFS_DEBUG is not set
+# CONFIG_JFS_STATISTICS is not set
+CONFIG_XFS_FS=y
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+# CONFIG_XFS_RT is not set
+# CONFIG_XFS_WARN is not set
+# CONFIG_XFS_DEBUG is not set
+# CONFIG_GFS2_FS is not set
+# CONFIG_OCFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+CONFIG_FS_POSIX_ACL=y
+CONFIG_EXPORTFS=y
+CONFIG_FILE_LOCKING=y
+CONFIG_FSNOTIFY=y
+CONFIG_DNOTIFY=y
+CONFIG_INOTIFY_USER=y
+# CONFIG_FANOTIFY is not set
+CONFIG_QUOTA=y
+# CONFIG_QUOTA_NETLINK_INTERFACE is not set
+CONFIG_PRINT_QUOTA_WARNING=y
+# CONFIG_QUOTA_DEBUG is not set
+CONFIG_QUOTA_TREE=y
+# CONFIG_QFMT_V1 is not set
+CONFIG_QFMT_V2=y
+CONFIG_QUOTACTL=y
+# CONFIG_AUTOFS4_FS is not set
+CONFIG_FUSE_FS=y
+# CONFIG_CUSE is not set
+CONFIG_GENERIC_ACL=y
+
+#
+# Caches
+#
+# CONFIG_FSCACHE is not set
+
+#
+# CD-ROM/DVD Filesystems
+#
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=y
+CONFIG_UDF_NLS=y
+
+#
+# DOS/FAT/NT Filesystems
+#
+CONFIG_FAT_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_FAT_DEFAULT_CODEPAGE=437
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+# CONFIG_NTFS_FS is not set
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_TMPFS_XATTR=y
+# CONFIG_HUGETLB_PAGE is not set
+CONFIG_CONFIGFS_FS=y
+CONFIG_MISC_FILESYSTEMS=y
+# CONFIG_ADFS_FS is not set
+CONFIG_AFFS_FS=y
+CONFIG_HFS_FS=y
+CONFIG_HFSPLUS_FS=y
+CONFIG_BEFS_FS=y
+# CONFIG_BEFS_DEBUG is not set
+CONFIG_BFS_FS=y
+CONFIG_EFS_FS=y
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_FS_DEBUG=0
+CONFIG_JFFS2_FS_WRITEBUFFER=y
+# CONFIG_JFFS2_FS_WBUF_VERIFY is not set
+# CONFIG_JFFS2_SUMMARY is not set
+CONFIG_JFFS2_FS_XATTR=y
+CONFIG_JFFS2_FS_POSIX_ACL=y
+CONFIG_JFFS2_FS_SECURITY=y
+CONFIG_JFFS2_COMPRESSION_OPTIONS=y
+CONFIG_JFFS2_ZLIB=y
+# CONFIG_JFFS2_LZO is not set
+CONFIG_JFFS2_RTIME=y
+CONFIG_JFFS2_RUBIN=y
+# CONFIG_JFFS2_CMODE_NONE is not set
+CONFIG_JFFS2_CMODE_PRIORITY=y
+# CONFIG_JFFS2_CMODE_SIZE is not set
+# CONFIG_JFFS2_CMODE_FAVOURLZO is not set
+# CONFIG_UBIFS_FS is not set
+# CONFIG_LOGFS is not set
+CONFIG_CRAMFS=y
+# CONFIG_SQUASHFS is not set
+CONFIG_VXFS_FS=y
+CONFIG_MINIX_FS=y
+CONFIG_MINIX_FS_NATIVE_ENDIAN=y
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_QNX6FS_FS is not set
+CONFIG_ROMFS_FS=y
+CONFIG_ROMFS_BACKED_BY_BLOCK=y
+# CONFIG_ROMFS_BACKED_BY_MTD is not set
+# CONFIG_ROMFS_BACKED_BY_BOTH is not set
+CONFIG_ROMFS_ON_BLOCK=y
+CONFIG_PSTORE=y
+CONFIG_PSTORE_CONSOLE=y
+CONFIG_PSTORE_RAM=y
+CONFIG_SYSV_FS=y
+CONFIG_UFS_FS=y
+# CONFIG_UFS_FS_WRITE is not set
+# CONFIG_UFS_DEBUG is not set
+# CONFIG_F2FS_FS is not set
+CONFIG_NETWORK_FILESYSTEMS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V2=y
+CONFIG_NFS_V3=y
+# CONFIG_NFS_V3_ACL is not set
+# CONFIG_NFS_V4 is not set
+# CONFIG_NFS_SWAP is not set
+CONFIG_ROOT_NFS=y
+CONFIG_NFSD=y
+CONFIG_NFSD_V3=y
+# CONFIG_NFSD_V3_ACL is not set
+# CONFIG_NFSD_V4 is not set
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_NFS_COMMON=y
+CONFIG_SUNRPC=y
+# CONFIG_SUNRPC_DEBUG is not set
+# CONFIG_CEPH_FS is not set
+# CONFIG_CIFS is not set
+# CONFIG_NCP_FS is not set
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_737=y
+CONFIG_NLS_CODEPAGE_775=y
+CONFIG_NLS_CODEPAGE_850=y
+CONFIG_NLS_CODEPAGE_852=y
+CONFIG_NLS_CODEPAGE_855=y
+CONFIG_NLS_CODEPAGE_857=y
+CONFIG_NLS_CODEPAGE_860=y
+CONFIG_NLS_CODEPAGE_861=y
+CONFIG_NLS_CODEPAGE_862=y
+CONFIG_NLS_CODEPAGE_863=y
+CONFIG_NLS_CODEPAGE_864=y
+CONFIG_NLS_CODEPAGE_865=y
+CONFIG_NLS_CODEPAGE_866=y
+CONFIG_NLS_CODEPAGE_869=y
+CONFIG_NLS_CODEPAGE_936=y
+CONFIG_NLS_CODEPAGE_950=y
+CONFIG_NLS_CODEPAGE_932=y
+CONFIG_NLS_CODEPAGE_949=y
+CONFIG_NLS_CODEPAGE_874=y
+CONFIG_NLS_ISO8859_8=y
+CONFIG_NLS_CODEPAGE_1250=y
+CONFIG_NLS_CODEPAGE_1251=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_ISO8859_2=y
+CONFIG_NLS_ISO8859_3=y
+CONFIG_NLS_ISO8859_4=y
+CONFIG_NLS_ISO8859_5=y
+CONFIG_NLS_ISO8859_6=y
+CONFIG_NLS_ISO8859_7=y
+CONFIG_NLS_ISO8859_9=y
+CONFIG_NLS_ISO8859_13=y
+CONFIG_NLS_ISO8859_14=y
+CONFIG_NLS_ISO8859_15=y
+CONFIG_NLS_KOI8_R=y
+CONFIG_NLS_KOI8_U=y
+# CONFIG_NLS_MAC_ROMAN is not set
+# CONFIG_NLS_MAC_CELTIC is not set
+# CONFIG_NLS_MAC_CENTEURO is not set
+# CONFIG_NLS_MAC_CROATIAN is not set
+# CONFIG_NLS_MAC_CYRILLIC is not set
+# CONFIG_NLS_MAC_GAELIC is not set
+# CONFIG_NLS_MAC_GREEK is not set
+# CONFIG_NLS_MAC_ICELAND is not set
+# CONFIG_NLS_MAC_INUIT is not set
+# CONFIG_NLS_MAC_ROMANIAN is not set
+# CONFIG_NLS_MAC_TURKISH is not set
+CONFIG_NLS_UTF8=y
+# CONFIG_DLM is not set
+
+#
+# Kernel hacking
+#
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+# CONFIG_PRINTK_TIME is not set
+CONFIG_DEFAULT_MESSAGE_LOGLEVEL=4
+CONFIG_ENABLE_WARN_DEPRECATED=y
+CONFIG_ENABLE_MUST_CHECK=y
+CONFIG_FRAME_WARN=1024
+# CONFIG_MAGIC_SYSRQ is not set
+# CONFIG_STRIP_ASM_SYMS is not set
+# CONFIG_READABLE_ASM is not set
+# CONFIG_UNUSED_SYMBOLS is not set
+# CONFIG_DEBUG_FS is not set
+# CONFIG_HEADERS_CHECK is not set
+# CONFIG_DEBUG_SECTION_MISMATCH is not set
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_SHIRQ is not set
+# CONFIG_LOCKUP_DETECTOR is not set
+# CONFIG_PANIC_ON_OOPS is not set
+CONFIG_PANIC_ON_OOPS_VALUE=0
+# CONFIG_DETECT_HUNG_TASK is not set
+CONFIG_SCHED_DEBUG=y
+CONFIG_SCHEDSTATS=y
+CONFIG_TIMER_STATS=y
+# CONFIG_DEBUG_OBJECTS is not set
+# CONFIG_DEBUG_SLAB is not set
+CONFIG_HAVE_DEBUG_KMEMLEAK=y
+# CONFIG_DEBUG_KMEMLEAK is not set
+CONFIG_DEBUG_PREEMPT=y
+# CONFIG_DEBUG_RT_MUTEXES is not set
+# CONFIG_RT_MUTEX_TESTER is not set
+# CONFIG_DEBUG_SPINLOCK is not set
+# CONFIG_DEBUG_MUTEXES is not set
+# CONFIG_DEBUG_LOCK_ALLOC is not set
+# CONFIG_PROVE_LOCKING is not set
+# CONFIG_LOCK_STAT is not set
+# CONFIG_DEBUG_ATOMIC_SLEEP is not set
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+CONFIG_STACKTRACE=y
+# CONFIG_DEBUG_STACK_USAGE is not set
+# CONFIG_DEBUG_KOBJECT is not set
+# CONFIG_DEBUG_INFO is not set
+# CONFIG_DEBUG_VM is not set
+# CONFIG_DEBUG_WRITECOUNT is not set
+# CONFIG_DEBUG_MEMORY_INIT is not set
+# CONFIG_DEBUG_LIST is not set
+# CONFIG_TEST_LIST_SORT is not set
+# CONFIG_DEBUG_SG is not set
+# CONFIG_DEBUG_NOTIFIERS is not set
+# CONFIG_DEBUG_CREDENTIALS is not set
+# CONFIG_BOOT_PRINTK_DELAY is not set
+
+#
+# RCU Debugging
+#
+# CONFIG_PROVE_RCU_DELAY is not set
+# CONFIG_SPARSE_RCU_POINTER is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+CONFIG_RCU_CPU_STALL_TIMEOUT=21
+CONFIG_RCU_CPU_STALL_VERBOSE=y
+# CONFIG_RCU_CPU_STALL_INFO is not set
+# CONFIG_RCU_TRACE is not set
+# CONFIG_BACKTRACE_SELF_TEST is not set
+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
+# CONFIG_DEBUG_PER_CPU_MAPS is not set
+# CONFIG_NOTIFIER_ERROR_INJECTION is not set
+# CONFIG_FAULT_INJECTION is not set
+# CONFIG_DEBUG_PAGEALLOC is not set
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
+CONFIG_HAVE_DYNAMIC_FTRACE=y
+CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_HAVE_C_RECORDMCOUNT=y
+CONFIG_TRACING_SUPPORT=y
+CONFIG_FTRACE=y
+# CONFIG_FUNCTION_TRACER is not set
+# CONFIG_IRQSOFF_TRACER is not set
+# CONFIG_PREEMPT_TRACER is not set
+# CONFIG_SCHED_TRACER is not set
+# CONFIG_ENABLE_DEFAULT_TRACERS is not set
+# CONFIG_TRACER_SNAPSHOT is not set
+CONFIG_BRANCH_PROFILE_NONE=y
+# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
+# CONFIG_PROFILE_ALL_BRANCHES is not set
+# CONFIG_STACK_TRACER is not set
+# CONFIG_BLK_DEV_IO_TRACE is not set
+# CONFIG_PROBE_EVENTS is not set
+# CONFIG_DMA_API_DEBUG is not set
+# CONFIG_ATOMIC64_SELFTEST is not set
+# CONFIG_ASYNC_RAID6_TEST is not set
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_ARCH_KGDB=y
+# CONFIG_KGDB is not set
+# CONFIG_TEST_STRING_HELPERS is not set
+# CONFIG_TEST_KSTRTOX is not set
+CONFIG_EARLY_PRINTK=y
+# CONFIG_CMDLINE_BOOL is not set
+# CONFIG_DEBUG_STACKOVERFLOW is not set
+# CONFIG_RUNTIME_DEBUG is not set
+# CONFIG_DEBUG_ZBOOT is not set
+
+#
+# Security options
+#
+# CONFIG_KEYS is not set
+# CONFIG_SECURITY_DMESG_RESTRICT is not set
+# CONFIG_SECURITY is not set
+# CONFIG_SECURITYFS is not set
+CONFIG_DEFAULT_SECURITY_DAC=y
+CONFIG_DEFAULT_SECURITY=""
+CONFIG_XOR_BLOCKS=y
+CONFIG_ASYNC_CORE=y
+CONFIG_ASYNC_MEMCPY=y
+CONFIG_ASYNC_XOR=y
+CONFIG_ASYNC_PQ=y
+CONFIG_ASYNC_RAID6_RECOV=y
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_ALGAPI2=y
+CONFIG_CRYPTO_AEAD=y
+CONFIG_CRYPTO_AEAD2=y
+CONFIG_CRYPTO_BLKCIPHER=y
+CONFIG_CRYPTO_BLKCIPHER2=y
+CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_RNG2=y
+CONFIG_CRYPTO_PCOMP2=y
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_MANAGER2=y
+# CONFIG_CRYPTO_USER is not set
+CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y
+CONFIG_CRYPTO_GF128MUL=y
+CONFIG_CRYPTO_NULL=y
+# CONFIG_CRYPTO_PCRYPT is not set
+CONFIG_CRYPTO_WORKQUEUE=y
+CONFIG_CRYPTO_CRYPTD=y
+CONFIG_CRYPTO_AUTHENC=y
+
+#
+# Authenticated Encryption with Associated Data
+#
+# CONFIG_CRYPTO_CCM is not set
+# CONFIG_CRYPTO_GCM is not set
+# CONFIG_CRYPTO_SEQIV is not set
+
+#
+# Block modes
+#
+CONFIG_CRYPTO_CBC=y
+# CONFIG_CRYPTO_CTR is not set
+# CONFIG_CRYPTO_CTS is not set
+CONFIG_CRYPTO_ECB=y
+CONFIG_CRYPTO_LRW=y
+CONFIG_CRYPTO_PCBC=y
+# CONFIG_CRYPTO_XTS is not set
+
+#
+# Hash modes
+#
+# CONFIG_CRYPTO_CMAC is not set
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_XCBC=y
+# CONFIG_CRYPTO_VMAC is not set
+
+#
+# Digest
+#
+CONFIG_CRYPTO_CRC32C=y
+# CONFIG_CRYPTO_CRC32 is not set
+# CONFIG_CRYPTO_GHASH is not set
+CONFIG_CRYPTO_MD4=y
+CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_MICHAEL_MIC=y
+# CONFIG_CRYPTO_RMD128 is not set
+# CONFIG_CRYPTO_RMD160 is not set
+# CONFIG_CRYPTO_RMD256 is not set
+# CONFIG_CRYPTO_RMD320 is not set
+CONFIG_CRYPTO_SHA1=y
+CONFIG_CRYPTO_SHA256=y
+CONFIG_CRYPTO_SHA512=y
+CONFIG_CRYPTO_TGR192=y
+CONFIG_CRYPTO_WP512=y
+
+#
+# Ciphers
+#
+CONFIG_CRYPTO_AES=y
+CONFIG_CRYPTO_ANUBIS=y
+CONFIG_CRYPTO_ARC4=y
+CONFIG_CRYPTO_BLOWFISH=y
+CONFIG_CRYPTO_BLOWFISH_COMMON=y
+CONFIG_CRYPTO_CAMELLIA=y
+CONFIG_CRYPTO_CAST_COMMON=y
+CONFIG_CRYPTO_CAST5=y
+CONFIG_CRYPTO_CAST6=y
+CONFIG_CRYPTO_DES=y
+CONFIG_CRYPTO_FCRYPT=y
+CONFIG_CRYPTO_KHAZAD=y
+# CONFIG_CRYPTO_SALSA20 is not set
+# CONFIG_CRYPTO_SEED is not set
+CONFIG_CRYPTO_SERPENT=y
+CONFIG_CRYPTO_TEA=y
+CONFIG_CRYPTO_TWOFISH=y
+CONFIG_CRYPTO_TWOFISH_COMMON=y
+
+#
+# Compression
+#
+CONFIG_CRYPTO_DEFLATE=y
+# CONFIG_CRYPTO_ZLIB is not set
+# CONFIG_CRYPTO_LZO is not set
+
+#
+# Random Number Generation
+#
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+# CONFIG_CRYPTO_USER_API_HASH is not set
+# CONFIG_CRYPTO_USER_API_SKCIPHER is not set
+CONFIG_CRYPTO_HW=y
+# CONFIG_CRYPTO_DEV_HIFN_795X is not set
+# CONFIG_BINARY_PRINTF is not set
+
+#
+# Library routines
+#
+CONFIG_RAID6_PQ=y
+CONFIG_BITREVERSE=y
+CONFIG_NO_GENERIC_PCI_IOPORT_MAP=y
+CONFIG_GENERIC_PCI_IOMAP=y
+CONFIG_GENERIC_IO=y
+# CONFIG_CRC_CCITT is not set
+CONFIG_CRC16=y
+# CONFIG_CRC_T10DIF is not set
+CONFIG_CRC_ITU_T=y
+CONFIG_CRC32=y
+# CONFIG_CRC32_SELFTEST is not set
+CONFIG_CRC32_SLICEBY8=y
+# CONFIG_CRC32_SLICEBY4 is not set
+# CONFIG_CRC32_SARWATE is not set
+# CONFIG_CRC32_BIT is not set
+# CONFIG_CRC7 is not set
+CONFIG_LIBCRC32C=y
+# CONFIG_CRC8 is not set
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=y
+# CONFIG_XZ_DEC is not set
+# CONFIG_XZ_DEC_BCJ is not set
+CONFIG_DECOMPRESS_GZIP=y
+CONFIG_GENERIC_ALLOCATOR=y
+CONFIG_REED_SOLOMON=y
+CONFIG_REED_SOLOMON_ENC8=y
+CONFIG_REED_SOLOMON_DEC8=y
+CONFIG_TEXTSEARCH=y
+CONFIG_TEXTSEARCH_KMP=y
+CONFIG_TEXTSEARCH_BM=y
+CONFIG_TEXTSEARCH_FSM=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_DMA=y
+CONFIG_CPU_RMAP=y
+CONFIG_DQL=y
+CONFIG_NLATTR=y
+CONFIG_GENERIC_ATOMIC64=y
+CONFIG_ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE=y
+CONFIG_AVERAGE=y
+# CONFIG_CORDIC is not set
+# CONFIG_DDR is not set
+CONFIG_HAVE_KVM=y
+# CONFIG_VIRTUALIZATION is not set
diff --git a/arch/mips/configs/malta_defconfig b/arch/mips/configs/malta_defconfig
index ce1d3ee..58c98bee 100644
--- a/arch/mips/configs/malta_defconfig
+++ b/arch/mips/configs/malta_defconfig
@@ -226,6 +226,7 @@
CONFIG_MAC80211_MESH=y
CONFIG_RFKILL=m
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
CONFIG_CONNECTOR=m
CONFIG_MTD=y
CONFIG_MTD_CHAR=y
diff --git a/arch/mips/configs/malta_eva_defconfig b/arch/mips/configs/malta_eva_defconfig
new file mode 100644
index 0000000..0284888
--- /dev/null
+++ b/arch/mips/configs/malta_eva_defconfig
@@ -0,0 +1,447 @@
+CONFIG_MIPS_MALTA=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_CPU_MIPS32_R2=y
+CONFIG_CPU_MIPS32_R2_EVA=y
+CONFIG_MIPS_MT_SMP=y
+CONFIG_NR_CPUS=2
+CONFIG_HZ_100=y
+CONFIG_SYSVIPC=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_LOG_BUF_SHIFT=15
+CONFIG_NAMESPACES=y
+CONFIG_RELAY=y
+CONFIG_EXPERT=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_PCI=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=y
+CONFIG_NET_KEY_MIGRATE=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_NET_IPIP=m
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+CONFIG_INET_XFRM_MODE_TRANSPORT=m
+CONFIG_INET_XFRM_MODE_TUNNEL=m
+CONFIG_TCP_MD5SIG=y
+CONFIG_IPV6_PRIVACY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_TUNNEL=m
+CONFIG_IPV6_MROUTE=y
+CONFIG_IPV6_PIMSM_V2=y
+CONFIG_NETWORK_SECMARK=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=m
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CT_PROTO_DCCP=m
+CONFIG_NF_CT_PROTO_UDPLITE=m
+CONFIG_NF_CONNTRACK_AMANDA=m
+CONFIG_NF_CONNTRACK_FTP=m
+CONFIG_NF_CONNTRACK_H323=m
+CONFIG_NF_CONNTRACK_IRC=m
+CONFIG_NF_CONNTRACK_PPTP=m
+CONFIG_NF_CONNTRACK_SANE=m
+CONFIG_NF_CONNTRACK_SIP=m
+CONFIG_NF_CONNTRACK_TFTP=m
+CONFIG_NF_CT_NETLINK=m
+CONFIG_NETFILTER_TPROXY=m
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
+CONFIG_NETFILTER_XT_TARGET_MARK=m
+CONFIG_NETFILTER_XT_TARGET_NFLOG=m
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
+CONFIG_NETFILTER_XT_TARGET_TPROXY=m
+CONFIG_NETFILTER_XT_TARGET_TRACE=m
+CONFIG_NETFILTER_XT_TARGET_SECMARK=m
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
+CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
+CONFIG_NETFILTER_XT_MATCH_COMMENT=m
+CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
+CONFIG_NETFILTER_XT_MATCH_DCCP=m
+CONFIG_NETFILTER_XT_MATCH_ESP=m
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
+CONFIG_NETFILTER_XT_MATCH_HELPER=m
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
+CONFIG_NETFILTER_XT_MATCH_LENGTH=m
+CONFIG_NETFILTER_XT_MATCH_LIMIT=m
+CONFIG_NETFILTER_XT_MATCH_MAC=m
+CONFIG_NETFILTER_XT_MATCH_MARK=m
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
+CONFIG_NETFILTER_XT_MATCH_OWNER=m
+CONFIG_NETFILTER_XT_MATCH_POLICY=m
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
+CONFIG_NETFILTER_XT_MATCH_QUOTA=m
+CONFIG_NETFILTER_XT_MATCH_RATEEST=m
+CONFIG_NETFILTER_XT_MATCH_REALM=m
+CONFIG_NETFILTER_XT_MATCH_RECENT=m
+CONFIG_NETFILTER_XT_MATCH_SOCKET=m
+CONFIG_NETFILTER_XT_MATCH_STATE=m
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
+CONFIG_NETFILTER_XT_MATCH_STRING=m
+CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
+CONFIG_NETFILTER_XT_MATCH_TIME=m
+CONFIG_NETFILTER_XT_MATCH_U32=m
+CONFIG_IP_VS=m
+CONFIG_IP_VS_IPV6=y
+CONFIG_IP_VS_PROTO_TCP=y
+CONFIG_IP_VS_PROTO_UDP=y
+CONFIG_IP_VS_PROTO_ESP=y
+CONFIG_IP_VS_PROTO_AH=y
+CONFIG_IP_VS_RR=m
+CONFIG_IP_VS_WRR=m
+CONFIG_IP_VS_LC=m
+CONFIG_IP_VS_WLC=m
+CONFIG_IP_VS_LBLC=m
+CONFIG_IP_VS_LBLCR=m
+CONFIG_IP_VS_DH=m
+CONFIG_IP_VS_SH=m
+CONFIG_IP_VS_SED=m
+CONFIG_IP_VS_NQ=m
+CONFIG_NF_CONNTRACK_IPV4=m
+CONFIG_IP_NF_IPTABLES=m
+CONFIG_IP_NF_MATCH_AH=m
+CONFIG_IP_NF_MATCH_ECN=m
+CONFIG_IP_NF_MATCH_TTL=m
+CONFIG_IP_NF_FILTER=m
+CONFIG_IP_NF_TARGET_REJECT=m
+CONFIG_IP_NF_TARGET_ULOG=m
+CONFIG_IP_NF_MANGLE=m
+CONFIG_IP_NF_TARGET_CLUSTERIP=m
+CONFIG_IP_NF_TARGET_ECN=m
+CONFIG_IP_NF_TARGET_TTL=m
+CONFIG_IP_NF_RAW=m
+CONFIG_IP_NF_ARPTABLES=m
+CONFIG_IP_NF_ARPFILTER=m
+CONFIG_IP_NF_ARP_MANGLE=m
+CONFIG_NF_CONNTRACK_IPV6=m
+CONFIG_IP6_NF_MATCH_AH=m
+CONFIG_IP6_NF_MATCH_EUI64=m
+CONFIG_IP6_NF_MATCH_FRAG=m
+CONFIG_IP6_NF_MATCH_OPTS=m
+CONFIG_IP6_NF_MATCH_HL=m
+CONFIG_IP6_NF_MATCH_IPV6HEADER=m
+CONFIG_IP6_NF_MATCH_MH=m
+CONFIG_IP6_NF_MATCH_RT=m
+CONFIG_IP6_NF_TARGET_HL=m
+CONFIG_IP6_NF_FILTER=m
+CONFIG_IP6_NF_TARGET_REJECT=m
+CONFIG_IP6_NF_MANGLE=m
+CONFIG_IP6_NF_RAW=m
+CONFIG_BRIDGE_NF_EBTABLES=m
+CONFIG_BRIDGE_EBT_BROUTE=m
+CONFIG_BRIDGE_EBT_T_FILTER=m
+CONFIG_BRIDGE_EBT_T_NAT=m
+CONFIG_BRIDGE_EBT_802_3=m
+CONFIG_BRIDGE_EBT_AMONG=m
+CONFIG_BRIDGE_EBT_ARP=m
+CONFIG_BRIDGE_EBT_IP=m
+CONFIG_BRIDGE_EBT_IP6=m
+CONFIG_BRIDGE_EBT_LIMIT=m
+CONFIG_BRIDGE_EBT_MARK=m
+CONFIG_BRIDGE_EBT_PKTTYPE=m
+CONFIG_BRIDGE_EBT_STP=m
+CONFIG_BRIDGE_EBT_VLAN=m
+CONFIG_BRIDGE_EBT_ARPREPLY=m
+CONFIG_BRIDGE_EBT_DNAT=m
+CONFIG_BRIDGE_EBT_MARK_T=m
+CONFIG_BRIDGE_EBT_REDIRECT=m
+CONFIG_BRIDGE_EBT_SNAT=m
+CONFIG_BRIDGE_EBT_LOG=m
+CONFIG_BRIDGE_EBT_ULOG=m
+CONFIG_BRIDGE_EBT_NFLOG=m
+CONFIG_IP_SCTP=m
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+CONFIG_VLAN_8021Q_GVRP=y
+CONFIG_ATALK=m
+CONFIG_DEV_APPLETALK=m
+CONFIG_IPDDP=m
+CONFIG_IPDDP_ENCAP=y
+CONFIG_PHONET=m
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_CLS_FLOW=m
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=y
+CONFIG_NET_ACT_GACT=m
+CONFIG_GACT_PROB=y
+CONFIG_NET_ACT_MIRRED=m
+CONFIG_NET_ACT_IPT=m
+CONFIG_NET_ACT_NAT=m
+CONFIG_NET_ACT_PEDIT=m
+CONFIG_NET_ACT_SIMP=m
+CONFIG_NET_ACT_SKBEDIT=m
+CONFIG_NET_CLS_IND=y
+CONFIG_CFG80211=m
+CONFIG_MAC80211=m
+CONFIG_MAC80211_RC_PID=y
+CONFIG_MAC80211_RC_DEFAULT_PID=y
+CONFIG_MAC80211_MESH=y
+CONFIG_RFKILL=m
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_CONNECTOR=m
+CONFIG_MTD=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_OOPS=m
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_CFI_STAA=y
+CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_UBI=m
+CONFIG_MTD_UBI_GLUEBI=m
+CONFIG_BLK_DEV_FD=m
+CONFIG_BLK_DEV_UMEM=m
+CONFIG_BLK_DEV_LOOP=m
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_NBD=m
+CONFIG_BLK_DEV_RAM=y
+CONFIG_CDROM_PKTCDVD=m
+CONFIG_ATA_OVER_ETH=m
+CONFIG_IDE=y
+CONFIG_BLK_DEV_IDECD=y
+CONFIG_IDE_GENERIC=y
+CONFIG_BLK_DEV_GENERIC=y
+CONFIG_BLK_DEV_PIIX=y
+CONFIG_BLK_DEV_IT8213=m
+CONFIG_BLK_DEV_TC86C001=m
+CONFIG_RAID_ATTRS=m
+CONFIG_SCSI=m
+CONFIG_SCSI_TGT=m
+CONFIG_BLK_DEV_SD=m
+CONFIG_CHR_DEV_ST=m
+CONFIG_CHR_DEV_OSST=m
+CONFIG_BLK_DEV_SR=m
+CONFIG_BLK_DEV_SR_VENDOR=y
+CONFIG_CHR_DEV_SG=m
+CONFIG_SCSI_MULTI_LUN=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_LOGGING=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_SCSI_FC_ATTRS=m
+CONFIG_ISCSI_TCP=m
+CONFIG_BLK_DEV_3W_XXXX_RAID=m
+CONFIG_SCSI_3W_9XXX=m
+CONFIG_SCSI_ACARD=m
+CONFIG_SCSI_AACRAID=m
+CONFIG_SCSI_AIC7XXX=m
+CONFIG_AIC7XXX_RESET_DELAY_MS=15000
+# CONFIG_AIC7XXX_DEBUG_ENABLE is not set
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=m
+CONFIG_MD_LINEAR=m
+CONFIG_MD_RAID0=m
+CONFIG_MD_RAID1=m
+CONFIG_MD_RAID10=m
+CONFIG_MD_RAID456=m
+CONFIG_MD_MULTIPATH=m
+CONFIG_MD_FAULTY=m
+CONFIG_BLK_DEV_DM=m
+CONFIG_DM_CRYPT=m
+CONFIG_DM_SNAPSHOT=m
+CONFIG_DM_MIRROR=m
+CONFIG_DM_ZERO=m
+CONFIG_DM_MULTIPATH=m
+CONFIG_NETDEVICES=y
+CONFIG_BONDING=m
+CONFIG_DUMMY=m
+CONFIG_EQUALIZER=m
+CONFIG_IFB=m
+CONFIG_MACVLAN=m
+CONFIG_TUN=m
+CONFIG_VETH=m
+# CONFIG_NET_VENDOR_3COM is not set
+CONFIG_PCNET32=y
+CONFIG_CHELSIO_T3=m
+CONFIG_AX88796=m
+CONFIG_NETXEN_NIC=m
+CONFIG_TC35815=m
+CONFIG_MARVELL_PHY=m
+CONFIG_DAVICOM_PHY=m
+CONFIG_QSEMI_PHY=m
+CONFIG_LXT_PHY=m
+CONFIG_CICADA_PHY=m
+CONFIG_VITESSE_PHY=m
+CONFIG_SMSC_PHY=m
+CONFIG_BROADCOM_PHY=m
+CONFIG_ICPLUS_PHY=m
+CONFIG_REALTEK_PHY=m
+CONFIG_ATMEL=m
+CONFIG_PCI_ATMEL=m
+CONFIG_PRISM54=m
+CONFIG_HOSTAP=m
+CONFIG_HOSTAP_FIRMWARE=y
+CONFIG_HOSTAP_FIRMWARE_NVRAM=y
+CONFIG_HOSTAP_PLX=m
+CONFIG_HOSTAP_PCI=m
+CONFIG_IPW2100=m
+CONFIG_IPW2100_MONITOR=y
+CONFIG_LIBERTAS=m
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO_I8042 is not set
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+# CONFIG_HWMON is not set
+CONFIG_FB=y
+CONFIG_FB_CIRRUS=y
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_HID=m
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_CMOS=y
+CONFIG_UIO=m
+CONFIG_UIO_CIF=m
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+CONFIG_REISERFS_FS=m
+CONFIG_REISERFS_PROC_INFO=y
+CONFIG_REISERFS_FS_XATTR=y
+CONFIG_REISERFS_FS_POSIX_ACL=y
+CONFIG_REISERFS_FS_SECURITY=y
+CONFIG_JFS_FS=m
+CONFIG_JFS_POSIX_ACL=y
+CONFIG_JFS_SECURITY=y
+CONFIG_XFS_FS=m
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_QUOTA=y
+CONFIG_QFMT_V2=y
+CONFIG_FUSE_FS=m
+CONFIG_ISO9660_FS=m
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=m
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_AFFS_FS=m
+CONFIG_HFS_FS=m
+CONFIG_HFSPLUS_FS=m
+CONFIG_BEFS_FS=m
+CONFIG_BFS_FS=m
+CONFIG_EFS_FS=m
+CONFIG_JFFS2_FS=m
+CONFIG_JFFS2_FS_XATTR=y
+CONFIG_JFFS2_COMPRESSION_OPTIONS=y
+CONFIG_JFFS2_RUBIN=y
+CONFIG_CRAMFS=m
+CONFIG_VXFS_FS=m
+CONFIG_MINIX_FS=m
+CONFIG_ROMFS_FS=m
+CONFIG_SYSV_FS=m
+CONFIG_UFS_FS=m
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_NFSD=y
+CONFIG_NFSD_V3=y
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_CODEPAGE_737=m
+CONFIG_NLS_CODEPAGE_775=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_855=m
+CONFIG_NLS_CODEPAGE_857=m
+CONFIG_NLS_CODEPAGE_860=m
+CONFIG_NLS_CODEPAGE_861=m
+CONFIG_NLS_CODEPAGE_862=m
+CONFIG_NLS_CODEPAGE_863=m
+CONFIG_NLS_CODEPAGE_864=m
+CONFIG_NLS_CODEPAGE_865=m
+CONFIG_NLS_CODEPAGE_866=m
+CONFIG_NLS_CODEPAGE_869=m
+CONFIG_NLS_CODEPAGE_936=m
+CONFIG_NLS_CODEPAGE_950=m
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_CODEPAGE_949=m
+CONFIG_NLS_CODEPAGE_874=m
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_CODEPAGE_1250=m
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=m
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_KOI8_R=m
+CONFIG_NLS_KOI8_U=m
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_LRW=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_XCBC=m
+CONFIG_CRYPTO_MD4=m
+CONFIG_CRYPTO_SHA256=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAMELLIA=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_FCRYPT=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRC16=m
diff --git a/arch/mips/configs/maltaaprp_defconfig b/arch/mips/configs/maltaaprp_defconfig
index 93057a7..f31d047 100644
--- a/arch/mips/configs/maltaaprp_defconfig
+++ b/arch/mips/configs/maltaaprp_defconfig
@@ -80,6 +80,7 @@
CONFIG_NET_ACT_POLICE=y
CONFIG_NET_CLS_IND=y
# CONFIG_WIRELESS is not set
+CONFIG_DEVTMPFS=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_IDE=y
diff --git a/arch/mips/configs/maltasmtc_defconfig b/arch/mips/configs/maltasmtc_defconfig
index 4e54b75..86dd3b3e 100644
--- a/arch/mips/configs/maltasmtc_defconfig
+++ b/arch/mips/configs/maltasmtc_defconfig
@@ -81,6 +81,7 @@
CONFIG_NET_ACT_POLICE=y
CONFIG_NET_CLS_IND=y
# CONFIG_WIRELESS is not set
+CONFIG_DEVTMPFS=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_IDE=y
diff --git a/arch/mips/configs/maltasmvp_defconfig b/arch/mips/configs/maltasmvp_defconfig
index 8a66602..063676e 100644
--- a/arch/mips/configs/maltasmvp_defconfig
+++ b/arch/mips/configs/maltasmvp_defconfig
@@ -58,7 +58,6 @@
CONFIG_DEV_APPLETALK=m
CONFIG_IPDDP=m
CONFIG_IPDDP_ENCAP=y
-CONFIG_IPDDP_DECAP=y
CONFIG_NET_SCHED=y
CONFIG_NET_SCH_CBQ=m
CONFIG_NET_SCH_HTB=m
@@ -83,6 +82,7 @@
CONFIG_NET_ACT_POLICE=y
CONFIG_NET_CLS_IND=y
# CONFIG_WIRELESS is not set
+CONFIG_DEVTMPFS=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_IDE=y
diff --git a/arch/mips/configs/maltaup_defconfig b/arch/mips/configs/maltaup_defconfig
index 9868fc9..49121d3 100644
--- a/arch/mips/configs/maltaup_defconfig
+++ b/arch/mips/configs/maltaup_defconfig
@@ -79,6 +79,7 @@
CONFIG_NET_ACT_POLICE=y
CONFIG_NET_CLS_IND=y
# CONFIG_WIRELESS is not set
+CONFIG_DEVTMPFS=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_IDE=y
diff --git a/arch/mips/configs/ranchu64_defconfig b/arch/mips/configs/ranchu64_defconfig
new file mode 100644
index 0000000..fb29b84
--- /dev/null
+++ b/arch/mips/configs/ranchu64_defconfig
@@ -0,0 +1,257 @@
+CONFIG_MIPS_RANCHU=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_CPU_MIPS64_R6=y
+CONFIG_64BIT=y
+CONFIG_MIPS_INCOMPATIBLE_ARCH_EMULATION=y
+# CONFIG_COMPACTION is not set
+CONFIG_HZ_100=y
+CONFIG_PREEMPT=y
+# CONFIG_SECCOMP is not set
+CONFIG_MIPS_APPENDED_DTB=y
+CONFIG_SYSVIPC=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_TINY_PREEMPT_RCU=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=16
+CONFIG_CGROUPS=y
+CONFIG_CGROUP_DEBUG=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_RESOURCE_COUNTERS=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_IPC_NS is not set
+# CONFIG_PID_NS is not set
+# CONFIG_NET_NS is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_EMBEDDED=y
+CONFIG_SLAB=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_BINFMT_MISC=y
+CONFIG_PM_AUTOSLEEP=y
+CONFIG_PM_WAKELOCKS=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_NET_IPIP=y
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_ARPD=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=y
+CONFIG_INET_ESP=y
+CONFIG_INET_IPCOMP=y
+# CONFIG_INET_LRO is not set
+CONFIG_IPV6_PRIVACY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_NETFILTER=y
+# CONFIG_BRIDGE_NETFILTER is not set
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CT_PROTO_DCCP=y
+CONFIG_NF_CT_PROTO_SCTP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SANE=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NETFILTER_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFLOG=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_TARGET_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_TRACE=y
+CONFIG_NETFILTER_XT_TARGET_SECMARK=y
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y
+CONFIG_NETFILTER_XT_MATCH_CONNBYTES=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_QTAGUID=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y
+CONFIG_NETFILTER_XT_MATCH_SOCKET=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETFILTER_XT_MATCH_STRING=y
+CONFIG_NETFILTER_XT_MATCH_TIME=y
+CONFIG_NETFILTER_XT_MATCH_U32=y
+CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_TARGET_REJECT_SKERR=y
+CONFIG_NF_NAT_IPV4=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NETMAP=y
+CONFIG_IP_NF_TARGET_REDIRECT=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_SECURITY=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_NF_CONNTRACK_IPV6=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_TARGET_REJECT_SKERR=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+CONFIG_NF_NAT_IPV6=y
+CONFIG_IP6_NF_TARGET_MASQUERADE=y
+CONFIG_IP6_NF_TARGET_NPT=y
+CONFIG_BRIDGE=y
+CONFIG_VLAN_8021Q=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NET_CLS_U32=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_U32=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_9P=y
+CONFIG_NET_9P_VIRTIO=y
+CONFIG_CONNECTOR=y
+CONFIG_MTD=y
+CONFIG_MTD_BLOCK=y
+CONFIG_PROC_DEVICETREE=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_NBD=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_VIRTIO_BLK=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_DEBUG=y
+CONFIG_DM_CRYPT=y
+CONFIG_DM_UEVENT=y
+CONFIG_DM_VERITY=y
+CONFIG_NETDEVICES=y
+CONFIG_TUN=y
+CONFIG_VIRTIO_NET=y
+CONFIG_SMC91X=y
+CONFIG_SMSC911X=y
+CONFIG_PPP=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_MPPE=y
+CONFIG_PPPOLAC=y
+CONFIG_PPPOPNS=y
+CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_GOLDFISH_EVENTS=y
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_MISC=y
+# CONFIG_SERIO_I8042 is not set
+# CONFIG_SERIO_SERPORT is not set
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_GOLDFISH_TTY=y
+# CONFIG_DEVMEM is not set
+# CONFIG_DEVKMEM is not set
+CONFIG_VIRTIO_CONSOLE=y
+CONFIG_POWER_SUPPLY=y
+CONFIG_BATTERY_GOLDFISH=y
+# CONFIG_HWMON is not set
+CONFIG_FB=y
+CONFIG_FB_MODE_HELPERS=y
+CONFIG_FB_TILEBLITTING=y
+CONFIG_FB_GOLDFISH=y
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_USB_OTG_WAKELOCK=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_G_ANDROID=y
+CONFIG_MMC=y
+CONFIG_SWITCH=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_GOLDFISH=y
+CONFIG_VIRTIO_BALLOON=y
+CONFIG_VIRTIO_MMIO=y
+CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y
+CONFIG_STAGING=y
+CONFIG_ANDROID=y
+CONFIG_ASHMEM=y
+CONFIG_ANDROID_LOW_MEMORY_KILLER=y
+CONFIG_SYNC=y
+CONFIG_GOLDFISH_AUDIO=y
+CONFIG_GOLDFISH=y
+CONFIG_GOLDFISH_PIPE=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_FUSE_FS=y
+CONFIG_FSCACHE=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_FS_XATTR=y
+CONFIG_NFSD=y
+CONFIG_NFSD_V3=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_FRAME_WARN=1024
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_SCHEDSTATS=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_FUNCTION_TRACER=y
+CONFIG_SCHED_TRACER=y
+CONFIG_BLK_DEV_IO_TRACE=y
+# CONFIG_EARLY_PRINTK is not set
+CONFIG_SECURITY=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_CRYPTO_PCBC=y
+CONFIG_CRYPTO_SHA256=y
+CONFIG_CRYPTO_TWOFISH=y
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_XZ_DEC=y
diff --git a/arch/mips/configs/ranchu_defconfig b/arch/mips/configs/ranchu_defconfig
new file mode 100644
index 0000000..195ce8a
--- /dev/null
+++ b/arch/mips/configs/ranchu_defconfig
@@ -0,0 +1,254 @@
+CONFIG_MIPS_RANCHU=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_HIGHMEM=y
+# CONFIG_COMPACTION is not set
+CONFIG_HZ_100=y
+CONFIG_PREEMPT=y
+# CONFIG_SECCOMP is not set
+CONFIG_MIPS_APPENDED_DTB=y
+CONFIG_SYSVIPC=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_TINY_PREEMPT_RCU=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=16
+CONFIG_CGROUPS=y
+CONFIG_CGROUP_DEBUG=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_RESOURCE_COUNTERS=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_NAMESPACES=y
+# CONFIG_UTS_NS is not set
+# CONFIG_IPC_NS is not set
+# CONFIG_PID_NS is not set
+# CONFIG_NET_NS is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_EMBEDDED=y
+CONFIG_SLAB=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_BINFMT_MISC=y
+CONFIG_PM_AUTOSLEEP=y
+CONFIG_PM_WAKELOCKS=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_NET_IPIP=y
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_ARPD=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=y
+CONFIG_INET_ESP=y
+CONFIG_INET_IPCOMP=y
+# CONFIG_INET_LRO is not set
+CONFIG_IPV6_PRIVACY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_NETFILTER=y
+# CONFIG_BRIDGE_NETFILTER is not set
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CT_PROTO_DCCP=y
+CONFIG_NF_CT_PROTO_SCTP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SANE=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NETFILTER_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFLOG=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_TARGET_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_TRACE=y
+CONFIG_NETFILTER_XT_TARGET_SECMARK=y
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y
+CONFIG_NETFILTER_XT_MATCH_CONNBYTES=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_QTAGUID=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y
+CONFIG_NETFILTER_XT_MATCH_SOCKET=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETFILTER_XT_MATCH_STRING=y
+CONFIG_NETFILTER_XT_MATCH_TIME=y
+CONFIG_NETFILTER_XT_MATCH_U32=y
+CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_TARGET_REJECT_SKERR=y
+CONFIG_NF_NAT_IPV4=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NETMAP=y
+CONFIG_IP_NF_TARGET_REDIRECT=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_SECURITY=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_NF_CONNTRACK_IPV6=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_TARGET_REJECT_SKERR=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+CONFIG_NF_NAT_IPV6=y
+CONFIG_IP6_NF_TARGET_MASQUERADE=y
+CONFIG_IP6_NF_TARGET_NPT=y
+CONFIG_BRIDGE=y
+CONFIG_VLAN_8021Q=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NET_CLS_U32=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_U32=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_9P=y
+CONFIG_NET_9P_VIRTIO=y
+CONFIG_CONNECTOR=y
+CONFIG_MTD=y
+CONFIG_MTD_BLOCK=y
+CONFIG_PROC_DEVICETREE=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_NBD=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_VIRTIO_BLK=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_DEBUG=y
+CONFIG_DM_CRYPT=y
+CONFIG_DM_UEVENT=y
+CONFIG_DM_VERITY=y
+CONFIG_NETDEVICES=y
+CONFIG_TUN=y
+CONFIG_VIRTIO_NET=y
+CONFIG_SMC91X=y
+CONFIG_SMSC911X=y
+CONFIG_PPP=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_MPPE=y
+CONFIG_PPPOLAC=y
+CONFIG_PPPOPNS=y
+CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_GOLDFISH_EVENTS=y
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_MISC=y
+# CONFIG_SERIO_I8042 is not set
+# CONFIG_SERIO_SERPORT is not set
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_GOLDFISH_TTY=y
+# CONFIG_DEVMEM is not set
+# CONFIG_DEVKMEM is not set
+CONFIG_VIRTIO_CONSOLE=y
+CONFIG_POWER_SUPPLY=y
+CONFIG_BATTERY_GOLDFISH=y
+# CONFIG_HWMON is not set
+CONFIG_FB=y
+CONFIG_FB_MODE_HELPERS=y
+CONFIG_FB_TILEBLITTING=y
+CONFIG_FB_GOLDFISH=y
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_USB_OTG_WAKELOCK=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_G_ANDROID=y
+CONFIG_MMC=y
+CONFIG_SWITCH=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_GOLDFISH=y
+CONFIG_VIRTIO_BALLOON=y
+CONFIG_VIRTIO_MMIO=y
+CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y
+CONFIG_STAGING=y
+CONFIG_ANDROID=y
+CONFIG_ASHMEM=y
+CONFIG_ANDROID_LOW_MEMORY_KILLER=y
+CONFIG_SYNC=y
+CONFIG_GOLDFISH_AUDIO=y
+CONFIG_GOLDFISH=y
+CONFIG_GOLDFISH_PIPE=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_FUSE_FS=y
+CONFIG_FSCACHE=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_FS_XATTR=y
+CONFIG_NFSD=y
+CONFIG_NFSD_V3=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_SCHEDSTATS=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_FUNCTION_TRACER=y
+CONFIG_SCHED_TRACER=y
+CONFIG_BLK_DEV_IO_TRACE=y
+# CONFIG_EARLY_PRINTK is not set
+CONFIG_SECURITY=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_CRYPTO_PCBC=y
+CONFIG_CRYPTO_SHA256=y
+CONFIG_CRYPTO_TWOFISH=y
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_XZ_DEC=y
diff --git a/arch/mips/goldfish/Makefile b/arch/mips/goldfish/Makefile
new file mode 100644
index 0000000..5ad7dd6
--- /dev/null
+++ b/arch/mips/goldfish/Makefile
@@ -0,0 +1,19 @@
+#
+# Makefile for Goldfish virtual platform
+#
+
+ccflags-y += -Wno-error
+
+obj-$(CONFIG_GOLDFISH) += goldfish-platform.o
+obj-$(CONFIG_GOLDFISH) += goldfish-interrupt.o
+obj-$(CONFIG_GOLDFISH) += goldfish-time.o
+obj-$(CONFIG_GOLDFISH) += pm.o
+
+obj-$(CONFIG_MIPS_GOLDFISH_SWITCH) += switch.o
+
+ifneq ($(CONFIG_MIPS_APPENDED_DTB), y)
+obj-$(CONFIG_MIPS_RANCHU) += ranchu.dtb.o
+
+$(obj)/%.dtb: $(obj)/%.dts
+ $(call if_changed,dtc)
+endif
diff --git a/arch/mips/goldfish/Platform b/arch/mips/goldfish/Platform
new file mode 100644
index 0000000..1dc5cb4
--- /dev/null
+++ b/arch/mips/goldfish/Platform
@@ -0,0 +1,8 @@
+#
+# Goldfish
+#
+platform-$(CONFIG_GOLDFISH) := goldfish/
+cflags-$(CONFIG_GOLDFISH) += -Iarch/mips/include/asm/mach-goldfish -Iarch/mips/goldfish/include
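+# 64-bit sign-extended KSEG0 load address (0x80010000; physical 0x00010000)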
+load-$(CONFIG_GOLDFISH) += 0xffffffff80010000
+all-$(CONFIG_GOLDFISH) := $(COMPRESSION_FNAME).bin
+
diff --git a/arch/mips/goldfish/goldfish-interrupt.c b/arch/mips/goldfish/goldfish-interrupt.c
new file mode 100644
index 0000000..46d85a6
--- /dev/null
+++ b/arch/mips/goldfish/goldfish-interrupt.c
@@ -0,0 +1,153 @@
+/* arch/mips/goldfish/goldfish-interrupt.c
+**
+** Copyright (C) 2007 Google, Inc.
+**
+** This software is licensed under the terms of the GNU General Public
+** License version 2, as published by the Free Software Foundation, and
+** may be copied, distributed, and modified under those terms.
+**
+** This program is distributed in the hope that it will be useful,
+** but WITHOUT ANY WARRANTY; without even the implied warranty of
+** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+** GNU General Public License for more details.
+**
+*/
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/irqchip.h>
+#include <linux/of.h>
+
+#include <mach/hardware.h>
+#include <mach/irq.h>
+#include <asm/io.h>
+#include <asm/irq_cpu.h>
+#include <asm/setup.h>
+
+#define GOLDFISH_INTERRUPT_STATUS	0x00	/* number of pending interrupts */
+#define GOLDFISH_INTERRUPT_NUMBER 0x04
+#define GOLDFISH_INTERRUPT_DISABLE_ALL 0x08
+#define GOLDFISH_INTERRUPT_DISABLE 0x0c
+#define GOLDFISH_INTERRUPT_ENABLE 0x10
+
+static void __iomem *goldfish_interrupt;
+
+void goldfish_mask_irq(struct irq_data *d)
+{
+ writel(d->irq-GOLDFISH_IRQ_BASE,
+ goldfish_interrupt + GOLDFISH_INTERRUPT_DISABLE);
+}
+
+void goldfish_unmask_irq(struct irq_data *d)
+{
+ writel(d->irq-GOLDFISH_IRQ_BASE,
+ goldfish_interrupt + GOLDFISH_INTERRUPT_ENABLE);
+}
+
+static struct irq_chip goldfish_irq_chip = {
+ .name = "goldfish",
+ .irq_mask = goldfish_mask_irq,
+ .irq_mask_ack = goldfish_mask_irq,
+ .irq_unmask = goldfish_unmask_irq,
+};
+
+void goldfish_init_irq(void)
+{
+ unsigned int i;
+ uint32_t base;
+ struct device_node *dn;
+
+ if ((dn = of_find_node_by_name(NULL, "goldfish_pic")) == NULL) {
+ panic("goldfish_init_irq() failed to "
+			"fetch device node 'goldfish_pic'!\n");
+ }
+
+ if (of_property_read_u32(dn, "reg", &base) < 0) {
+ panic("goldfish_init_irq() failed to "
+			"fetch device base address property 'reg'!\n");
+ }
+
+ goldfish_interrupt = IO_ADDRESS(base);
+
+ /*
+ * Disable all interrupt sources
+ */
+ writel(1, goldfish_interrupt + GOLDFISH_INTERRUPT_DISABLE_ALL);
+
+ for (i = GOLDFISH_IRQ_BASE; i < GOLDFISH_IRQ_BASE+32; i++) {
+ irq_set_chip(i, &goldfish_irq_chip);
+ irq_set_handler(i, handle_level_irq);
+#if 0
+ set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
+#endif
+ }
+}
+
+void goldfish_irq_dispatch(void)
+{
+ uint32_t irq;
+	/*
+	 * Read the number of the highest-priority pending interrupt
+	 */
+ irq = readl(goldfish_interrupt + GOLDFISH_INTERRUPT_NUMBER);
+ do_IRQ(GOLDFISH_IRQ_BASE+irq);
+}
+
+void goldfish_fiq_dispatch(void)
+{
+ panic("goldfish_fiq_dispatch");
+}
+
+asmlinkage void plat_irq_dispatch(void)
+{
+ unsigned int pending = read_c0_cause() & read_c0_status() & ST0_IM;
+
+ if (pending & CAUSEF_IP2)
+ goldfish_irq_dispatch();
+ else if (pending & CAUSEF_IP3)
+ goldfish_fiq_dispatch();
+ else if (pending & CAUSEF_IP7)
+ do_IRQ(MIPS_CPU_IRQ_BASE + 7);
+ else
+ spurious_interrupt();
+}
+
+static struct irqaction cascade = {
+ .handler = no_action,
+ .flags = IRQF_NO_THREAD,
+ .name = "cascade",
+};
+
+static void mips_timer_dispatch(void)
+{
+ do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_IRQ_COMPARE);
+}
+
+void __init goldfish_pic_init(struct device_node *node, struct device_node *parent)
+{
+ mips_cpu_irq_init();
+ goldfish_init_irq();
+
+ if (cpu_has_vint) {
+ set_vi_handler(MIPS_CPU_IRQ_PIC, goldfish_irq_dispatch);
+		set_vi_handler(MIPS_CPU_IRQ_FIQ, goldfish_fiq_dispatch);
+ }
+ setup_irq(MIPS_CPU_IRQ_BASE+MIPS_CPU_IRQ_PIC, &cascade);
+ setup_irq(MIPS_CPU_IRQ_BASE+MIPS_CPU_IRQ_FIQ, &cascade);
+
+ if (cpu_has_vint)
+ set_vi_handler(MIPS_CPU_IRQ_COMPARE, mips_timer_dispatch);
+}
+
+void __init arch_init_irq(void)
+{
+ irqchip_init();
+}
+
+static const struct of_device_id irqchip_of_match_goldfish_pic
+__used __section(__irqchip_of_table)
+= { .compatible = "generic,goldfish-pic", .data = goldfish_pic_init };
diff --git a/arch/mips/goldfish/goldfish-platform.c b/arch/mips/goldfish/goldfish-platform.c
new file mode 100644
index 0000000..286b0e3
--- /dev/null
+++ b/arch/mips/goldfish/goldfish-platform.c
@@ -0,0 +1,103 @@
+/* arch/mips/mach-goldfish/goldfish-platform.c
+**
+** Copyright (C) 2007 Google, Inc.
+**
+** This software is licensed under the terms of the GNU General Public
+** License version 2, as published by the Free Software Foundation, and
+** may be copied, distributed, and modified under those terms.
+**
+** This program is distributed in the hope that it will be useful,
+** but WITHOUT ANY WARRANTY; without even the implied warranty of
+** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+** GNU General Public License for more details.
+**
+*/
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/of_platform.h>
+#include <linux/of_fdt.h>
+#include <linux/bootmem.h>
+
+#include <mach/hardware.h>
+#include <mach/irq.h>
+#include <asm/io.h>
+#include <asm/bootinfo.h>
+#include <asm/mips-boards/generic.h>
+
+void __init prom_init(void)
+{
+ char *cmdline = (char *)fw_arg0;
+ strcpy(arcs_cmdline, cmdline);
+}
+
+void prom_free_prom_memory(void)
+{
+}
+
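+/*
+ * Early console output goes to the PUT_CHAR register of the goldfish
+ * TTY at physical 0x1f002000 (see goldfish_tty@1f002000 in ranchu.dts),
+ * accessed uncached through (C)KSEG1.
+ */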
+#ifdef CONFIG_64BIT
+#define GOLDFISH_TTY_PUT_CHAR (*(volatile unsigned int *)0xffffffffbf002000)
+#else
+#define GOLDFISH_TTY_PUT_CHAR (*(volatile unsigned int *)0xbf002000)
+#endif
+
+void prom_putchar(int c)
+{
+ GOLDFISH_TTY_PUT_CHAR = c;
+}
+
+const char *get_system_type(void)
+{
+ return "MIPS-Goldfish";
+}
+
+void __init plat_mem_setup(void)
+{
+	/*
+	 * Load the built-in or appended device tree.
+	 * Parsing its chosen node registers our
+	 * memory range with the kernel.
+	 */
+#ifdef CONFIG_MIPS_APPENDED_DTB
+ pr_info("Try setup using Appended DTB\n");
+ __dt_setup_arch(&__appended_dtb);
+#else
+ pr_info("Try setup using Built-in DTB\n");
+ __dt_setup_arch(&__dtb_start);
+#endif
+}
+
+void __init device_tree_init(void)
+{
+ unsigned long base, size;
+
+ if (!initial_boot_params)
+ return;
+
+ base = virt_to_phys((void *)initial_boot_params);
+ size = be32_to_cpu(initial_boot_params->totalsize);
+
+	/* Before we do anything, let's reserve the DT blob */
+ reserve_bootmem(base, size, BOOTMEM_DEFAULT);
+
+ unflatten_device_tree();
+}
+
+static struct of_device_id __initdata goldfish_ids[] = {
+ { .compatible = "simple-bus", },
+ {},
+};
+
+int __init plat_of_setup(void)
+{
+ if (!of_have_populated_dt())
+ panic("device tree not present");
+
+ return of_platform_populate(NULL, goldfish_ids, NULL, NULL);
+}
+
+arch_initcall(plat_of_setup);
diff --git a/arch/mips/goldfish/goldfish-time.c b/arch/mips/goldfish/goldfish-time.c
new file mode 100644
index 0000000..7ce637a
--- /dev/null
+++ b/arch/mips/goldfish/goldfish-time.c
@@ -0,0 +1,112 @@
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <linux/of.h>
+
+#include <mach/hardware.h>
+#include <mach/irq.h>
+#include <asm/io.h>
+#include <asm/time.h>
+#include <asm/bootinfo.h>
+#include <asm/div64.h>
+
+#define GOLDFISH_TIMER_LOW 0x00
+#define GOLDFISH_TIMER_HIGH 0x04
+
+/*
+ * Estimate CPU frequency. Sets mips_hpt_frequency as a side-effect
+ */
+static unsigned int __init estimate_cpu_frequency(void)
+{
+ uint32_t base;
+ struct device_node *dn;
+ void __iomem *rtc_base;
+ unsigned int start, count;
+ unsigned long rtcstart, rtcdelta;
+ unsigned int prid = read_c0_prid() & 0xffff00;
+
+ if ((dn = of_find_node_by_name(NULL, "goldfish_timer")) == NULL) {
+		panic("estimate_cpu_frequency() failed to "
+			"fetch device node 'goldfish_timer'!\n");
+ }
+
+ if (of_property_read_u32(dn, "reg", &base) < 0) {
+		panic("estimate_cpu_frequency() failed to "
+			"fetch device base address property 'reg'!\n");
+ }
+
+ rtc_base = IO_ADDRESS(base);
+
+ /*
+ * poll the nanosecond resolution RTC for 1s
+ * to calibrate the CPU frequency
+ */
+ rtcstart = readl(rtc_base+GOLDFISH_TIMER_LOW);
+ start = read_c0_count();
+ do {
+ rtcdelta = readl(rtc_base+GOLDFISH_TIMER_LOW) - rtcstart;
+ } while (rtcdelta < 1000000000);
+ count = read_c0_count() - start;
+
+ mips_hpt_frequency = count;
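+	/*
+	 * On most cores the Count register advances at half the CPU
+	 * clock; the 20Kc and 25Kf count at the full clock rate, so
+	 * only double the estimate for the other CPUs.
+	 */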
+ if ((prid != (PRID_COMP_MIPS | PRID_IMP_20KC)) &&
+ (prid != (PRID_COMP_MIPS | PRID_IMP_25KF)))
+ count *= 2;
+
+ count += 5000; /* round */
+ count -= count%10000;
+
+ return count;
+}
+
+unsigned long cpu_khz;
+
+void plat_time_init(void)
+{
+ unsigned int est_freq;
+
+ est_freq = estimate_cpu_frequency();
+
+ printk(KERN_INFO "CPU frequency %d.%02d MHz\n", est_freq/1000000,
+ (est_freq%1000000)*100/1000000);
+
+ cpu_khz = est_freq / 1000;
+
+}
+
+/**
+ * save_time_delta - Save the offset between system time and RTC time
+ * @delta: pointer to timespec to store delta
+ * @rtc: pointer to timespec for current RTC time
+ *
+ * Return a delta between the system time and the RTC time, such
+ * that system time can be restored later with restore_time_delta()
+ */
+void save_time_delta(struct timespec *delta, struct timespec *rtc)
+{
+ set_normalized_timespec(delta,
+ __current_kernel_time().tv_sec - rtc->tv_sec,
+ __current_kernel_time().tv_nsec - rtc->tv_nsec);
+}
+EXPORT_SYMBOL(save_time_delta);
+
+/**
+ * restore_time_delta - Restore the current system time
+ * @delta: delta returned by save_time_delta()
+ * @rtc: pointer to timespec for current RTC time
+ */
+void restore_time_delta(struct timespec *delta, struct timespec *rtc)
+{
+ struct timespec ts;
+
+ set_normalized_timespec(&ts,
+ delta->tv_sec + rtc->tv_sec,
+ delta->tv_nsec + rtc->tv_nsec);
+
+ do_settimeofday(&ts);
+}
+EXPORT_SYMBOL(restore_time_delta);
diff --git a/arch/mips/goldfish/include/mach/hardware.h b/arch/mips/goldfish/include/mach/hardware.h
new file mode 100644
index 0000000..ce054f1
--- /dev/null
+++ b/arch/mips/goldfish/include/mach/hardware.h
@@ -0,0 +1,3 @@
+#ifdef CONFIG_GOLDFISH
+#include <asm/mach-goldfish/hardware.h>
+#endif
diff --git a/arch/mips/goldfish/include/mach/irq.h b/arch/mips/goldfish/include/mach/irq.h
new file mode 100644
index 0000000..10ffa67
--- /dev/null
+++ b/arch/mips/goldfish/include/mach/irq.h
@@ -0,0 +1,3 @@
+#ifdef CONFIG_GOLDFISH
+#include <asm/mach-goldfish/irq.h>
+#endif
diff --git a/arch/mips/goldfish/include/mach/time.h b/arch/mips/goldfish/include/mach/time.h
new file mode 100644
index 0000000..984ed3a
--- /dev/null
+++ b/arch/mips/goldfish/include/mach/time.h
@@ -0,0 +1,3 @@
+#ifdef CONFIG_GOLDFISH
+#include <asm/mach-goldfish/time.h>
+#endif
diff --git a/arch/mips/goldfish/pm.c b/arch/mips/goldfish/pm.c
new file mode 100644
index 0000000..745c173
--- /dev/null
+++ b/arch/mips/goldfish/pm.c
@@ -0,0 +1,45 @@
+/* arch/mips/goldfish/pm.c
+ *
+ * Goldfish Power Management Routines
+ *
+ * Copyright (C) 2007 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/pm.h>
+#include <linux/suspend.h>
+
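+/*
+ * Arch-provided idle hook; when set, it executes the CPU's wait
+ * instruction to idle until the next interrupt.
+ */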
+extern void (*cpu_wait)(void);
+
+static int goldfish_pm_enter(suspend_state_t state)
+{
+ if (cpu_wait)
+ (*cpu_wait)();
+ return 0;
+}
+
+static struct platform_suspend_ops goldfish_pm_ops = {
+ .enter = goldfish_pm_enter,
+ .valid = suspend_valid_only_mem,
+};
+
+static int __init goldfish_pm_init(void)
+{
+ suspend_set_ops(&goldfish_pm_ops);
+ return 0;
+}
+
+device_initcall(goldfish_pm_init);
+
diff --git a/arch/mips/goldfish/ranchu.dts b/arch/mips/goldfish/ranchu.dts
new file mode 100644
index 0000000..0b618f1
--- /dev/null
+++ b/arch/mips/goldfish/ranchu.dts
@@ -0,0 +1,180 @@
+/dts-v1/;
+
+/ {
+ interrupt-parent = <0x8000>;
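+	/* 0x8000 is the phandle of the goldfish_pic node below */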
+ #size-cells = <0x1>;
+ #address-cells = <0x1>;
+ compatible = "mti,goldfish";
+ model = "ranchu";
+
+ virtio_mmio@1f011e00 {
+ interrupts = <0x27>;
+ reg = <0x1f011e00 0x200>;
+ compatible = "virtio,mmio";
+ };
+
+ virtio_mmio@1f011c00 {
+ interrupts = <0x26>;
+ reg = <0x1f011c00 0x200>;
+ compatible = "virtio,mmio";
+ };
+
+ virtio_mmio@1f011a00 {
+ interrupts = <0x25>;
+ reg = <0x1f011a00 0x200>;
+ compatible = "virtio,mmio";
+ };
+
+ virtio_mmio@1f011800 {
+ interrupts = <0x24>;
+ reg = <0x1f011800 0x200>;
+ compatible = "virtio,mmio";
+ };
+
+ virtio_mmio@1f011600 {
+ interrupts = <0x23>;
+ reg = <0x1f011600 0x200>;
+ compatible = "virtio,mmio";
+ };
+
+ virtio_mmio@1f011400 {
+ interrupts = <0x22>;
+ reg = <0x1f011400 0x200>;
+ compatible = "virtio,mmio";
+ };
+
+ virtio_mmio@1f011200 {
+ interrupts = <0x21>;
+ reg = <0x1f011200 0x200>;
+ compatible = "virtio,mmio";
+ };
+
+ virtio_mmio@1f011000 {
+ interrupts = <0x20>;
+ reg = <0x1f011000 0x200>;
+ compatible = "virtio,mmio";
+ };
+
+ virtio_mmio@1f010e00 {
+ interrupts = <0x1f>;
+ reg = <0x1f010e00 0x200>;
+ compatible = "virtio,mmio";
+ };
+
+ virtio_mmio@1f010c00 {
+ interrupts = <0x1e>;
+ reg = <0x1f010c00 0x200>;
+ compatible = "virtio,mmio";
+ };
+
+ virtio_mmio@1f010a00 {
+ interrupts = <0x1d>;
+ reg = <0x1f010a00 0x200>;
+ compatible = "virtio,mmio";
+ };
+
+ virtio_mmio@1f010800 {
+ interrupts = <0x1c>;
+ reg = <0x1f010800 0x200>;
+ compatible = "virtio,mmio";
+ };
+
+ virtio_mmio@1f010600 {
+ interrupts = <0x1b>;
+ reg = <0x1f010600 0x200>;
+ compatible = "virtio,mmio";
+ };
+
+ virtio_mmio@1f010400 {
+ interrupts = <0x1a>;
+ reg = <0x1f010400 0x200>;
+ compatible = "virtio,mmio";
+ };
+
+ virtio_mmio@1f010200 {
+ interrupts = <0x19>;
+ reg = <0x1f010200 0x200>;
+ compatible = "virtio,mmio";
+ };
+
+ virtio_mmio@1f010000 {
+ interrupts = <0x18>;
+ reg = <0x1f010000 0x200>;
+ compatible = "virtio,mmio";
+ };
+
+ goldfish_timer@1f005000 {
+ interrupts = <0xd>;
+ reg = <0x1f005000 0x1000>;
+ compatible = "generic,goldfish-timer";
+ };
+
+ goldfish_rtc@1f006000 {
+ interrupts = <0xe>;
+ reg = <0x1f006000 0x1000>;
+ compatible = "generic,goldfish-rtc";
+ };
+
+ goldfish_battery@1f007000 {
+ interrupts = <0xf>;
+ reg = <0x1f007000 0x1000>;
+ compatible = "generic,goldfish-battery";
+ };
+
+ goldfish_fb@1f008000 {
+ interrupts = <0x10>;
+ reg = <0x1f008000 0x100>;
+ compatible = "generic,goldfish-fb";
+ };
+
+ goldfish_events@1f009000 {
+ interrupts = <0x11>;
+ reg = <0x1f009000 0x1000>;
+ compatible = "generic,goldfish-events-keypad";
+ };
+
+ android_pipe@1f00a000 {
+ interrupts = <0x12>;
+ reg = <0x1f00a000 0x2000>;
+ compatible = "generic,android-pipe";
+ };
+
+ goldfish_tty@1f004000 {
+ interrupts = <0xc>;
+ reg = <0x1f004000 0x1000>;
+ compatible = "generic,goldfish-tty";
+ };
+
+ goldfish_tty@1f003000 {
+ interrupts = <0xb>;
+ reg = <0x1f003000 0x1000>;
+ compatible = "generic,goldfish-tty";
+ };
+
+ goldfish_tty@1f002000 {
+ interrupts = <0xa>;
+ reg = <0x1f002000 0x1000>;
+ compatible = "generic,goldfish-tty";
+ };
+
+ goldfish_pic@1f000000 {
+ #interrupt-cells = <0x1>;
+ phandle = <0x8000>;
+ interrupt-controller;
+ reg = <0x1f000000 0x1000>;
+ compatible = "generic,goldfish-pic";
+ };
+
+ memory {
+ reg = <0x0 0x1f000000>;
+ device_type = "memory";
+ };
+
+ cpus {
+
+ cpu@0 {
+ compatible = "mti,5KEf";
+ device_type = "cpu";
+ };
+ };
+};
diff --git a/arch/mips/goldfish/switch.c b/arch/mips/goldfish/switch.c
new file mode 100644
index 0000000..85bddfd
--- /dev/null
+++ b/arch/mips/goldfish/switch.c
@@ -0,0 +1,227 @@
+/* arch/mips/goldfish/switch.c
+**
+** Copyright (C) 2007 Google, Inc.
+**
+** This software is licensed under the terms of the GNU General Public
+** License version 2, as published by the Free Software Foundation, and
+** may be copied, distributed, and modified under those terms.
+**
+** This program is distributed in the hope that it will be useful,
+** but WITHOUT ANY WARRANTY; without even the implied warranty of
+** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+** GNU General Public License for more details.
+**
+*/
+
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <linux/platform_device.h>
+#include <asm/io.h>
+
+enum {
+ SW_NAME_LEN = 0x00,
+ SW_NAME_PTR = 0x04,
+ SW_FLAGS = 0x08,
+ SW_STATE = 0x0c,
+ SW_INT_STATUS = 0x10,
+ SW_INT_ENABLE = 0x14,
+
+ SW_FLAGS_OUTPUT = 1U << 0
+};
+
+static struct class *goldfish_switch_class;
+
+struct goldfish_switch {
+ void __iomem *base;
+ int irq;
+ uint32_t state;
+ uint32_t flags;
+ struct device *cdev;
+ struct work_struct work;
+ char name[0];
+};
+
+static irqreturn_t
+goldfish_switch_interrupt(int irq, void *dev_id)
+{
+ struct goldfish_switch *qs = dev_id;
+ uint32_t status;
+
+ status = readl(qs->base + SW_INT_STATUS);
+ if (status) {
+ qs->state = readl(qs->base + SW_STATE);
+ schedule_work(&qs->work);
+ }
+
+ return status ? IRQ_HANDLED : IRQ_NONE;
+}
+
+static ssize_t goldfish_switch_state_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct goldfish_switch *qs = dev_get_drvdata(dev);
+ uint32_t state;
+
+ if (!(qs->flags & SW_FLAGS_OUTPUT))
+ return -EPERM;
+
+	if (sscanf(buf, "%u", &state) != 1)
+ return -EINVAL;
+
+ writel(state, qs->base + SW_STATE);
+ qs->state = readl(qs->base + SW_STATE);
+ if (state != qs->state)
+ return -EINVAL;
+
+ return count;
+}
+
+static ssize_t goldfish_switch_state_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct goldfish_switch *qs = dev_get_drvdata(dev);
+	return sprintf(buf, "%u\n", qs->state);
+}
+
+static ssize_t goldfish_switch_direction_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct goldfish_switch *qs = dev_get_drvdata(dev);
+ return sprintf(buf, "%s\n", (qs->flags & SW_FLAGS_OUTPUT) ? "output" : "input");
+}
+
+
+static DEVICE_ATTR(state, S_IRUGO | S_IWUSR, goldfish_switch_state_show, goldfish_switch_state_store);
+static DEVICE_ATTR(direction, S_IRUGO, goldfish_switch_direction_show, NULL);
+
+void goldfish_switch_work(struct work_struct *work)
+{
+#if 0 /* TODO: use some other update notification */
+ struct goldfish_switch *qs = container_of(work, struct goldfish_switch, work);
+ int ret;
+ ret = sysfs_update_file(&qs->cdev->kobj, &class_device_attr_state.attr);
+#endif
+}
+
+static int goldfish_switch_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct resource *r;
+ struct goldfish_switch *qs;
+ void __iomem *base;
+ uint32_t name_len;
+ unsigned long swdev_addr;
+ unsigned long swdev_len;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (r == NULL) {
+ ret = -ENODEV;
+ goto err_no_io_base;
+ }
+ swdev_addr = r->start;
+ swdev_len = resource_size(r);
+ base = ioremap(swdev_addr, swdev_len);
+ name_len = readl(base + SW_NAME_LEN);
+
+ qs = kzalloc(sizeof(*qs) + name_len + 1, GFP_KERNEL);
+ if (qs == NULL) {
+ ret = -ENOMEM;
+ goto err_qs_alloc_failed;
+ }
+ platform_set_drvdata(pdev, qs);
+ qs->base = base;
+ r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (r == NULL) {
+ ret = -ENODEV;
+ goto err_no_irq;
+ }
+ qs->irq = r->start;
+
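+	/*
+	 * Writing the buffer address asks the device to copy its name
+	 * string (name_len bytes) into qs->name.
+	 */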
+ writel((u32)(qs->name), base + SW_NAME_PTR);
+ qs->name[name_len] = '\0';
+ writel(0, base + SW_INT_ENABLE);
+
+ qs->flags = readl(base + SW_FLAGS);
+ qs->state = readl(base + SW_STATE);
+ INIT_WORK(&qs->work, goldfish_switch_work);
+
+ qs->cdev = device_create(goldfish_switch_class, &pdev->dev, 0,
+ NULL, "%s", qs->name);
+ if (unlikely(IS_ERR(qs->cdev))) {
+ ret = PTR_ERR(qs->cdev);
+ goto err_device_create_failed;
+ }
+ dev_set_drvdata(qs->cdev, qs);
+
+ ret = device_create_file(qs->cdev, &dev_attr_state);
+ if (ret)
+ goto err_device_create_file_failed;
+
+ ret = device_create_file(qs->cdev, &dev_attr_direction);
+ if (ret)
+ goto err_device_create_file_failed;
+
+ ret = request_irq(qs->irq, goldfish_switch_interrupt, IRQF_SHARED, "goldfish_switch", qs);
+ if (ret)
+ goto err_request_irq_failed;
+ writel(1, base + SW_INT_ENABLE);
+
+ return 0;
+
+
+// free_irq(qs->irq, qs);
+err_request_irq_failed:
+err_device_create_file_failed:
+ device_unregister(qs->cdev);
+err_device_create_failed:
+err_no_irq:
+ kfree(qs);
+err_qs_alloc_failed:
+err_no_io_base:
+ printk(KERN_ERR "goldfish_switch_probe failed %d\n", ret);
+ return ret;
+}
+
+static int goldfish_switch_remove(struct platform_device *pdev)
+{
+ struct goldfish_switch *qs = platform_get_drvdata(pdev);
+ writel(0, qs->base + SW_INT_ENABLE);
+ free_irq(qs->irq, qs);
+ device_unregister(qs->cdev);
+ kfree(qs);
+ return 0;
+}
+
+static struct platform_driver goldfish_switch_driver = {
+ .probe = goldfish_switch_probe,
+ .remove = goldfish_switch_remove,
+ .driver = {
+ .name = "goldfish-switch"
+ }
+};
+
+static int __init goldfish_switch_init(void)
+{
+ goldfish_switch_class = class_create(THIS_MODULE, "switch");
+ if (IS_ERR(goldfish_switch_class))
+ return PTR_ERR(goldfish_switch_class);
+ return platform_driver_register(&goldfish_switch_driver);
+}
+
+static void goldfish_switch_exit(void)
+{
+ platform_driver_unregister(&goldfish_switch_driver);
+ class_destroy(goldfish_switch_class);
+}
+
+module_init(goldfish_switch_init);
+module_exit(goldfish_switch_exit);
+
diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild
index 9b54b7a..ae1b5f4 100644
--- a/arch/mips/include/asm/Kbuild
+++ b/arch/mips/include/asm/Kbuild
@@ -1,2 +1,15 @@
# MIPS headers
+generic-y += cputime.h
+generic-y += current.h
+generic-y += emergency-restart.h
+generic-y += local64.h
+generic-y += mutex.h
+generic-y += parport.h
+generic-y += percpu.h
+generic-y += scatterlist.h
+generic-y += sections.h
+generic-y += segment.h
+generic-y += serial.h
generic-y += trace_clock.h
+generic-y += preempt.h
+generic-y += xor.h
diff --git a/arch/mips/include/asm/addrspace.h b/arch/mips/include/asm/addrspace.h
index 13d61c0..ba0925c 100644
--- a/arch/mips/include/asm/addrspace.h
+++ b/arch/mips/include/asm/addrspace.h
@@ -6,6 +6,7 @@
* Copyright (C) 1996, 99 Ralf Baechle
* Copyright (C) 2000, 2002 Maciej W. Rozycki
* Copyright (C) 1990, 1999 by Silicon Graphics, Inc.
+ * Copyright (C) 2015 Imagination Technologies
*/
#ifndef _ASM_ADDRSPACE_H
#define _ASM_ADDRSPACE_H
@@ -94,6 +95,7 @@
* Memory segments (32bit kernel mode addresses)
* These are the traditional names used in the 32-bit universe.
*/
+#ifndef KSEG
#define KUSEG 0x00000000
#define KSEG0 0x80000000
#define KSEG1 0xa0000000
@@ -105,6 +107,7 @@
#define CKSEG1 0xa0000000
#define CKSEG2 0xc0000000
#define CKSEG3 0xe0000000
+#endif
#endif
@@ -128,6 +131,13 @@
#define XKPHYS_TO_PHYS(p) ((p) & TO_PHYS_MASK)
#define PHYS_TO_XKPHYS(cm, a) (_CONST64_(0x8000000000000000) | \
(_CONST64_(cm) << 59) | (a))
+#ifndef CAC_BASE
+#ifndef __ASSEMBLY__
+extern unsigned int mips_cca;
+#define CAC_BASE (_CONST64_(0x8000000000000000) | \
+ (_ACAST64_(mips_cca) << 59))
+#endif
+#endif
/*
* The ultimate limited of the 64-bit MIPS architecture: 2 bits for selecting
diff --git a/arch/mips/include/asm/asm.h b/arch/mips/include/asm/asm.h
index 879691d..04f1350 100644
--- a/arch/mips/include/asm/asm.h
+++ b/arch/mips/include/asm/asm.h
@@ -143,69 +143,50 @@
*/
#ifdef CONFIG_CPU_HAS_PREFETCH
-#define PREF(hint,addr) \
+#ifdef CONFIG_CPU_MIPSR6
+
+#define PREF(hint,addr) \
+ .set push; \
+ .set mips64r6; \
+ pref hint, addr; \
+ .set pop
+
+
+#define PREFX(hint, addr)
+
+#else /* !CONFIG_CPU_MIPSR6 */
+
+#define PREF(hint,addr) \
.set push; \
.set mips4; \
pref hint, addr; \
.set pop
-#define PREFX(hint,addr) \
+#ifdef CONFIG_EVA
+#define PREFE(hint,addr) \
+ .set push; \
+ .set mips4; \
+ .set eva; \
+ prefe hint, addr; \
+ .set pop
+#endif
+
+#define PREFX(hint,addr) \
.set push; \
.set mips4; \
prefx hint, addr; \
.set pop
+#endif /* CONFIG_CPU_MIPSR6 */
#else /* !CONFIG_CPU_HAS_PREFETCH */
#define PREF(hint, addr)
+#define PREFE(hint, addr)
#define PREFX(hint, addr)
#endif /* !CONFIG_CPU_HAS_PREFETCH */
/*
- * MIPS ISA IV/V movn/movz instructions and equivalents for older CPUs.
- */
-#if (_MIPS_ISA == _MIPS_ISA_MIPS1)
-#define MOVN(rd, rs, rt) \
- .set push; \
- .set reorder; \
- beqz rt, 9f; \
- move rd, rs; \
- .set pop; \
-9:
-#define MOVZ(rd, rs, rt) \
- .set push; \
- .set reorder; \
- bnez rt, 9f; \
- move rd, rs; \
- .set pop; \
-9:
-#endif /* _MIPS_ISA == _MIPS_ISA_MIPS1 */
-#if (_MIPS_ISA == _MIPS_ISA_MIPS2) || (_MIPS_ISA == _MIPS_ISA_MIPS3)
-#define MOVN(rd, rs, rt) \
- .set push; \
- .set noreorder; \
- bnezl rt, 9f; \
- move rd, rs; \
- .set pop; \
-9:
-#define MOVZ(rd, rs, rt) \
- .set push; \
- .set noreorder; \
- beqzl rt, 9f; \
- move rd, rs; \
- .set pop; \
-9:
-#endif /* (_MIPS_ISA == _MIPS_ISA_MIPS2) || (_MIPS_ISA == _MIPS_ISA_MIPS3) */
-#if (_MIPS_ISA == _MIPS_ISA_MIPS4 ) || (_MIPS_ISA == _MIPS_ISA_MIPS5) || \
- (_MIPS_ISA == _MIPS_ISA_MIPS32) || (_MIPS_ISA == _MIPS_ISA_MIPS64)
-#define MOVN(rd, rs, rt) \
- movn rd, rs, rt
-#define MOVZ(rd, rs, rt) \
- movz rd, rs, rt
-#endif /* MIPS IV, MIPS V, MIPS32 or MIPS64 */
-
-/*
* Stack alignment
*/
#if (_MIPS_SIM == _MIPS_SIM_ABI32)
@@ -253,8 +234,10 @@
#if (_MIPS_SZINT == 32)
#define INT_ADD add
#define INT_ADDU addu
-#define INT_ADDI addi
-#define INT_ADDIU addiu
+#ifndef CONFIG_CPU_MIPSR6
+#define INT_ADDI addi
+#endif
+#define INT_ADDIU addiu
#define INT_SUB sub
#define INT_SUBU subu
#define INT_L lw
@@ -270,7 +253,9 @@
#if (_MIPS_SZINT == 64)
#define INT_ADD dadd
#define INT_ADDU daddu
-#define INT_ADDI daddi
+#ifndef CONFIG_CPU_MIPSR6
+#define INT_ADDI daddi
+#endif
#define INT_ADDIU daddiu
#define INT_SUB dsub
#define INT_SUBU dsubu
@@ -290,7 +275,9 @@
#if (_MIPS_SZLONG == 32)
#define LONG_ADD add
#define LONG_ADDU addu
+#ifndef CONFIG_CPU_MIPSR6
#define LONG_ADDI addi
+#endif
#define LONG_ADDIU addiu
#define LONG_SUB sub
#define LONG_SUBU subu
@@ -313,7 +300,9 @@
#if (_MIPS_SZLONG == 64)
#define LONG_ADD dadd
#define LONG_ADDU daddu
+#ifndef CONFIG_CPU_MIPSR6
#define LONG_ADDI daddi
+#endif
#define LONG_ADDIU daddiu
#define LONG_SUB dsub
#define LONG_SUBU dsubu
@@ -339,7 +328,9 @@
#if (_MIPS_SZPTR == 32)
#define PTR_ADD add
#define PTR_ADDU addu
+#ifndef CONFIG_CPU_MIPSR6
#define PTR_ADDI addi
+#endif
#define PTR_ADDIU addiu
#define PTR_SUB sub
#define PTR_SUBU subu
@@ -364,7 +355,9 @@
#if (_MIPS_SZPTR == 64)
#define PTR_ADD dadd
#define PTR_ADDU daddu
+#ifndef CONFIG_CPU_MIPSR6
#define PTR_ADDI daddi
+#endif
#define PTR_ADDIU daddiu
#define PTR_SUB dsub
#define PTR_SUBU dsubu
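
Context for the ADDI removals above: MIPS R6 dropped the trapping add-immediate encodings (addi/daddi), so only the non-trapping ADDIU/DADDIU forms survive under CONFIG_CPU_MIPSR6. A hedged C sketch of the same width-selection idea; PTR_ADDIU_INSN and ptr_bump8 are illustrative names, not part of the patch:

	#if (_MIPS_SZPTR == 32)
	# define PTR_ADDIU_INSN "addiu"		/* 32-bit pointers */
	#else
	# define PTR_ADDIU_INSN "daddiu"	/* 64-bit pointers */
	#endif

	static inline unsigned long ptr_bump8(unsigned long p)
	{
		/* never traps on overflow, unlike the removed addi/daddi */
		asm(PTR_ADDIU_INSN " %0, %0, 8" : "+r" (p));
		return p;
	}
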
diff --git a/arch/mips/include/asm/asmmacro-32.h b/arch/mips/include/asm/asmmacro-32.h
index 2413afe..f2cd420 100644
--- a/arch/mips/include/asm/asmmacro-32.h
+++ b/arch/mips/include/asm/asmmacro-32.h
@@ -12,122 +12,6 @@
#include <asm/fpregdef.h>
#include <asm/mipsregs.h>
- .macro fpu_save_double thread status tmp1=t0
- cfc1 \tmp1, fcr31
- sdc1 $f0, THREAD_FPR0(\thread)
- sdc1 $f2, THREAD_FPR2(\thread)
- sdc1 $f4, THREAD_FPR4(\thread)
- sdc1 $f6, THREAD_FPR6(\thread)
- sdc1 $f8, THREAD_FPR8(\thread)
- sdc1 $f10, THREAD_FPR10(\thread)
- sdc1 $f12, THREAD_FPR12(\thread)
- sdc1 $f14, THREAD_FPR14(\thread)
- sdc1 $f16, THREAD_FPR16(\thread)
- sdc1 $f18, THREAD_FPR18(\thread)
- sdc1 $f20, THREAD_FPR20(\thread)
- sdc1 $f22, THREAD_FPR22(\thread)
- sdc1 $f24, THREAD_FPR24(\thread)
- sdc1 $f26, THREAD_FPR26(\thread)
- sdc1 $f28, THREAD_FPR28(\thread)
- sdc1 $f30, THREAD_FPR30(\thread)
- sw \tmp1, THREAD_FCR31(\thread)
- .endm
-
- .macro fpu_save_single thread tmp=t0
- cfc1 \tmp, fcr31
- swc1 $f0, THREAD_FPR0(\thread)
- swc1 $f1, THREAD_FPR1(\thread)
- swc1 $f2, THREAD_FPR2(\thread)
- swc1 $f3, THREAD_FPR3(\thread)
- swc1 $f4, THREAD_FPR4(\thread)
- swc1 $f5, THREAD_FPR5(\thread)
- swc1 $f6, THREAD_FPR6(\thread)
- swc1 $f7, THREAD_FPR7(\thread)
- swc1 $f8, THREAD_FPR8(\thread)
- swc1 $f9, THREAD_FPR9(\thread)
- swc1 $f10, THREAD_FPR10(\thread)
- swc1 $f11, THREAD_FPR11(\thread)
- swc1 $f12, THREAD_FPR12(\thread)
- swc1 $f13, THREAD_FPR13(\thread)
- swc1 $f14, THREAD_FPR14(\thread)
- swc1 $f15, THREAD_FPR15(\thread)
- swc1 $f16, THREAD_FPR16(\thread)
- swc1 $f17, THREAD_FPR17(\thread)
- swc1 $f18, THREAD_FPR18(\thread)
- swc1 $f19, THREAD_FPR19(\thread)
- swc1 $f20, THREAD_FPR20(\thread)
- swc1 $f21, THREAD_FPR21(\thread)
- swc1 $f22, THREAD_FPR22(\thread)
- swc1 $f23, THREAD_FPR23(\thread)
- swc1 $f24, THREAD_FPR24(\thread)
- swc1 $f25, THREAD_FPR25(\thread)
- swc1 $f26, THREAD_FPR26(\thread)
- swc1 $f27, THREAD_FPR27(\thread)
- swc1 $f28, THREAD_FPR28(\thread)
- swc1 $f29, THREAD_FPR29(\thread)
- swc1 $f30, THREAD_FPR30(\thread)
- swc1 $f31, THREAD_FPR31(\thread)
- sw \tmp, THREAD_FCR31(\thread)
- .endm
-
- .macro fpu_restore_double thread status tmp=t0
- lw \tmp, THREAD_FCR31(\thread)
- ldc1 $f0, THREAD_FPR0(\thread)
- ldc1 $f2, THREAD_FPR2(\thread)
- ldc1 $f4, THREAD_FPR4(\thread)
- ldc1 $f6, THREAD_FPR6(\thread)
- ldc1 $f8, THREAD_FPR8(\thread)
- ldc1 $f10, THREAD_FPR10(\thread)
- ldc1 $f12, THREAD_FPR12(\thread)
- ldc1 $f14, THREAD_FPR14(\thread)
- ldc1 $f16, THREAD_FPR16(\thread)
- ldc1 $f18, THREAD_FPR18(\thread)
- ldc1 $f20, THREAD_FPR20(\thread)
- ldc1 $f22, THREAD_FPR22(\thread)
- ldc1 $f24, THREAD_FPR24(\thread)
- ldc1 $f26, THREAD_FPR26(\thread)
- ldc1 $f28, THREAD_FPR28(\thread)
- ldc1 $f30, THREAD_FPR30(\thread)
- ctc1 \tmp, fcr31
- .endm
-
- .macro fpu_restore_single thread tmp=t0
- lw \tmp, THREAD_FCR31(\thread)
- lwc1 $f0, THREAD_FPR0(\thread)
- lwc1 $f1, THREAD_FPR1(\thread)
- lwc1 $f2, THREAD_FPR2(\thread)
- lwc1 $f3, THREAD_FPR3(\thread)
- lwc1 $f4, THREAD_FPR4(\thread)
- lwc1 $f5, THREAD_FPR5(\thread)
- lwc1 $f6, THREAD_FPR6(\thread)
- lwc1 $f7, THREAD_FPR7(\thread)
- lwc1 $f8, THREAD_FPR8(\thread)
- lwc1 $f9, THREAD_FPR9(\thread)
- lwc1 $f10, THREAD_FPR10(\thread)
- lwc1 $f11, THREAD_FPR11(\thread)
- lwc1 $f12, THREAD_FPR12(\thread)
- lwc1 $f13, THREAD_FPR13(\thread)
- lwc1 $f14, THREAD_FPR14(\thread)
- lwc1 $f15, THREAD_FPR15(\thread)
- lwc1 $f16, THREAD_FPR16(\thread)
- lwc1 $f17, THREAD_FPR17(\thread)
- lwc1 $f18, THREAD_FPR18(\thread)
- lwc1 $f19, THREAD_FPR19(\thread)
- lwc1 $f20, THREAD_FPR20(\thread)
- lwc1 $f21, THREAD_FPR21(\thread)
- lwc1 $f22, THREAD_FPR22(\thread)
- lwc1 $f23, THREAD_FPR23(\thread)
- lwc1 $f24, THREAD_FPR24(\thread)
- lwc1 $f25, THREAD_FPR25(\thread)
- lwc1 $f26, THREAD_FPR26(\thread)
- lwc1 $f27, THREAD_FPR27(\thread)
- lwc1 $f28, THREAD_FPR28(\thread)
- lwc1 $f29, THREAD_FPR29(\thread)
- lwc1 $f30, THREAD_FPR30(\thread)
- lwc1 $f31, THREAD_FPR31(\thread)
- ctc1 \tmp, fcr31
- .endm
-
.macro cpu_save_nonscratch thread
LONG_S s0, THREAD_REG16(\thread)
LONG_S s1, THREAD_REG17(\thread)
@@ -155,4 +39,274 @@
LONG_L ra, THREAD_REG31(\thread)
.endm
+/* without this, the preprocessor would replace the "fp" in ".set fp=64" with $30 */
+#undef fp
+
+ .macro fpu_get_fcr31 thread status tmp
+#ifdef CONFIG_CPU_MIPSR6
+ lw \status, THREAD_FCR31(\thread)
+ lui \tmp, (FPU_CSR_COND0|FPU_CSR_COND1|FPU_CSR_COND2|FPU_CSR_COND3| \
+ FPU_CSR_COND4|FPU_CSR_COND5|FPU_CSR_COND6|FPU_CSR_COND7)>>16
+ and \tmp, \status, \tmp
+ cfc1 \status, fcr31
+ or \tmp, \status, \tmp
+#else
+ cfc1 \tmp, fcr31
+#endif
+ .endm
+
+#if defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS32_R6)
+
+ /* copy stuff from MIPS64 */
+
+ .macro fpu_save_16even thread tmp=t0
+ .set push
+ SET_HARDFLOAT
+ sdc1 $f0, THREAD_FPR0_LS64(\thread)
+ sdc1 $f2, THREAD_FPR2_LS64(\thread)
+ sdc1 $f4, THREAD_FPR4_LS64(\thread)
+ sdc1 $f6, THREAD_FPR6_LS64(\thread)
+ sdc1 $f8, THREAD_FPR8_LS64(\thread)
+ sdc1 $f10, THREAD_FPR10_LS64(\thread)
+ sdc1 $f12, THREAD_FPR12_LS64(\thread)
+ sdc1 $f14, THREAD_FPR14_LS64(\thread)
+ sdc1 $f16, THREAD_FPR16_LS64(\thread)
+ sdc1 $f18, THREAD_FPR18_LS64(\thread)
+ sdc1 $f20, THREAD_FPR20_LS64(\thread)
+ sdc1 $f22, THREAD_FPR22_LS64(\thread)
+ sdc1 $f24, THREAD_FPR24_LS64(\thread)
+ sdc1 $f26, THREAD_FPR26_LS64(\thread)
+ sdc1 $f28, THREAD_FPR28_LS64(\thread)
+ sdc1 $f30, THREAD_FPR30_LS64(\thread)
+ .set pop
+ .endm
+
+ .macro fpu_save_16odd thread
+ .set push
+ SET_HARDFLOAT
+ .set fp=64
+ sdc1 $f1, THREAD_FPR1_LS64(\thread)
+ sdc1 $f3, THREAD_FPR3_LS64(\thread)
+ sdc1 $f5, THREAD_FPR5_LS64(\thread)
+ sdc1 $f7, THREAD_FPR7_LS64(\thread)
+ sdc1 $f9, THREAD_FPR9_LS64(\thread)
+ sdc1 $f11, THREAD_FPR11_LS64(\thread)
+ sdc1 $f13, THREAD_FPR13_LS64(\thread)
+ sdc1 $f15, THREAD_FPR15_LS64(\thread)
+ sdc1 $f17, THREAD_FPR17_LS64(\thread)
+ sdc1 $f19, THREAD_FPR19_LS64(\thread)
+ sdc1 $f21, THREAD_FPR21_LS64(\thread)
+ sdc1 $f23, THREAD_FPR23_LS64(\thread)
+ sdc1 $f25, THREAD_FPR25_LS64(\thread)
+ sdc1 $f27, THREAD_FPR27_LS64(\thread)
+ sdc1 $f29, THREAD_FPR29_LS64(\thread)
+ sdc1 $f31, THREAD_FPR31_LS64(\thread)
+ .set pop
+ .endm
+
+ .macro fpu_save_double thread status tmp
+ .set push
+ .set noreorder
+ SET_HARDFLOAT
+ sll \tmp, \status, 31 - _ST0_FR
+ bgez \tmp, 2f
+ nop
+ fpu_save_16odd \thread
+2:
+ fpu_save_16even \thread \tmp
+ fpu_get_fcr31 \thread \status \tmp
+ sw \tmp, THREAD_FCR31(\thread)
+ .set pop
+ .endm
+
+ .macro fpu_restore_16even thread tmp=t0
+ .set push
+ SET_HARDFLOAT
+ lw \tmp, THREAD_FCR31(\thread)
+ ldc1 $f0, THREAD_FPR0_LS64(\thread)
+ ldc1 $f2, THREAD_FPR2_LS64(\thread)
+ ldc1 $f4, THREAD_FPR4_LS64(\thread)
+ ldc1 $f6, THREAD_FPR6_LS64(\thread)
+ ldc1 $f8, THREAD_FPR8_LS64(\thread)
+ ldc1 $f10, THREAD_FPR10_LS64(\thread)
+ ldc1 $f12, THREAD_FPR12_LS64(\thread)
+ ldc1 $f14, THREAD_FPR14_LS64(\thread)
+ ldc1 $f16, THREAD_FPR16_LS64(\thread)
+ ldc1 $f18, THREAD_FPR18_LS64(\thread)
+ ldc1 $f20, THREAD_FPR20_LS64(\thread)
+ ldc1 $f22, THREAD_FPR22_LS64(\thread)
+ ldc1 $f24, THREAD_FPR24_LS64(\thread)
+ ldc1 $f26, THREAD_FPR26_LS64(\thread)
+ ldc1 $f28, THREAD_FPR28_LS64(\thread)
+ ldc1 $f30, THREAD_FPR30_LS64(\thread)
+ ctc1 \tmp, fcr31
+ .set pop
+ .endm
+
+ .macro fpu_restore_16odd thread
+ .set push
+ SET_HARDFLOAT
+ .set fp=64
+ ldc1 $f1, THREAD_FPR1_LS64(\thread)
+ ldc1 $f3, THREAD_FPR3_LS64(\thread)
+ ldc1 $f5, THREAD_FPR5_LS64(\thread)
+ ldc1 $f7, THREAD_FPR7_LS64(\thread)
+ ldc1 $f9, THREAD_FPR9_LS64(\thread)
+ ldc1 $f11, THREAD_FPR11_LS64(\thread)
+ ldc1 $f13, THREAD_FPR13_LS64(\thread)
+ ldc1 $f15, THREAD_FPR15_LS64(\thread)
+ ldc1 $f17, THREAD_FPR17_LS64(\thread)
+ ldc1 $f19, THREAD_FPR19_LS64(\thread)
+ ldc1 $f21, THREAD_FPR21_LS64(\thread)
+ ldc1 $f23, THREAD_FPR23_LS64(\thread)
+ ldc1 $f25, THREAD_FPR25_LS64(\thread)
+ ldc1 $f27, THREAD_FPR27_LS64(\thread)
+ ldc1 $f29, THREAD_FPR29_LS64(\thread)
+ ldc1 $f31, THREAD_FPR31_LS64(\thread)
+ .set pop
+ .endm
+
+ .macro fpu_restore_double thread status tmp
+ .set push
+ .set noreorder
+ sll \tmp, \status, 31 - _ST0_FR
+ bgez \tmp, 1f # 16 register mode?
+ nop
+
+ fpu_restore_16odd \thread
+1: fpu_restore_16even \thread \tmp
+ .set pop
+ .endm
+
+#else
+
+ .macro fpu_save_double thread status tmp1=t0
+ .set push
+ SET_HARDFLOAT
+ fpu_get_fcr31 \thread \status \tmp1
+ sdc1 $f0, THREAD_FPR0_LS64(\thread)
+ sdc1 $f2, THREAD_FPR2_LS64(\thread)
+ sdc1 $f4, THREAD_FPR4_LS64(\thread)
+ sdc1 $f6, THREAD_FPR6_LS64(\thread)
+ sdc1 $f8, THREAD_FPR8_LS64(\thread)
+ sdc1 $f10, THREAD_FPR10_LS64(\thread)
+ sdc1 $f12, THREAD_FPR12_LS64(\thread)
+ sdc1 $f14, THREAD_FPR14_LS64(\thread)
+ sdc1 $f16, THREAD_FPR16_LS64(\thread)
+ sdc1 $f18, THREAD_FPR18_LS64(\thread)
+ sdc1 $f20, THREAD_FPR20_LS64(\thread)
+ sdc1 $f22, THREAD_FPR22_LS64(\thread)
+ sdc1 $f24, THREAD_FPR24_LS64(\thread)
+ sdc1 $f26, THREAD_FPR26_LS64(\thread)
+ sdc1 $f28, THREAD_FPR28_LS64(\thread)
+ sdc1 $f30, THREAD_FPR30_LS64(\thread)
+ sw \tmp1, THREAD_FCR31(\thread)
+ .set pop
+ .endm
+
+ .macro fpu_save_single thread tmp=t0
+ .set push
+ SET_HARDFLOAT
+ cfc1 \tmp, fcr31
+ swc1 $f0, THREAD_FPR0_LS64(\thread)
+ swc1 $f1, THREAD_FPR1_LS64(\thread)
+ swc1 $f2, THREAD_FPR2_LS64(\thread)
+ swc1 $f3, THREAD_FPR3_LS64(\thread)
+ swc1 $f4, THREAD_FPR4_LS64(\thread)
+ swc1 $f5, THREAD_FPR5_LS64(\thread)
+ swc1 $f6, THREAD_FPR6_LS64(\thread)
+ swc1 $f7, THREAD_FPR7_LS64(\thread)
+ swc1 $f8, THREAD_FPR8_LS64(\thread)
+ swc1 $f9, THREAD_FPR9_LS64(\thread)
+ swc1 $f10, THREAD_FPR10_LS64(\thread)
+ swc1 $f11, THREAD_FPR11_LS64(\thread)
+ swc1 $f12, THREAD_FPR12_LS64(\thread)
+ swc1 $f13, THREAD_FPR13_LS64(\thread)
+ swc1 $f14, THREAD_FPR14_LS64(\thread)
+ swc1 $f15, THREAD_FPR15_LS64(\thread)
+ swc1 $f16, THREAD_FPR16_LS64(\thread)
+ swc1 $f17, THREAD_FPR17_LS64(\thread)
+ swc1 $f18, THREAD_FPR18_LS64(\thread)
+ swc1 $f19, THREAD_FPR19_LS64(\thread)
+ swc1 $f20, THREAD_FPR20_LS64(\thread)
+ swc1 $f21, THREAD_FPR21_LS64(\thread)
+ swc1 $f22, THREAD_FPR22_LS64(\thread)
+ swc1 $f23, THREAD_FPR23_LS64(\thread)
+ swc1 $f24, THREAD_FPR24_LS64(\thread)
+ swc1 $f25, THREAD_FPR25_LS64(\thread)
+ swc1 $f26, THREAD_FPR26_LS64(\thread)
+ swc1 $f27, THREAD_FPR27_LS64(\thread)
+ swc1 $f28, THREAD_FPR28_LS64(\thread)
+ swc1 $f29, THREAD_FPR29_LS64(\thread)
+ swc1 $f30, THREAD_FPR30_LS64(\thread)
+ swc1 $f31, THREAD_FPR31_LS64(\thread)
+ sw \tmp, THREAD_FCR31(\thread)
+ .set pop
+ .endm
+
+ .macro fpu_restore_double thread status tmp=t0
+ .set push
+ SET_HARDFLOAT
+ lw \tmp, THREAD_FCR31(\thread)
+ ldc1 $f0, THREAD_FPR0_LS64(\thread)
+ ldc1 $f2, THREAD_FPR2_LS64(\thread)
+ ldc1 $f4, THREAD_FPR4_LS64(\thread)
+ ldc1 $f6, THREAD_FPR6_LS64(\thread)
+ ldc1 $f8, THREAD_FPR8_LS64(\thread)
+ ldc1 $f10, THREAD_FPR10_LS64(\thread)
+ ldc1 $f12, THREAD_FPR12_LS64(\thread)
+ ldc1 $f14, THREAD_FPR14_LS64(\thread)
+ ldc1 $f16, THREAD_FPR16_LS64(\thread)
+ ldc1 $f18, THREAD_FPR18_LS64(\thread)
+ ldc1 $f20, THREAD_FPR20_LS64(\thread)
+ ldc1 $f22, THREAD_FPR22_LS64(\thread)
+ ldc1 $f24, THREAD_FPR24_LS64(\thread)
+ ldc1 $f26, THREAD_FPR26_LS64(\thread)
+ ldc1 $f28, THREAD_FPR28_LS64(\thread)
+ ldc1 $f30, THREAD_FPR30_LS64(\thread)
+ ctc1 \tmp, fcr31
+ .set pop
+ .endm
+
+ .macro fpu_restore_single thread tmp=t0
+ .set push
+ SET_HARDFLOAT
+ lw \tmp, THREAD_FCR31(\thread)
+ lwc1 $f0, THREAD_FPR0_LS64(\thread)
+ lwc1 $f1, THREAD_FPR1_LS64(\thread)
+ lwc1 $f2, THREAD_FPR2_LS64(\thread)
+ lwc1 $f3, THREAD_FPR3_LS64(\thread)
+ lwc1 $f4, THREAD_FPR4_LS64(\thread)
+ lwc1 $f5, THREAD_FPR5_LS64(\thread)
+ lwc1 $f6, THREAD_FPR6_LS64(\thread)
+ lwc1 $f7, THREAD_FPR7_LS64(\thread)
+ lwc1 $f8, THREAD_FPR8_LS64(\thread)
+ lwc1 $f9, THREAD_FPR9_LS64(\thread)
+ lwc1 $f10, THREAD_FPR10_LS64(\thread)
+ lwc1 $f11, THREAD_FPR11_LS64(\thread)
+ lwc1 $f12, THREAD_FPR12_LS64(\thread)
+ lwc1 $f13, THREAD_FPR13_LS64(\thread)
+ lwc1 $f14, THREAD_FPR14_LS64(\thread)
+ lwc1 $f15, THREAD_FPR15_LS64(\thread)
+ lwc1 $f16, THREAD_FPR16_LS64(\thread)
+ lwc1 $f17, THREAD_FPR17_LS64(\thread)
+ lwc1 $f18, THREAD_FPR18_LS64(\thread)
+ lwc1 $f19, THREAD_FPR19_LS64(\thread)
+ lwc1 $f20, THREAD_FPR20_LS64(\thread)
+ lwc1 $f21, THREAD_FPR21_LS64(\thread)
+ lwc1 $f22, THREAD_FPR22_LS64(\thread)
+ lwc1 $f23, THREAD_FPR23_LS64(\thread)
+ lwc1 $f24, THREAD_FPR24_LS64(\thread)
+ lwc1 $f25, THREAD_FPR25_LS64(\thread)
+ lwc1 $f26, THREAD_FPR26_LS64(\thread)
+ lwc1 $f27, THREAD_FPR27_LS64(\thread)
+ lwc1 $f28, THREAD_FPR28_LS64(\thread)
+ lwc1 $f29, THREAD_FPR29_LS64(\thread)
+ lwc1 $f30, THREAD_FPR30_LS64(\thread)
+ lwc1 $f31, THREAD_FPR31_LS64(\thread)
+ ctc1 \tmp, fcr31
+ .set pop
+ .endm
+
+#endif /* CONFIG_CPU_MIPS32_R2 || CONFIG_CPU_MIPS32_R6 */
#endif /* _ASM_ASMMACRO_32_H */
diff --git a/arch/mips/include/asm/asmmacro-64.h b/arch/mips/include/asm/asmmacro-64.h
index 08a527d..1f6d2a5 100644
--- a/arch/mips/include/asm/asmmacro-64.h
+++ b/arch/mips/include/asm/asmmacro-64.h
@@ -14,95 +14,123 @@
#include <asm/mipsregs.h>
.macro fpu_save_16even thread tmp=t0
- cfc1 \tmp, fcr31
- sdc1 $f0, THREAD_FPR0(\thread)
- sdc1 $f2, THREAD_FPR2(\thread)
- sdc1 $f4, THREAD_FPR4(\thread)
- sdc1 $f6, THREAD_FPR6(\thread)
- sdc1 $f8, THREAD_FPR8(\thread)
- sdc1 $f10, THREAD_FPR10(\thread)
- sdc1 $f12, THREAD_FPR12(\thread)
- sdc1 $f14, THREAD_FPR14(\thread)
- sdc1 $f16, THREAD_FPR16(\thread)
- sdc1 $f18, THREAD_FPR18(\thread)
- sdc1 $f20, THREAD_FPR20(\thread)
- sdc1 $f22, THREAD_FPR22(\thread)
- sdc1 $f24, THREAD_FPR24(\thread)
- sdc1 $f26, THREAD_FPR26(\thread)
- sdc1 $f28, THREAD_FPR28(\thread)
- sdc1 $f30, THREAD_FPR30(\thread)
- sw \tmp, THREAD_FCR31(\thread)
+ .set push
+ SET_HARDFLOAT
+ sdc1 $f0, THREAD_FPR0_LS64(\thread)
+ sdc1 $f2, THREAD_FPR2_LS64(\thread)
+ sdc1 $f4, THREAD_FPR4_LS64(\thread)
+ sdc1 $f6, THREAD_FPR6_LS64(\thread)
+ sdc1 $f8, THREAD_FPR8_LS64(\thread)
+ sdc1 $f10, THREAD_FPR10_LS64(\thread)
+ sdc1 $f12, THREAD_FPR12_LS64(\thread)
+ sdc1 $f14, THREAD_FPR14_LS64(\thread)
+ sdc1 $f16, THREAD_FPR16_LS64(\thread)
+ sdc1 $f18, THREAD_FPR18_LS64(\thread)
+ sdc1 $f20, THREAD_FPR20_LS64(\thread)
+ sdc1 $f22, THREAD_FPR22_LS64(\thread)
+ sdc1 $f24, THREAD_FPR24_LS64(\thread)
+ sdc1 $f26, THREAD_FPR26_LS64(\thread)
+ sdc1 $f28, THREAD_FPR28_LS64(\thread)
+ sdc1 $f30, THREAD_FPR30_LS64(\thread)
+ .set pop
.endm
.macro fpu_save_16odd thread
- sdc1 $f1, THREAD_FPR1(\thread)
- sdc1 $f3, THREAD_FPR3(\thread)
- sdc1 $f5, THREAD_FPR5(\thread)
- sdc1 $f7, THREAD_FPR7(\thread)
- sdc1 $f9, THREAD_FPR9(\thread)
- sdc1 $f11, THREAD_FPR11(\thread)
- sdc1 $f13, THREAD_FPR13(\thread)
- sdc1 $f15, THREAD_FPR15(\thread)
- sdc1 $f17, THREAD_FPR17(\thread)
- sdc1 $f19, THREAD_FPR19(\thread)
- sdc1 $f21, THREAD_FPR21(\thread)
- sdc1 $f23, THREAD_FPR23(\thread)
- sdc1 $f25, THREAD_FPR25(\thread)
- sdc1 $f27, THREAD_FPR27(\thread)
- sdc1 $f29, THREAD_FPR29(\thread)
- sdc1 $f31, THREAD_FPR31(\thread)
+ .set push
+ SET_HARDFLOAT
+ sdc1 $f1, THREAD_FPR1_LS64(\thread)
+ sdc1 $f3, THREAD_FPR3_LS64(\thread)
+ sdc1 $f5, THREAD_FPR5_LS64(\thread)
+ sdc1 $f7, THREAD_FPR7_LS64(\thread)
+ sdc1 $f9, THREAD_FPR9_LS64(\thread)
+ sdc1 $f11, THREAD_FPR11_LS64(\thread)
+ sdc1 $f13, THREAD_FPR13_LS64(\thread)
+ sdc1 $f15, THREAD_FPR15_LS64(\thread)
+ sdc1 $f17, THREAD_FPR17_LS64(\thread)
+ sdc1 $f19, THREAD_FPR19_LS64(\thread)
+ sdc1 $f21, THREAD_FPR21_LS64(\thread)
+ sdc1 $f23, THREAD_FPR23_LS64(\thread)
+ sdc1 $f25, THREAD_FPR25_LS64(\thread)
+ sdc1 $f27, THREAD_FPR27_LS64(\thread)
+ sdc1 $f29, THREAD_FPR29_LS64(\thread)
+ sdc1 $f31, THREAD_FPR31_LS64(\thread)
+ .set pop
+ .endm
+
+ .macro fpu_get_fcr31 thread status tmp
+#ifdef CONFIG_CPU_MIPSR6
+ lw \status, THREAD_FCR31(\thread)
+ lui \tmp, (FPU_CSR_COND0|FPU_CSR_COND1|FPU_CSR_COND2|FPU_CSR_COND3| \
+ FPU_CSR_COND4|FPU_CSR_COND5|FPU_CSR_COND6|FPU_CSR_COND7)>>16
+ and \tmp, \status, \tmp
+ cfc1 \status, fcr31
+ or \tmp, \status, \tmp
+#else
+ cfc1 \tmp, fcr31
+#endif
.endm
.macro fpu_save_double thread status tmp
- sll \tmp, \status, 5
+ .set push
+ SET_HARDFLOAT
+ sll \tmp, \status, 31 - _ST0_FR
bgez \tmp, 2f
fpu_save_16odd \thread
2:
fpu_save_16even \thread \tmp
+ fpu_get_fcr31 \thread \status \tmp
+ sw \tmp, THREAD_FCR31(\thread)
+ .set pop
.endm
.macro fpu_restore_16even thread tmp=t0
+ .set push
+ SET_HARDFLOAT
lw \tmp, THREAD_FCR31(\thread)
- ldc1 $f0, THREAD_FPR0(\thread)
- ldc1 $f2, THREAD_FPR2(\thread)
- ldc1 $f4, THREAD_FPR4(\thread)
- ldc1 $f6, THREAD_FPR6(\thread)
- ldc1 $f8, THREAD_FPR8(\thread)
- ldc1 $f10, THREAD_FPR10(\thread)
- ldc1 $f12, THREAD_FPR12(\thread)
- ldc1 $f14, THREAD_FPR14(\thread)
- ldc1 $f16, THREAD_FPR16(\thread)
- ldc1 $f18, THREAD_FPR18(\thread)
- ldc1 $f20, THREAD_FPR20(\thread)
- ldc1 $f22, THREAD_FPR22(\thread)
- ldc1 $f24, THREAD_FPR24(\thread)
- ldc1 $f26, THREAD_FPR26(\thread)
- ldc1 $f28, THREAD_FPR28(\thread)
- ldc1 $f30, THREAD_FPR30(\thread)
+ ldc1 $f0, THREAD_FPR0_LS64(\thread)
+ ldc1 $f2, THREAD_FPR2_LS64(\thread)
+ ldc1 $f4, THREAD_FPR4_LS64(\thread)
+ ldc1 $f6, THREAD_FPR6_LS64(\thread)
+ ldc1 $f8, THREAD_FPR8_LS64(\thread)
+ ldc1 $f10, THREAD_FPR10_LS64(\thread)
+ ldc1 $f12, THREAD_FPR12_LS64(\thread)
+ ldc1 $f14, THREAD_FPR14_LS64(\thread)
+ ldc1 $f16, THREAD_FPR16_LS64(\thread)
+ ldc1 $f18, THREAD_FPR18_LS64(\thread)
+ ldc1 $f20, THREAD_FPR20_LS64(\thread)
+ ldc1 $f22, THREAD_FPR22_LS64(\thread)
+ ldc1 $f24, THREAD_FPR24_LS64(\thread)
+ ldc1 $f26, THREAD_FPR26_LS64(\thread)
+ ldc1 $f28, THREAD_FPR28_LS64(\thread)
+ ldc1 $f30, THREAD_FPR30_LS64(\thread)
ctc1 \tmp, fcr31
+ .set pop
.endm
.macro fpu_restore_16odd thread
- ldc1 $f1, THREAD_FPR1(\thread)
- ldc1 $f3, THREAD_FPR3(\thread)
- ldc1 $f5, THREAD_FPR5(\thread)
- ldc1 $f7, THREAD_FPR7(\thread)
- ldc1 $f9, THREAD_FPR9(\thread)
- ldc1 $f11, THREAD_FPR11(\thread)
- ldc1 $f13, THREAD_FPR13(\thread)
- ldc1 $f15, THREAD_FPR15(\thread)
- ldc1 $f17, THREAD_FPR17(\thread)
- ldc1 $f19, THREAD_FPR19(\thread)
- ldc1 $f21, THREAD_FPR21(\thread)
- ldc1 $f23, THREAD_FPR23(\thread)
- ldc1 $f25, THREAD_FPR25(\thread)
- ldc1 $f27, THREAD_FPR27(\thread)
- ldc1 $f29, THREAD_FPR29(\thread)
- ldc1 $f31, THREAD_FPR31(\thread)
+ .set push
+ SET_HARDFLOAT
+ ldc1 $f1, THREAD_FPR1_LS64(\thread)
+ ldc1 $f3, THREAD_FPR3_LS64(\thread)
+ ldc1 $f5, THREAD_FPR5_LS64(\thread)
+ ldc1 $f7, THREAD_FPR7_LS64(\thread)
+ ldc1 $f9, THREAD_FPR9_LS64(\thread)
+ ldc1 $f11, THREAD_FPR11_LS64(\thread)
+ ldc1 $f13, THREAD_FPR13_LS64(\thread)
+ ldc1 $f15, THREAD_FPR15_LS64(\thread)
+ ldc1 $f17, THREAD_FPR17_LS64(\thread)
+ ldc1 $f19, THREAD_FPR19_LS64(\thread)
+ ldc1 $f21, THREAD_FPR21_LS64(\thread)
+ ldc1 $f23, THREAD_FPR23_LS64(\thread)
+ ldc1 $f25, THREAD_FPR25_LS64(\thread)
+ ldc1 $f27, THREAD_FPR27_LS64(\thread)
+ ldc1 $f29, THREAD_FPR29_LS64(\thread)
+ ldc1 $f31, THREAD_FPR31_LS64(\thread)
+ .set pop
.endm
.macro fpu_restore_double thread status tmp
- sll \tmp, \status, 5
+ sll \tmp, \status, 31 - _ST0_FR
bgez \tmp, 1f # 16 register mode?
fpu_restore_16odd \thread
diff --git a/arch/mips/include/asm/asmmacro.h b/arch/mips/include/asm/asmmacro.h
index 6c8342a..d8e3892 100644
--- a/arch/mips/include/asm/asmmacro.h
+++ b/arch/mips/include/asm/asmmacro.h
@@ -9,6 +9,8 @@
#define _ASM_ASMMACRO_H
#include <asm/hazards.h>
+#include <asm/asm-offsets.h>
+#include <asm/msa.h>
#ifdef CONFIG_32BIT
#include <asm/asmmacro-32.h>
@@ -20,6 +22,9 @@
#include <asm/mipsmtregs.h>
#endif
+/* without this, the preprocessor would replace the "fp" in ".set fp=64" with $30 */
+#undef fp
+
#ifdef CONFIG_MIPS_MT_SMTC
.macro local_irq_enable reg=t0
mfc0 \reg, CP0_TCSTATUS
@@ -35,7 +40,7 @@
mtc0 \reg, CP0_TCSTATUS
_ehb
.endm
-#elif defined(CONFIG_CPU_MIPSR2)
+#elif defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
.macro local_irq_enable reg=t0
ei
irq_enable_hazard
@@ -62,6 +67,17 @@
.endm
#endif /* CONFIG_MIPS_MT_SMTC */
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
+ .macro _EXT rd, rs, p, s
+ ext \rd, \rs, \p, \s
+ .endm
+#else
+ .macro _EXT rd, rs, p, s
+ srl \rd, \rs, \p
+ andi \rd, \rd, (1 << \s) - 1
+ .endm
+#endif
+
/*
* Temporary until all gas have MT ASE support
*/
@@ -89,4 +105,293 @@
.word 0x41800000 | (\rt << 16) | (\rd << 11) | (\u << 5) | (\sel)
.endm
+#ifdef TOOLCHAIN_SUPPORTS_MSA
+ .macro _cfcmsa rd, cs
+ .set push
+ .set fp=64
+ SET_HARDFLOAT
+ .set msa
+ cfcmsa \rd, $\cs
+ .set pop
+ .endm
+
+ .macro _ctcmsa cd, rs
+ .set push
+ .set fp=64
+ SET_HARDFLOAT
+ .set msa
+ ctcmsa $\cd, \rs
+ .set pop
+ .endm
+
+ .macro ld_d wd, off, base
+ .set push
+ .set fp=64
+ SET_HARDFLOAT
+ .set msa
+ ld.d $w\wd, \off(\base)
+ .set pop
+ .endm
+
+ .macro st_d wd, off, base
+ .set push
+ .set fp=64
+ SET_HARDFLOAT
+ .set msa
+ st.d $w\wd, \off(\base)
+ .set pop
+ .endm
+
+ .macro copy_u_w ws, n
+ .set push
+ .set fp=64
+ SET_HARDFLOAT
+ .set msa
+ copy_u.w $1, $w\ws[\n]
+ .set pop
+ .endm
+
+ .macro copy_u_d ws, n
+ .set push
+ .set fp=64
+ SET_HARDFLOAT
+ .set msa
+ copy_u.d $1, $w\ws[\n]
+ .set pop
+ .endm
+
+ .macro insert_w wd, n
+ .set push
+ .set fp=64
+ SET_HARDFLOAT
+ .set msa
+ insert.w $w\wd[\n], $1
+ .set pop
+ .endm
+
+ .macro insert_d wd, n
+ .set push
+ .set fp=64
+ SET_HARDFLOAT
+ .set msa
+ insert.d $w\wd[\n], $1
+ .set pop
+ .endm
+#else
+
+#ifdef CONFIG_CPU_MICROMIPS
+#define CFC_MSA_INSN 0x587e0056
+#define CTC_MSA_INSN 0x583e0816
+#define LDD_MSA_INSN 0x58000837
+#define STD_MSA_INSN 0x5800083f
+#define COPY_UW_MSA_INSN 0x58f00056
+#define COPY_UD_MSA_INSN 0x58f80056
+#define INSERT_W_MSA_INSN 0x59300816
+#define INSERT_D_MSA_INSN 0x59380816
+#else
+#define CFC_MSA_INSN 0x787e0059
+#define CTC_MSA_INSN 0x783e0819
+#define LDD_MSA_INSN 0x78000823
+#define STD_MSA_INSN 0x78000827
+#define COPY_UW_MSA_INSN 0x78f00059
+#define COPY_UD_MSA_INSN 0x78f80059
+#define INSERT_W_MSA_INSN 0x79300819
+#define INSERT_D_MSA_INSN 0x79380819
+#endif
+
+ /*
+ * Temporary until all toolchains in use include MSA support.
+ */
+ .macro _cfcmsa rd, cs
+ .set push
+ .set noat
+ .insn
+ .word CFC_MSA_INSN | (\cs << 11)
+ move \rd, $1
+ .set pop
+ .endm
+
+ .macro _ctcmsa cd, rs
+ .set push
+ .set noat
+ move $1, \rs
+ .word CTC_MSA_INSN | (\cd << 6)
+ .set pop
+ .endm
+
+ .macro ld_d wd, off, base
+ .set push
+ .set noat
+ addiu $1, \base, \off
+ .word LDD_MSA_INSN | (\wd << 6)
+ .set pop
+ .endm
+
+ .macro st_d wd, off, base
+ .set push
+ .set noat
+ addiu $1, \base, \off
+ .word STD_MSA_INSN | (\wd << 6)
+ .set pop
+ .endm
+
+ .macro copy_u_w ws, n
+ .set push
+ .set noat
+ .insn
+ .word COPY_UW_MSA_INSN | (\n << 16) | (\ws << 11)
+ .set pop
+ .endm
+
+ .macro copy_u_d ws, n
+ .set push
+ .set noat
+ .insn
+ .word COPY_UD_MSA_INSN | (\n << 16) | (\ws << 11)
+ .set pop
+ .endm
+
+ .macro insert_w wd, n
+ .set push
+ .set noat
+ .word INSERT_W_MSA_INSN | (\n << 16) | (\wd << 6)
+ .set pop
+ .endm
+
+ .macro insert_d wd, n
+ .set push
+ .set noat
+ .word INSERT_D_MSA_INSN | (\n << 16) | (\wd << 6)
+ .set pop
+ .endm
+#endif
+
+ .macro msa_save_all thread
+ st_d 0, THREAD_FPR0, \thread
+ st_d 1, THREAD_FPR1, \thread
+ st_d 2, THREAD_FPR2, \thread
+ st_d 3, THREAD_FPR3, \thread
+ st_d 4, THREAD_FPR4, \thread
+ st_d 5, THREAD_FPR5, \thread
+ st_d 6, THREAD_FPR6, \thread
+ st_d 7, THREAD_FPR7, \thread
+ st_d 8, THREAD_FPR8, \thread
+ st_d 9, THREAD_FPR9, \thread
+ st_d 10, THREAD_FPR10, \thread
+ st_d 11, THREAD_FPR11, \thread
+ st_d 12, THREAD_FPR12, \thread
+ st_d 13, THREAD_FPR13, \thread
+ st_d 14, THREAD_FPR14, \thread
+ st_d 15, THREAD_FPR15, \thread
+ st_d 16, THREAD_FPR16, \thread
+ st_d 17, THREAD_FPR17, \thread
+ st_d 18, THREAD_FPR18, \thread
+ st_d 19, THREAD_FPR19, \thread
+ st_d 20, THREAD_FPR20, \thread
+ st_d 21, THREAD_FPR21, \thread
+ st_d 22, THREAD_FPR22, \thread
+ st_d 23, THREAD_FPR23, \thread
+ st_d 24, THREAD_FPR24, \thread
+ st_d 25, THREAD_FPR25, \thread
+ st_d 26, THREAD_FPR26, \thread
+ st_d 27, THREAD_FPR27, \thread
+ st_d 28, THREAD_FPR28, \thread
+ st_d 29, THREAD_FPR29, \thread
+ st_d 30, THREAD_FPR30, \thread
+ st_d 31, THREAD_FPR31, \thread
+ .set push
+ .set noat
+ _cfcmsa $1, MSA_CSR
+ sw $1, THREAD_MSA_CSR(\thread)
+ .set pop
+ .endm
+
+ .macro msa_restore_all thread
+ .set push
+ .set noat
+ lw $1, THREAD_MSA_CSR(\thread)
+ _ctcmsa MSA_CSR, $1
+ ld_d 0, THREAD_FPR0, \thread
+ ld_d 1, THREAD_FPR1, \thread
+ ld_d 2, THREAD_FPR2, \thread
+ ld_d 3, THREAD_FPR3, \thread
+ ld_d 4, THREAD_FPR4, \thread
+ ld_d 5, THREAD_FPR5, \thread
+ ld_d 6, THREAD_FPR6, \thread
+ ld_d 7, THREAD_FPR7, \thread
+ ld_d 8, THREAD_FPR8, \thread
+ ld_d 9, THREAD_FPR9, \thread
+ ld_d 10, THREAD_FPR10, \thread
+ ld_d 11, THREAD_FPR11, \thread
+ ld_d 12, THREAD_FPR12, \thread
+ ld_d 13, THREAD_FPR13, \thread
+ ld_d 14, THREAD_FPR14, \thread
+ ld_d 15, THREAD_FPR15, \thread
+ ld_d 16, THREAD_FPR16, \thread
+ ld_d 17, THREAD_FPR17, \thread
+ ld_d 18, THREAD_FPR18, \thread
+ ld_d 19, THREAD_FPR19, \thread
+ ld_d 20, THREAD_FPR20, \thread
+ ld_d 21, THREAD_FPR21, \thread
+ ld_d 22, THREAD_FPR22, \thread
+ ld_d 23, THREAD_FPR23, \thread
+ ld_d 24, THREAD_FPR24, \thread
+ ld_d 25, THREAD_FPR25, \thread
+ ld_d 26, THREAD_FPR26, \thread
+ ld_d 27, THREAD_FPR27, \thread
+ ld_d 28, THREAD_FPR28, \thread
+ ld_d 29, THREAD_FPR29, \thread
+ ld_d 30, THREAD_FPR30, \thread
+ ld_d 31, THREAD_FPR31, \thread
+ .set pop
+ .endm
+
+ .macro msa_init_upper wd
+#ifdef CONFIG_64BIT
+ insert_d \wd, 1
+#else
+ insert_w \wd, 2
+ insert_w \wd, 3
+#endif
+ .endm
+
+ .macro msa_init_all_upper
+ .set push
+ .set noat
+ not $1, zero
+ msa_init_upper 0
+ msa_init_upper 1
+ msa_init_upper 2
+ msa_init_upper 3
+ msa_init_upper 4
+ msa_init_upper 5
+ msa_init_upper 6
+ msa_init_upper 7
+ msa_init_upper 8
+ msa_init_upper 9
+ msa_init_upper 10
+ msa_init_upper 11
+ msa_init_upper 12
+ msa_init_upper 13
+ msa_init_upper 14
+ msa_init_upper 15
+ msa_init_upper 16
+ msa_init_upper 17
+ msa_init_upper 18
+ msa_init_upper 19
+ msa_init_upper 20
+ msa_init_upper 21
+ msa_init_upper 22
+ msa_init_upper 23
+ msa_init_upper 24
+ msa_init_upper 25
+ msa_init_upper 26
+ msa_init_upper 27
+ msa_init_upper 28
+ msa_init_upper 29
+ msa_init_upper 30
+ msa_init_upper 31
+ .set pop
+ .endm
+
#endif /* _ASM_ASMMACRO_H */
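
The fallback macros above hand-encode MSA instruction words for assemblers that predate the ASE. A hedged C sketch of the same encoding arithmetic for insert.d; msa_insert_d_word is an illustrative name, and the opcode constant is the non-microMIPS one from the patch:

	#include <stdint.h>

	#define INSERT_D_MSA_INSN 0x79380819u

	static inline uint32_t msa_insert_d_word(unsigned int n, unsigned int wd)
	{
		/* n picks the doubleword element, wd the destination $w register,
		 * mirroring ".word INSERT_D_MSA_INSN | (\n << 16) | (\wd << 6)" */
		return INSERT_D_MSA_INSN | (n << 16) | (wd << 6);
	}
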
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index 08b6079..e1640d5 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -63,16 +63,34 @@
: "Ir" (i));
} else if (kernel_uses_llsc) {
int temp;
+#ifdef CONFIG_CPU_MIPSR6
+ register int temp2;
+#endif
do {
__asm__ __volatile__(
- " .set mips3 \n"
- " ll %0, %1 # atomic_add \n"
+#ifdef CONFIG_CPU_MIPSR6
+ " .set mips64r6 \n"
+#ifdef CONFIG_64BIT
+ " dla %2, %1 \n"
+#else
+ " la %2, %1 \n"
+#endif
+ " ll %0, 0(%2) # atomic_add \n"
+ " addu %0, %3 \n"
+ " sc %0, 0(%2) \n"
+ " .set mips0 \n"
+ : "=&r" (temp), "+m" (v->counter), "=&r" (temp2)
+ : "Ir" (i));
+#else
+ " .set mips3 \n"
+ " ll %0, %1 # atomic_add \n"
" addu %0, %2 \n"
" sc %0, %1 \n"
" .set mips0 \n"
: "=&r" (temp), "+m" (v->counter)
: "Ir" (i));
+#endif
} while (unlikely(!temp));
} else {
unsigned long flags;
@@ -106,16 +124,34 @@
: "Ir" (i));
} else if (kernel_uses_llsc) {
int temp;
+#ifdef CONFIG_CPU_MIPSR6
+ register int temp2;
+#endif
do {
__asm__ __volatile__(
+#ifdef CONFIG_CPU_MIPSR6
+ " .set mips64r6 \n"
+#ifdef CONFIG_64BIT
+ " dla %2, %1 \n"
+#else
+ " la %2, %1 \n"
+#endif
+ " ll %0, 0(%2) # atomic_sub \n"
+ " subu %0, %3 \n"
+ " sc %0, 0(%2) \n"
+ " .set mips0 \n"
+ : "=&r" (temp), "+m" (v->counter), "=&r" (temp2)
+ : "Ir" (i));
+#else
" .set mips3 \n"
- " ll %0, %1 # atomic_sub \n"
+ " ll %0, %1 # atomic_sub \n"
" subu %0, %2 \n"
" sc %0, %1 \n"
" .set mips0 \n"
: "=&r" (temp), "+m" (v->counter)
: "Ir" (i));
+#endif
} while (unlikely(!temp));
} else {
unsigned long flags;
@@ -150,16 +186,35 @@
: "Ir" (i));
} else if (kernel_uses_llsc) {
int temp;
+#ifdef CONFIG_CPU_MIPSR6
+ register int temp2;
+#endif
do {
__asm__ __volatile__(
+#ifdef CONFIG_CPU_MIPSR6
+ " .set mips64r6 \n"
+#ifdef CONFIG_64BIT
+ " dla %3, %2 \n"
+#else
+ " la %3, %2 \n"
+#endif
+ " ll %1, 0(%3) # atomic_add_return \n"
+ " addu %0, %1, %4 \n"
+ " sc %0, 0(%3) \n"
+ " .set mips0 \n"
+ : "=&r" (result), "=&r" (temp), "+m" (v->counter),
+ "=&r" (temp2)
+ : "Ir" (i));
+#else
" .set mips3 \n"
- " ll %1, %2 # atomic_add_return \n"
- " addu %0, %1, %3 \n"
+ " ll %1, %2 # atomic_add_return \n"
+ " addu %0, %1, %3 \n"
" sc %0, %2 \n"
" .set mips0 \n"
: "=&r" (result), "=&r" (temp), "+m" (v->counter)
: "Ir" (i));
+#endif
} while (unlikely(!result));
result = temp + i;
@@ -202,16 +257,35 @@
result = temp - i;
} else if (kernel_uses_llsc) {
int temp;
+#ifdef CONFIG_CPU_MIPSR6
+ register int temp2;
+#endif
do {
__asm__ __volatile__(
+#ifdef CONFIG_CPU_MIPSR6
+ " .set mips64r6 \n"
+#ifdef CONFIG_64BIT
+ " dla %3, %2 \n"
+#else
+ " la %3, %2 \n"
+#endif
+ " ll %1, 0(%3) # atomic_sub_return \n"
+ " subu %0, %1, %4 \n"
+ " sc %0, 0(%3) \n"
+ " .set mips0 \n"
+ : "=&r" (result), "=&r" (temp), "+m" (v->counter),
+ "=&r" (temp2)
+ : "Ir" (i));
+#else
" .set mips3 \n"
- " ll %1, %2 # atomic_sub_return \n"
+ " ll %1, %2 # atomic_sub_return \n"
" subu %0, %1, %3 \n"
" sc %0, %2 \n"
" .set mips0 \n"
: "=&r" (result), "=&r" (temp), "+m" (v->counter)
: "Ir" (i));
+#endif
} while (unlikely(!result));
result = temp - i;
@@ -264,10 +338,34 @@
: "memory");
} else if (kernel_uses_llsc) {
int temp;
+#ifdef CONFIG_CPU_MIPSR6
+ register int temp2;
+#endif
__asm__ __volatile__(
+#ifdef CONFIG_CPU_MIPSR6
+ " .set mips64r6 \n"
+#ifdef CONFIG_64BIT
+ " dla %3, %2 \n"
+#else
+ " la %3, %2 \n"
+#endif
+ "1: ll %1, 0(%3) # atomic_sub_if_positive \n"
+ " subu %0, %1, %4 \n"
+ " bltz %0, 1f \n"
+ " sc %0, 0(%3) \n"
+ " .set noreorder \n"
+ " beqz %0, 1b \n"
+ " subu %0, %1, %4 \n"
+ " .set reorder \n"
+ "1: \n"
+ " .set mips0 \n"
+ : "=&r" (result), "=&r" (temp), "+m" (v->counter),
+ "=&r" (temp2)
+ : "Ir" (i));
+#else
" .set mips3 \n"
- "1: ll %1, %2 # atomic_sub_if_positive\n"
+ "1: ll %1, %2 # atomic_sub_if_positive\n"
" subu %0, %1, %3 \n"
" bltz %0, 1f \n"
" sc %0, %2 \n"
@@ -279,6 +377,7 @@
" .set mips0 \n"
: "=&r" (result), "=&r" (temp), "+m" (v->counter)
: "Ir" (i));
+#endif
} else {
unsigned long flags;
@@ -430,16 +529,34 @@
: "Ir" (i));
} else if (kernel_uses_llsc) {
long temp;
+#ifdef CONFIG_CPU_MIPSR6
+ register long temp2;
+#endif
do {
__asm__ __volatile__(
+#ifdef CONFIG_CPU_MIPSR6
+ " .set mips64r6 \n"
+#ifdef CONFIG_64BIT
+ " dla %2, %1 \n"
+#else
+ " la %2, %1 \n"
+#endif
+ " lld %0, 0(%2) # atomic64_add \n"
+ " daddu %0, %3 \n"
+ " scd %0, 0(%2) \n"
+ " .set mips0 \n"
+ : "=&r" (temp), "+m" (v->counter), "=&r" (temp2)
+ : "Ir" (i));
+#else
" .set mips3 \n"
- " lld %0, %1 # atomic64_add \n"
+ " lld %0, %1 # atomic64_add \n"
" daddu %0, %2 \n"
" scd %0, %1 \n"
" .set mips0 \n"
: "=&r" (temp), "+m" (v->counter)
: "Ir" (i));
+#endif
} while (unlikely(!temp));
} else {
unsigned long flags;
@@ -473,9 +590,26 @@
: "Ir" (i));
} else if (kernel_uses_llsc) {
long temp;
+#ifdef CONFIG_CPU_MIPSR6
+ register long temp2;
+#endif
do {
__asm__ __volatile__(
+#ifdef CONFIG_CPU_MIPSR6
+ " .set mips64r6 \n"
+#ifdef CONFIG_64BIT
+ " dla %2, %1 \n"
+#else
+ " la %2, %1 \n"
+#endif
+ " lld %0, 0(%2) # atomic64_sub \n"
+ " dsubu %0, %3 \n"
+ " scd %0, 0(%2) \n"
+ " .set mips0 \n"
+ : "=&r" (temp), "+m" (v->counter), "=&r" (temp2)
+ : "Ir" (i));
+#else
" .set mips3 \n"
" lld %0, %1 # atomic64_sub \n"
" dsubu %0, %2 \n"
@@ -483,6 +617,7 @@
" .set mips0 \n"
: "=&r" (temp), "+m" (v->counter)
: "Ir" (i));
+#endif
} while (unlikely(!temp));
} else {
unsigned long flags;
@@ -517,9 +652,27 @@
: "Ir" (i));
} else if (kernel_uses_llsc) {
long temp;
+#ifdef CONFIG_CPU_MIPSR6
+ register long temp2;
+#endif
do {
__asm__ __volatile__(
+#ifdef CONFIG_CPU_MIPSR6
+ " .set mips64r6 \n"
+#ifdef CONFIG_64BIT
+ " dla %3, %2 \n"
+#else
+ " la %3, %2 \n"
+#endif
+ " lld %1, 0(%3) # atomic64_add_return \n"
+ " daddu %0, %1, %4 \n"
+ " scd %0, 0(%3) \n"
+ " .set mips0 \n"
+ : "=&r" (result), "=&r" (temp), "+m" (v->counter),
+ "=&r" (temp2)
+ : "Ir" (i));
+#else
" .set mips3 \n"
" lld %1, %2 # atomic64_add_return \n"
" daddu %0, %1, %3 \n"
@@ -528,6 +681,7 @@
: "=&r" (result), "=&r" (temp), "=m" (v->counter)
: "Ir" (i), "m" (v->counter)
: "memory");
+#endif
} while (unlikely(!result));
result = temp + i;
@@ -568,17 +722,36 @@
: "memory");
} else if (kernel_uses_llsc) {
long temp;
+#ifdef CONFIG_CPU_MIPSR6
+ register long temp2;
+#endif
do {
__asm__ __volatile__(
+#ifdef CONFIG_CPU_MIPSR6
+ " .set mips64r6 \n"
+#ifdef CONFIG_64BIT
+ " dla %3, %2 \n"
+#else
+ " la %3, %2 \n"
+#endif
+ " lld %1, 0(%3) # atomic64_sub_return \n"
+ " dsubu %0, %1, %4 \n"
+ " scd %0, 0(%3) \n"
+ " .set mips0 \n"
+ : "=&r" (result), "=&r" (temp), "+m" (v->counter),
+ "=&r" (temp2)
+ : "Ir" (i));
+#else
" .set mips3 \n"
- " lld %1, %2 # atomic64_sub_return \n"
+ " lld %1, %2 # atomic64_sub_return \n"
" dsubu %0, %1, %3 \n"
" scd %0, %2 \n"
" .set mips0 \n"
: "=&r" (result), "=&r" (temp), "=m" (v->counter)
: "Ir" (i), "m" (v->counter)
: "memory");
+#endif
} while (unlikely(!result));
result = temp - i;
@@ -631,10 +804,34 @@
: "memory");
} else if (kernel_uses_llsc) {
long temp;
+#ifdef CONFIG_CPU_MIPSR6
+ register long temp2;
+#endif
__asm__ __volatile__(
+#ifdef CONFIG_CPU_MIPSR6
+ " .set mips64r6 \n"
+#ifdef CONFIG_64BIT
+ " dla %3, %2 \n"
+#else
+ " la %3, %2 \n"
+#endif
+ "1: lld %1, 0(%3) # atomic64_sub_if_positive \n"
+ " dsubu %0, %1, %4 \n"
+ " bltz %0, 1f \n"
+ " scd %0, 0(%3) \n"
+ " .set noreorder \n"
+ " beqz %0, 1b \n"
+ " dsubu %0, %1, %4 \n"
+ " .set reorder \n"
+ "1: \n"
+ " .set mips0 \n"
+ : "=&r" (result), "=&r" (temp), "+m" (v->counter),
+ "=&r" (temp2)
+ : "Ir" (i));
+#else
" .set mips3 \n"
- "1: lld %1, %2 # atomic64_sub_if_positive\n"
+ "1: lld %1, %2 # atomic64_sub_if_positive\n"
" dsubu %0, %1, %3 \n"
" bltz %0, 1f \n"
" scd %0, %2 \n"
@@ -646,6 +843,7 @@
" .set mips0 \n"
: "=&r" (result), "=&r" (temp), "+m" (v->counter)
: "Ir" (i));
+#endif
} else {
unsigned long flags;
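
All the R6 branches above share one shape: materialize the address with la/dla first (the R6 ll/sc offset field shrank to 9 bits, so a bare memory operand can no longer be trusted), then run the usual LL/SC retry loop. A hedged, 64-bit-only condensation; atomic_add_sketch is illustrative, not the patch's code:

	static inline void atomic_add_sketch(int i, int *counter)
	{
		int temp;
		unsigned long addr;

		do {
			__asm__ __volatile__(
			"	.set	mips64r6	\n"
			"	dla	%1, %2		\n"	/* 32-bit builds use la */
			"	ll	%0, 0(%1)	\n"
			"	addu	%0, %3		\n"
			"	sc	%0, 0(%1)	\n"
			"	.set	mips0		\n"
			: "=&r" (temp), "=&r" (addr), "+m" (*counter)
			: "Ir" (i));
		} while (!temp);	/* sc stored 0: reservation lost, retry */
	}
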
diff --git a/arch/mips/include/asm/bitops.h b/arch/mips/include/asm/bitops.h
index 71305a8..b7142c3 100644
--- a/arch/mips/include/asm/bitops.h
+++ b/arch/mips/include/asm/bitops.h
@@ -28,6 +28,8 @@
#define __SC "sc "
#define __INS "ins "
#define __EXT "ext "
+#define __ADDU "addu "
+#define __SUBU "subu "
#elif _MIPS_SZLONG == 64
#define SZLONG_LOG 6
#define SZLONG_MASK 63UL
@@ -35,6 +37,8 @@
#define __SC "scd "
#define __INS "dins "
#define __EXT "dext "
+#define __ADDU "daddu "
+#define __SUBU "dsubu "
#endif
/*
@@ -76,6 +80,9 @@
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
int bit = nr & SZLONG_MASK;
unsigned long temp;
+#ifdef CONFIG_CPU_MIPSR6
+ register unsigned long temp2;
+#endif
if (kernel_uses_llsc && R10000_LLSC_WAR) {
__asm__ __volatile__(
@@ -87,7 +94,7 @@
" .set mips0 \n"
: "=&r" (temp), "=m" (*m)
: "ir" (1UL << bit), "m" (*m));
-#ifdef CONFIG_CPU_MIPSR2
+#if defined(CONFIG_CPU_MIPSR2)
} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
do {
__asm__ __volatile__(
@@ -97,17 +104,32 @@
: "=&r" (temp), "+m" (*m)
: "ir" (bit), "r" (~0));
} while (unlikely(!temp));
-#endif /* CONFIG_CPU_MIPSR2 */
+#endif /* CONFIG_CPU_MIPSR2 */
} else if (kernel_uses_llsc) {
do {
__asm__ __volatile__(
+#ifdef CONFIG_CPU_MIPSR6
+ " .set mips64r6 \n"
+#ifdef CONFIG_64BIT
+ " dla %2, %1 \n"
+#else
+ " la %2, %1 \n"
+#endif
+ " " __LL "%0, 0(%2) # set_bit \n"
+ " or %0, %3 \n"
+ " " __SC "%0, 0(%2) \n"
+ " .set mips0 \n"
+ : "=&r" (temp), "+m" (*m), "=&r" (temp2)
+ : "ir" (1UL << bit));
+#else
" .set mips3 \n"
- " " __LL "%0, %1 # set_bit \n"
+ " " __LL "%0, %1 # set_bit \n"
" or %0, %2 \n"
" " __SC "%0, %1 \n"
" .set mips0 \n"
: "=&r" (temp), "+m" (*m)
: "ir" (1UL << bit));
+#endif
} while (unlikely(!temp));
} else
__mips_set_bit(nr, addr);
@@ -128,6 +150,9 @@
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
int bit = nr & SZLONG_MASK;
unsigned long temp;
+#ifdef CONFIG_CPU_MIPSR6
+ register unsigned long temp2;
+#endif
if (kernel_uses_llsc && R10000_LLSC_WAR) {
__asm__ __volatile__(
@@ -139,7 +164,7 @@
" .set mips0 \n"
: "=&r" (temp), "+m" (*m)
: "ir" (~(1UL << bit)));
-#ifdef CONFIG_CPU_MIPSR2
+#if defined(CONFIG_CPU_MIPSR2)
} else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
do {
__asm__ __volatile__(
@@ -149,17 +174,32 @@
: "=&r" (temp), "+m" (*m)
: "ir" (bit));
} while (unlikely(!temp));
-#endif /* CONFIG_CPU_MIPSR2 */
+#endif /* CONFIG_CPU_MIPSR2 */
} else if (kernel_uses_llsc) {
do {
__asm__ __volatile__(
+#ifdef CONFIG_CPU_MIPSR6
+ " .set mips64r6 \n"
+#ifdef CONFIG_64BIT
+ " dla %2, %1 \n"
+#else
+ " la %2, %1 \n"
+#endif
+ " " __LL "%0, 0(%2) # clear_bit \n"
+ " and %0, %3 \n"
+ " " __SC "%0, 0(%2) \n"
+ " .set mips0 \n"
+ : "=&r" (temp), "+m" (*m), "=&r" (temp2)
+ : "ir" (~(1UL << bit)));
+#else
" .set mips3 \n"
- " " __LL "%0, %1 # clear_bit \n"
+ " " __LL "%0, %1 # clear_bit \n"
" and %0, %2 \n"
" " __SC "%0, %1 \n"
" .set mips0 \n"
: "=&r" (temp), "+m" (*m)
: "ir" (~(1UL << bit)));
+#endif
} while (unlikely(!temp));
} else
__mips_clear_bit(nr, addr);
@@ -208,16 +248,34 @@
} else if (kernel_uses_llsc) {
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
unsigned long temp;
+#ifdef CONFIG_CPU_MIPSR6
+ register unsigned long temp2;
+#endif
do {
__asm__ __volatile__(
+#ifdef CONFIG_CPU_MIPSR6
+ " .set mips64r6 \n"
+#ifdef CONFIG_64BIT
+ " dla %2, %1 \n"
+#else
+ " la %2, %1 \n"
+#endif
+ " " __LL "%0, 0(%2) # change_bit \n"
+ " xor %0, %3 \n"
+ " " __SC "%0, 0(%2) \n"
+ " .set mips0 \n"
+ : "=&r" (temp), "+m" (*m), "=&r" (temp2)
+ : "ir" (1UL << bit));
+#else
" .set mips3 \n"
- " " __LL "%0, %1 # change_bit \n"
+ " " __LL "%0, %1 # change_bit \n"
" xor %0, %2 \n"
" " __SC "%0, %1 \n"
" .set mips0 \n"
: "=&r" (temp), "+m" (*m)
: "ir" (1UL << bit));
+#endif
} while (unlikely(!temp));
} else
__mips_change_bit(nr, addr);
@@ -257,17 +315,36 @@
} else if (kernel_uses_llsc) {
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
unsigned long temp;
+#ifdef CONFIG_CPU_MIPSR6
+ register unsigned long temp2;
+#endif
do {
__asm__ __volatile__(
+#ifdef CONFIG_CPU_MIPSR6
+ " .set mips64r6 \n"
+#ifdef CONFIG_64BIT
+ " dla %3, %1 \n"
+#else
+ " la %3, %1 \n"
+#endif
+ " " __LL "%0, 0(%3) # test_and_set_bit \n"
+ " or %2, %0, %4 \n"
+ " " __SC "%2, 0(%3) \n"
+ " .set mips0 \n"
+ : "=&r" (temp), "+m" (*m), "=&r" (res),
+ "=&r" (temp2)
+ : "r" (1UL << bit));
+#else
" .set mips3 \n"
- " " __LL "%0, %1 # test_and_set_bit \n"
+ " " __LL "%0, %1 # test_and_set_bit \n"
" or %2, %0, %3 \n"
" " __SC "%2, %1 \n"
" .set mips0 \n"
: "=&r" (temp), "+m" (*m), "=&r" (res)
: "r" (1UL << bit)
: "memory");
+#endif
} while (unlikely(!res));
res = temp & (1UL << bit);
@@ -311,17 +388,36 @@
} else if (kernel_uses_llsc) {
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
unsigned long temp;
+#ifdef CONFIG_CPU_MIPSR6
+ register unsigned long temp2;
+#endif
do {
__asm__ __volatile__(
+#ifdef CONFIG_CPU_MIPSR6
+ " .set mips64r6 \n"
+#ifdef CONFIG_64BIT
+ " dla %3, %1 \n"
+#else
+ " la %3, %1 \n"
+#endif
+ " " __LL "%0, 0(%3) # test_and_set_bit \n"
+ " or %2, %0, %4 \n"
+ " " __SC "%2, 0(%3) \n"
+ " .set mips0 \n"
+ : "=&r" (temp), "+m" (*m), "=&r" (res),
+ "=&r" (temp2)
+ : "r" (1UL << bit));
+#else
" .set mips3 \n"
- " " __LL "%0, %1 # test_and_set_bit \n"
+ " " __LL "%0, %1 # test_and_set_bit \n"
" or %2, %0, %3 \n"
" " __SC "%2, %1 \n"
" .set mips0 \n"
: "=&r" (temp), "+m" (*m), "=&r" (res)
: "r" (1UL << bit)
: "memory");
+#endif
} while (unlikely(!res));
res = temp & (1UL << bit);
@@ -345,6 +441,9 @@
{
int bit = nr & SZLONG_MASK;
unsigned long res;
+#ifdef CONFIG_CPU_MIPSR6
+ register unsigned long temp2;
+#endif
smp_mb__before_llsc();
@@ -364,7 +463,7 @@
: "=&r" (temp), "+m" (*m), "=&r" (res)
: "r" (1UL << bit)
: "memory");
-#ifdef CONFIG_CPU_MIPSR2
+#if defined(CONFIG_CPU_MIPSR2)
} else if (kernel_uses_llsc && __builtin_constant_p(nr)) {
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
unsigned long temp;
@@ -386,8 +485,24 @@
do {
__asm__ __volatile__(
+#ifdef CONFIG_CPU_MIPSR6
+ " .set mips64r6 \n"
+#ifdef CONFIG_64BIT
+ " dla %3, %1 \n"
+#else
+ " la %3, %1 \n"
+#endif
+ " " __LL "%0, 0(%3) # test_and_clear_bit \n"
+ " or %2, %0, %4 \n"
+ " xor %2, %4 \n"
+ " " __SC "%2, 0(%3) \n"
+ " .set mips0 \n"
+ : "=&r" (temp), "+m" (*m), "=&r" (res),
+ "=&r" (temp2)
+ : "r" (1UL << bit));
+#else
" .set mips3 \n"
- " " __LL "%0, %1 # test_and_clear_bit \n"
+ " " __LL "%0, %1 # test_and_clear_bit \n"
" or %2, %0, %3 \n"
" xor %2, %3 \n"
" " __SC "%2, %1 \n"
@@ -395,6 +510,7 @@
: "=&r" (temp), "+m" (*m), "=&r" (res)
: "r" (1UL << bit)
: "memory");
+#endif
} while (unlikely(!res));
res = temp & (1UL << bit);
@@ -440,17 +556,36 @@
} else if (kernel_uses_llsc) {
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
unsigned long temp;
+#ifdef CONFIG_CPU_MIPSR6
+ register unsigned long temp2;
+#endif
do {
__asm__ __volatile__(
+#ifdef CONFIG_CPU_MIPSR6
+ " .set mips64r6 \n"
+#ifdef CONFIG_64BIT
+ " dla %3, %1 \n"
+#else
+ " la %3, %1 \n"
+#endif
+ " " __LL "%0, 0(%3) # test_and_change_bit \n"
+ " xor %2, %0, %4 \n"
+ " " __SC "%2, 0(%3) \n"
+ " .set mips0 \n"
+ : "=&r" (temp), "+m" (*m), "=&r" (res),
+ "=&r" (temp2)
+ : "r" (1UL << bit));
+#else
" .set mips3 \n"
- " " __LL "%0, %1 # test_and_change_bit \n"
+ " " __LL "%0, %1 # test_and_change_bit \n"
" xor %2, %0, %3 \n"
" " __SC "\t%2, %1 \n"
" .set mips0 \n"
: "=&r" (temp), "+m" (*m), "=&r" (res)
: "r" (1UL << bit)
: "memory");
+#endif
} while (unlikely(!res));
res = temp & (1UL << bit);
@@ -491,8 +626,12 @@
__builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
__asm__(
" .set push \n"
+#ifdef CONFIG_CPU_MIPSR6
+ " .set mips64r6 \n"
+#else
" .set mips32 \n"
- " clz %0, %1 \n"
+#endif
+ " clz %0, %1 \n"
" .set pop \n"
: "=r" (num)
: "r" (word));
@@ -504,8 +643,12 @@
__builtin_constant_p(cpu_has_mips64) && cpu_has_mips64) {
__asm__(
" .set push \n"
+#ifdef CONFIG_CPU_MIPSR6
+ " .set mips64r6 \n"
+#else
" .set mips64 \n"
- " dclz %0, %1 \n"
+#endif
+ " dclz %0, %1 \n"
" .set pop \n"
: "=r" (num)
: "r" (word));
diff --git a/arch/mips/include/asm/bootinfo.h b/arch/mips/include/asm/bootinfo.h
index 4d2cdea..6b1ebee 100644
--- a/arch/mips/include/asm/bootinfo.h
+++ b/arch/mips/include/asm/bootinfo.h
@@ -87,6 +87,7 @@
#define BOOT_MEM_ROM_DATA 2
#define BOOT_MEM_RESERVED 3
#define BOOT_MEM_INIT_RAM 4
+#define BOOT_MEM_INUSE 5
/*
* A memory map that's built upon what was determined
diff --git a/arch/mips/include/asm/cacheflush.h b/arch/mips/include/asm/cacheflush.h
index 69468de..04b8782 100644
--- a/arch/mips/include/asm/cacheflush.h
+++ b/arch/mips/include/asm/cacheflush.h
@@ -66,20 +66,20 @@
extern void (*flush_icache_range)(unsigned long start, unsigned long end);
extern void (*local_flush_icache_range)(unsigned long start, unsigned long end);
-extern void (*__flush_cache_vmap)(void);
+extern void (*__flush_cache_vmap)(unsigned long start, unsigned long end);
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
if (cpu_has_dc_aliases)
- __flush_cache_vmap();
+ __flush_cache_vmap(start, end);
}
-extern void (*__flush_cache_vunmap)(void);
+extern void (*__flush_cache_vunmap)(unsigned long start, unsigned long end);
static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
if (cpu_has_dc_aliases)
- __flush_cache_vunmap();
+ __flush_cache_vunmap(start, end);
}
extern void copy_to_user_page(struct vm_area_struct *vma,
@@ -94,6 +94,9 @@
extern void (*flush_icache_all)(void);
extern void (*local_flush_data_cache_page)(void * addr);
extern void (*flush_data_cache_page)(unsigned long addr);
+extern void (*mips_flush_data_cache_range)(struct vm_area_struct *vma,
+ unsigned long vaddr, struct page *page, unsigned long addr,
+ unsigned long size);
/*
* This flag is used to indicate that the page pointed to by a pte
@@ -117,7 +120,8 @@
#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
static inline void flush_kernel_dcache_page(struct page *page)
{
- BUG_ON(cpu_has_dc_aliases && PageHighMem(page));
+ if (cpu_has_dc_aliases || !cpu_has_ic_fills_f_dc)
+ __flush_dcache_page(page);
}
/*
diff --git a/arch/mips/include/asm/checksum.h b/arch/mips/include/asm/checksum.h
index ac3d2b8..f0df06b 100644
--- a/arch/mips/include/asm/checksum.h
+++ b/arch/mips/include/asm/checksum.h
@@ -16,61 +16,6 @@
#include <asm/uaccess.h>
/*
- * computes the checksum of a memory block at buff, length len,
- * and adds in "sum" (32-bit)
- *
- * returns a 32-bit number suitable for feeding into itself
- * or csum_tcpudp_magic
- *
- * this function must be called with even lengths, except
- * for the last fragment, which may be odd
- *
- * it's best to have buff aligned on a 32-bit boundary
- */
-__wsum csum_partial(const void *buff, int len, __wsum sum);
-
-__wsum __csum_partial_copy_user(const void *src, void *dst,
- int len, __wsum sum, int *err_ptr);
-
-/*
- * this is a new version of the above that records errors it finds in *errp,
- * but continues and zeros the rest of the buffer.
- */
-static inline
-__wsum csum_partial_copy_from_user(const void __user *src, void *dst, int len,
- __wsum sum, int *err_ptr)
-{
- might_fault();
- return __csum_partial_copy_user((__force void *)src, dst,
- len, sum, err_ptr);
-}
-
-/*
- * Copy and checksum to user
- */
-#define HAVE_CSUM_COPY_USER
-static inline
-__wsum csum_and_copy_to_user(const void *src, void __user *dst, int len,
- __wsum sum, int *err_ptr)
-{
- might_fault();
- if (access_ok(VERIFY_WRITE, dst, len))
- return __csum_partial_copy_user(src, (__force void *)dst,
- len, sum, err_ptr);
- if (len)
- *err_ptr = -EFAULT;
-
- return (__force __wsum)-1; /* invalid checksum */
-}
-
-/*
- * the same as csum_partial, but copies from user space (but on MIPS
- * we have just one address space, so this is identical to the above)
- */
-__wsum csum_partial_copy_nocheck(const void *src, void *dst,
- int len, __wsum sum);
-
-/*
* Fold a partial checksum without adding pseudo headers
*/
static inline __sum16 csum_fold(__wsum sum)
@@ -91,6 +36,177 @@
return (__force __sum16)sum;
}
+#ifdef CONFIG_CPU_MIPSR6
+
+extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);
+extern __sum16 ip_compute_csum(const void *buff, int len);
+
+#ifndef csum_tcpudp_nofold
+/*
+ * computes the checksum of the TCP/UDP pseudo-header
+ * returns a 16-bit checksum, already complemented
+ */
+extern __wsum
+csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
+ unsigned short proto, __wsum sum);
+#endif
+
+#ifndef csum_tcpudp_magic
+static inline __sum16
+csum_tcpudp_magic(__be32 saddr, __be32 daddr, unsigned short len,
+ unsigned short proto, __wsum sum)
+{
+ return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
+}
+#endif
+
+/*
+ * computes the checksum of a memory block at buff, length len,
+ * and adds in "sum" (32-bit)
+ *
+ * returns a 32-bit number suitable for feeding into itself
+ * or csum_tcpudp_magic
+ *
+ * this function must be called with even lengths, except
+ * for the last fragment, which may be odd
+ *
+ * it's best to have buff aligned on a 32-bit boundary
+ */
+extern __wsum csum_partial(const void *buff, int len, __wsum sum);
+
+/*
+ * the same as csum_partial, but copies from src while it
+ * checksums
+ *
+ * here even more important to align src and dst on a 32-bit (or even
+ * better 64-bit) boundary
+ */
+extern __wsum csum_partial_copy(const void *src, void *dst, int len, __wsum sum);
+
+/*
+ * the same as csum_partial_copy, but copies from user space.
+ *
+ * here even more important to align src and dst on a 32-bit (or even
+ * better 64-bit) boundary
+ */
+extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst,
+ int len, __wsum sum, int *csum_err);
+
+#ifndef csum_partial_copy_nocheck
+#define csum_partial_copy_nocheck(src, dst, len, sum) \
+ csum_partial_copy((src), (dst), (len), (sum))
+#endif
+
+#else /* !CONFIG_CPU_MIPSR6 */
+
+/*
+ * computes the checksum of a memory block at buff, length len,
+ * and adds in "sum" (32-bit)
+ *
+ * returns a 32-bit number suitable for feeding into itself
+ * or csum_tcpudp_magic
+ *
+ * this function must be called with even lengths, except
+ * for the last fragment, which may be odd
+ *
+ * it's best to have buff aligned on a 32-bit boundary
+ */
+__wsum csum_partial(const void *buff, int len, __wsum sum);
+
+__wsum __csum_partial_copy_user(const void *src, void *dst,
+ int len, __wsum sum, int *err_ptr);
+#ifdef CONFIG_EVA
+__wsum __csum_partial_copy_fromuser(const void *src, void *dst,
+ int len, __wsum sum, int *err_ptr);
+__wsum __csum_partial_copy_touser(const void *src, void *dst,
+ int len, __wsum sum, int *err_ptr);
+#endif
+
+/*
+ * this is a new version of the above that records errors it finds in *errp,
+ * but continues and zeros the rest of the buffer.
+ */
+static inline
+__wsum csum_partial_copy_from_user(const void __user *src, void *dst, int len,
+ __wsum sum, int *err_ptr)
+{
+#ifndef CONFIG_EVA
+ might_fault();
+ return __csum_partial_copy_user((__force void *)src, dst,
+ len, sum, err_ptr);
+#else
+ if (segment_eq(get_fs(), KERNEL_DS))
+ return __csum_partial_copy_user((__force void *)src, dst,
+ len, sum, err_ptr);
+ else {
+ might_fault();
+ return __csum_partial_copy_fromuser((__force void *)src, dst,
+ len, sum, err_ptr);
+ }
+#endif
+}
+
+#define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
+static inline
+__wsum csum_and_copy_from_user(const void __user *src, void *dst,
+ int len, __wsum sum, int *err_ptr)
+{
+ if (access_ok(VERIFY_READ, src, len))
+ return csum_partial_copy_from_user(src, dst, len, sum, err_ptr);
+
+ if (len)
+ *err_ptr = -EFAULT;
+
+ return sum;
+}
+
+/*
+ * Copy and checksum to user
+ */
+#define HAVE_CSUM_COPY_USER
+static inline
+__wsum csum_and_copy_to_user(const void *src, void __user *dst, int len,
+ __wsum sum, int *err_ptr)
+{
+ if (access_ok(VERIFY_WRITE, dst, len)) {
+#ifndef CONFIG_EVA
+ might_fault();
+ return __csum_partial_copy_user(src, (__force void *)dst,
+ len, sum, err_ptr);
+#else
+ if (segment_eq(get_fs(), KERNEL_DS))
+ return __csum_partial_copy_user(src, (__force void *)dst,
+ len, sum, err_ptr);
+ else {
+ might_fault();
+ return __csum_partial_copy_touser(src, (__force void *)dst,
+ len, sum, err_ptr);
+ }
+#endif
+ }
+ if (len)
+ *err_ptr = -EFAULT;
+
+ return (__force __wsum)-1; /* invalid checksum */
+}
+
+/*
+ * the same as csum_partial, but copies from user space (but on MIPS
+ * we have just one address space, so this is identical to the above)
+ */
+__wsum csum_partial_copy_nocheck(const void *src, void *dst,
+ int len, __wsum sum);
+
+/*
+ * this routine is used for miscellaneous IP-like checksums, mainly
+ * in icmp.c
+ */
+
+static inline __sum16 ip_compute_csum(const void *buff, int len)
+{
+ return csum_fold(csum_partial(buff, len, 0));
+}
+
/*
* This is a version of ip_compute_csum() optimized for IP headers,
* which always checksum on 4 octet boundaries.
@@ -183,14 +299,7 @@
return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}
-/*
- * this routine is used for miscellaneous IP-like checksums, mainly
- * in icmp.c
- */
-static inline __sum16 ip_compute_csum(const void *buff, int len)
-{
- return csum_fold(csum_partial(buff, len, 0));
-}
+#endif /* !CONFIG_CPU_MIPSR6 */
#define _HAVE_ARCH_IPV6_CSUM
static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
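
For reference on csum_fold(), which both the R6 and legacy halves above rely on: it collapses the 32-bit one's-complement accumulator into 16 bits with end-around carry. A hedged plain-C equivalent of the arithmetic (the header's real version may use asm):

	static inline unsigned short csum_fold_sketch(unsigned int sum)
	{
		sum = (sum & 0xffff) + (sum >> 16);	/* fold high half into low */
		sum = (sum & 0xffff) + (sum >> 16);	/* absorb the final carry */
		return (unsigned short)~sum;		/* one's-complement result */
	}
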
diff --git a/arch/mips/include/asm/cmpxchg.h b/arch/mips/include/asm/cmpxchg.h
index 466069b..41055ef 100644
--- a/arch/mips/include/asm/cmpxchg.h
+++ b/arch/mips/include/asm/cmpxchg.h
@@ -35,19 +35,40 @@
: "memory");
} else if (kernel_uses_llsc) {
unsigned long dummy;
+#ifdef CONFIG_CPU_MIPSR6
+ register unsigned long tmp;
+#endif
do {
__asm__ __volatile__(
+#ifdef CONFIG_CPU_MIPSR6
+ " .set mips64r6 \n"
+#ifdef CONFIG_64BIT
+ " dla %2, %3 \n"
+#else
+ " la %2, %3 \n"
+#endif
+ " ll %0, 0(%2) # xchg_u32 \n"
+ " .set mips0 \n"
+ " move %1, %z4 \n"
+ " .set mips64r6 \n"
+ " sc %1, 0(%2) \n"
+ " .set mips0 \n"
+ : "=&r" (retval), "=&r" (dummy), "=&r" (tmp),
+ "+m" (*m)
+ : "Jr" (val));
+#else
" .set mips3 \n"
- " ll %0, %3 # xchg_u32 \n"
+ " ll %0, %3 # xchg_u32 \n"
" .set mips0 \n"
" move %2, %z4 \n"
" .set mips3 \n"
- " sc %2, %1 \n"
+ " sc %2, %1 \n"
" .set mips0 \n"
: "=&r" (retval), "=m" (*m), "=&r" (dummy)
: "R" (*m), "Jr" (val)
: "memory");
+#endif
} while (unlikely(!dummy));
} else {
unsigned long flags;
@@ -85,17 +106,36 @@
: "memory");
} else if (kernel_uses_llsc) {
unsigned long dummy;
+#ifdef CONFIG_CPU_MIPSR6
+ register unsigned long tmp;
+#endif
do {
__asm__ __volatile__(
+#ifdef CONFIG_CPU_MIPSR6
+ " .set mips64r6 \n"
+#ifdef CONFIG_64BIT
+ " dla %2, %3 \n"
+#else
+ " la %2, %3 \n"
+#endif
+ " lld %0, 0(%2) # xchg_u64 \n"
+ " move %1, %z4 \n"
+ " scd %1, 0(%2) \n"
+ " .set mips0 \n"
+ : "=&r" (retval), "=&r" (dummy), "=&r" (tmp),
+ "+m" (*m)
+ : "Jr" (val));
+#else
" .set mips3 \n"
- " lld %0, %3 # xchg_u64 \n"
+ " lld %0, %3 # xchg_u64 \n"
" move %2, %z4 \n"
" scd %2, %1 \n"
" .set mips0 \n"
: "=&r" (retval), "=m" (*m), "=&r" (dummy)
: "R" (*m), "Jr" (val)
: "memory");
+#endif
} while (unlikely(!dummy));
} else {
unsigned long flags;
@@ -137,6 +177,78 @@
#define __HAVE_ARCH_CMPXCHG 1
+#if defined(CONFIG_CPU_MIPSR6) && defined(CONFIG_64BIT)
+#define __cmpxchg_asm(ld, st, m, old, new) \
+({ \
+ __typeof(*(m)) __ret; \
+ register unsigned long tmp; \
+ \
+ if (kernel_uses_llsc) { \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set noat \n" \
+ " .set mips64r6 \n" \
+ "1: dla %1, %2 \n" \
+ ld " %0, 0(%1) # __cmpxchg_asm \n" \
+ " bne %0, %z3, 2f \n" \
+ " .set mips0 \n" \
+ " move $1, %z4 \n" \
+ " .set mips64r6 \n" \
+ " " st " $1, 0(%1) \n" \
+ " beqz $1, 1b \n" \
+ " .set pop \n" \
+ "2: \n" \
+ : "=&r" (__ret), "=&r" (tmp), "+m" (*m) \
+ : "Jr" (old), "Jr" (new)); \
+ } else { \
+ unsigned long __flags; \
+ \
+ raw_local_irq_save(__flags); \
+ __ret = *m; \
+ if (__ret == old) \
+ *m = new; \
+ raw_local_irq_restore(__flags); \
+ } \
+ \
+ __ret; \
+})
+#else
+#ifdef CONFIG_CPU_MIPSR6
+#define __cmpxchg_asm(ld, st, m, old, new) \
+({ \
+ __typeof(*(m)) __ret; \
+ register unsigned long tmp; \
+ \
+ if (kernel_uses_llsc) { \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set noat \n" \
+ " .set mips64r6 \n" \
+ "1: la %1, %2 \n" \
+ ld " %0, 0(%1) # __cmpxchg_asm \n" \
+ " bne %0, %z3, 2f \n" \
+ " .set mips0 \n" \
+ " move $1, %z4 \n" \
+ " .set mips64r6 \n" \
+ " " st " $1, 0(%1) \n" \
+ " beqz $1, 1b \n" \
+ " .set pop \n" \
+ "2: \n" \
+ : "=&r" (__ret), "=&r" (tmp), "+m" (*m) \
+ : "Jr" (old), "Jr" (new)); \
+ } else { \
+ unsigned long __flags; \
+ \
+ raw_local_irq_save(__flags); \
+ __ret = *m; \
+ if (__ret == old) \
+ *m = new; \
+ raw_local_irq_restore(__flags); \
+ } \
+ \
+ __ret; \
+})
+#else /* !CONFIG_CPU_MIPSR6 */
#define __cmpxchg_asm(ld, st, m, old, new) \
({ \
__typeof(*(m)) __ret; \
@@ -146,12 +258,12 @@
" .set push \n" \
" .set noat \n" \
" .set mips3 \n" \
- "1: " ld " %0, %2 # __cmpxchg_asm \n" \
+ "1: " ld " %0, %2 # __cmpxchg_asm \n" \
" bne %0, %z3, 2f \n" \
" .set mips0 \n" \
" move $1, %z4 \n" \
" .set mips3 \n" \
- " " st " $1, %1 \n" \
+ " " st " $1, %1 \n" \
" beqzl $1, 1b \n" \
"2: \n" \
" .set pop \n" \
@@ -163,12 +275,12 @@
" .set push \n" \
" .set noat \n" \
" .set mips3 \n" \
- "1: " ld " %0, %2 # __cmpxchg_asm \n" \
+ "1: " ld " %0, %2 # __cmpxchg_asm \n" \
" bne %0, %z3, 2f \n" \
" .set mips0 \n" \
" move $1, %z4 \n" \
" .set mips3 \n" \
- " " st " $1, %1 \n" \
+ " " st " $1, %1 \n" \
" beqz $1, 1b \n" \
" .set pop \n" \
"2: \n" \
@@ -187,6 +299,8 @@
\
__ret; \
})
+#endif
+#endif /* CONFIG_CPU_MIPSR6 */
/*
* This function doesn't exist, so you'll get a linker error
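Note on the hunks above: the R6 conversions keep the classic LL/SC shape, load-linked, compute, store-conditional, retry until the store succeeds. As a hedged illustration of the semantics only (user-level C11, not kernel code, with the retry loop hidden inside the builtin):

    #include <stdatomic.h>

    /* Sketch of __cmpxchg_asm semantics: store 'new' at *m only if the
     * observed value equals 'old'; return the value actually observed. */
    static unsigned long cmpxchg_sketch(_Atomic unsigned long *m,
                                        unsigned long old, unsigned long new)
    {
            unsigned long expected = old;

            /* On LL/SC machines the compiler emits the same ll/sc retry
             * loop that "beqz $1, 1b" spells out in the macros above. */
            atomic_compare_exchange_strong(m, &expected, new);
            return expected;
    }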
diff --git a/arch/mips/include/asm/compat.h b/arch/mips/include/asm/compat.h
index c4bd54a..b6e254a 100644
--- a/arch/mips/include/asm/compat.h
+++ b/arch/mips/include/asm/compat.h
@@ -298,7 +298,13 @@
static inline int is_compat_task(void)
{
+#ifdef CONFIG_MIPS32_O32
+ return test_thread_flag(TIF_32BIT_REGS);
+#elif defined(CONFIG_MIPS32_N32)
return test_thread_flag(TIF_32BIT_ADDR);
+#else
+#error No MIPS32 compatibility mode defined
+#endif /* CONFIG_MIPS32_O32 */
}
#endif /* _ASM_COMPAT_H */
diff --git a/arch/mips/include/asm/cpcregs.h b/arch/mips/include/asm/cpcregs.h
new file mode 100644
index 0000000..caa81ad
--- /dev/null
+++ b/arch/mips/include/asm/cpcregs.h
@@ -0,0 +1,80 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Cluster Power Controller Subsystem Register Definitions
+ *
+ * Copyright (C) 2013 Imagination Technologies Ltd
+ * Leonid Yegoshin (Leonid.Yegoshin@imgtec.com)
+ *
+ */
+#ifndef _ASM_CPCREGS_H
+#define _ASM_CPCREGS_H
+
+
+/* Offsets to major blocks within CPC from CPC base */
+#define CPC_GCB_OFS 0x0000 /* Global Control Block */
+#define CPC_CLCB_OFS 0x2000 /* Core Local Control Block */
+#define CPC_COCB_OFS 0x4000 /* Core Other Control Block */
+
+#define CPCGCBOFS(x) (CPC_##x##_OFS + CPC_GCB_OFS)
+#define CPCLCBOFS(x) (CPC_##x##_OFS + CPC_CLCB_OFS)
+#define CPCOCBOFS(x) (CPC_##x##_OFS + CPC_COCB_OFS)
+
+#define CPCGCB(x) REGP(_cpc_base, CPCGCBOFS(x))
+#define CPCLCB(x) REGP(_cpc_base, CPCLCBOFS(x))
+#define CPCOCB(x) REGP(_cpc_base, CPCOCBOFS(x))
+
+/* Global section registers offsets */
+#define CPC_CSRAPR_OFS 0x000
+#define CPC_SEQDELAY_OFS 0x008
+#define CPC_RAILDELAY_OFS 0x010
+#define CPC_RESETWIDTH_OFS 0x018
+#define CPC_REVID_OFS 0x020
+#define CPC_CLCTL_OFS 0x028
+#define CPC_PWRUP_OFS 0x030
+#define CPC_RESETST_OFS 0x040
+
+/* Local and Other Local sections registers offsets */
+#define CPC_CMD_OFS 0x000
+#define CPC_STATUS_OFS 0x008
+#define CPC_OTHER_OFS 0x010
+#define CPC_CCCTL_OFS 0x018
+#define CPC_LPACK_OFS 0x020
+#define CPC_VCRUN_OFS 0x028
+#define CPC_VCSPND_OFS 0x030
+#define CPC_RAMSLEEP_OFS 0x050
+
+/* Command and Status registers fields masks and offsets */
+
+#define CPCL_PWRUP_EVENT_MASK 0x00800000
+#define CPCL_PWRUP_EVENT_SH 23
+
+#define CPCL_STATUS_MASK 0x00780000
+#define CPCL_STATUS_SH 19
+#define CPCL_STATUS_U5 0x6
+#define CPCL_STATUS_U6 0x7
+
+
+#define CPCL_CLKGAT_IMPL_MASK 0x00020000
+#define CPCL_CLKGAT_IMPL_SH 17
+
+#define CPCL_PWRDN_IMPL_MASK 0x00010000
+#define CPCL_PWRDN_IMPL_SH 16
+
+#define CPCL_EJTAG_MASK 0x00008000
+#define CPCL_EJTAG_SH 15
+
+#define CPCL_IO_PWUP_POLICY_MASK 0x00000300
+#define CPCL_IO_PWUP_POLICY_SH 8
+
+#define CPCL_IO_TRFFC_EN_MASK 0x00000010
+#define CPCL_IO_TRFFC_EN_SH 4
+
+#define CPCL_CMD_MASK 0xf
+#define CPCL_CMD_SH 0
+
+extern int __init cpc_probe(unsigned long defaddr, unsigned long defsize);
+
+#endif /* _ASM_CPCREGS_H */
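The CPC accessors follow the GCMP/GIC style used elsewhere in this series: REGP() dereferences a 32-bit register at _cpc_base plus a block offset. A hedged usage sketch, assuming cpc_probe() has already mapped the block; the command encoding and the target-core field position are assumptions, the real values come from the CPC specification:

    #define CPC_CMD_PWRUP 4 /* assumed encoding, see CPC spec */

    /* Hypothetical caller: power up another core via the core-other block. */
    static void cpc_power_up_core(unsigned int core)
    {
            CPCLCB(OTHER) = core << 16; /* target-core field position assumed */
            CPCOCB(CMD) = (CPCOCB(CMD) & ~CPCL_CMD_MASK) |
                          (CPC_CMD_PWRUP << CPCL_CMD_SH);
    }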
diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
index e5ec8fc..a6a8e32 100644
--- a/arch/mips/include/asm/cpu-features.h
+++ b/arch/mips/include/asm/cpu-features.h
@@ -24,6 +24,12 @@
#ifndef cpu_has_tlb
#define cpu_has_tlb (cpu_data[0].options & MIPS_CPU_TLB)
#endif
+#ifndef cpu_has_tlbinv
+#define cpu_has_tlbinv (cpu_data[0].options & MIPS_CPU_TLBINV)
+#endif
+#ifndef cpu_has_tlbinv_full
+#define cpu_has_tlbinv_full (cpu_data[0].options & MIPS_CPU_TLBINV_FULL)
+#endif
#ifndef cpu_has_4kex
#define cpu_has_4kex (cpu_data[0].options & MIPS_CPU_4KEX)
#endif
@@ -69,7 +75,11 @@
#define cpu_has_cache_cdex_s (cpu_data[0].options & MIPS_CPU_CACHE_CDEX_S)
#endif
#ifndef cpu_has_prefetch
-#define cpu_has_prefetch (cpu_data[0].options & MIPS_CPU_PREFETCH)
+#ifdef CONFIG_CPU_MIPSR6
+#define cpu_has_prefetch (0)
+#else
+#define cpu_has_prefetch (cpu_data[0].options & MIPS_CPU_PREFETCH)
+#endif
#endif
#ifndef cpu_has_mcheck
#define cpu_has_mcheck (cpu_data[0].options & MIPS_CPU_MCHECK)
@@ -98,12 +108,28 @@
#ifndef cpu_has_rixi
#define cpu_has_rixi (cpu_data[0].options & MIPS_CPU_RIXI)
#endif
+#ifndef cpu_has_himem
+#define cpu_has_himem (cpu_data[0].options2 & MIPS_CPU_HIMEM)
+#endif
+#ifndef cpu_has_rixi_except
+#define cpu_has_rixi_except (cpu_data[0].options & MIPS_CPU_RIXI_EXCEPT)
+#endif
#ifndef cpu_has_mmips
-#define cpu_has_mmips (cpu_data[0].options & MIPS_CPU_MICROMIPS)
+# ifdef CONFIG_SYS_SUPPORTS_MICROMIPS
+# define cpu_has_mmips (cpu_data[0].options & MIPS_CPU_MICROMIPS)
+# else
+# define cpu_has_mmips 0
+# endif
#endif
#ifndef cpu_has_vtag_icache
#define cpu_has_vtag_icache (cpu_data[0].icache.flags & MIPS_CACHE_VTAG)
#endif
+#ifndef cpu_has_vtag_dcache
+#define cpu_has_vtag_dcache (cpu_data[0].dcache.flags & MIPS_CACHE_VTAG)
+#endif
+#ifndef cpu_has_ic_aliases
+#define cpu_has_ic_aliases (cpu_data[0].icache.flags & MIPS_CACHE_ALIASES)
+#endif
#ifndef cpu_has_dc_aliases
#define cpu_has_dc_aliases (cpu_data[0].dcache.flags & MIPS_CACHE_ALIASES)
#endif
@@ -116,6 +142,36 @@
#ifndef cpu_has_local_ebase
#define cpu_has_local_ebase 1
#endif
+#ifdef CONFIG_MIPS_CMP
+#ifndef cpu_has_cm2
+#define cpu_has_cm2 (cpu_data[0].options & MIPS_CPU_CM2)
+#endif
+#ifndef cpu_has_cm2_l2sync
+#define cpu_has_cm2_l2sync (cpu_data[0].options & MIPS_CPU_CM2_L2SYNC)
+#endif
+#else
+#define cpu_has_cm2 (0)
+#define cpu_has_cm2_l2sync (0)
+#endif
+#ifndef cpu_has_maar
+#define cpu_has_maar (cpu_data[0].options2 & MIPS_CPU_MAAR)
+#endif
+#ifndef cpu_has_fre
+#ifdef CONFIG_MIPS_INCOMPATIBLE_ARCH_EMULATION
+#define cpu_has_fre (cpu_data[0].options2 & MIPS_CPU_FRE)
+#else
+#define cpu_has_fre 0
+#endif
+#endif /* cpu_has_fre */
+#ifndef cpu_has_htw
+#define cpu_has_htw (cpu_data[0].options2 & MIPS_CPU_HTW)
+#endif
+#ifndef cpu_has_l2c
+#define cpu_has_l2c (cpu_data[0].options2 & MIPS_CPU_L2C)
+#endif
+#ifndef cpu_has_vcmt
+#define cpu_has_vcmt (cpu_data[0].options2 & MIPS_CPU_VC)
+#endif
/*
* I-Cache snoops remote store. This only matters on SMP. Some multiprocessors
@@ -155,30 +211,38 @@
# ifndef cpu_has_mips32r2
# define cpu_has_mips32r2 (cpu_data[0].isa_level & MIPS_CPU_ISA_M32R2)
# endif
+# ifndef cpu_has_mips32r6
+# define cpu_has_mips32r6 (cpu_data[0].isa_level & MIPS_CPU_ISA_M32R6)
+# endif
# ifndef cpu_has_mips64r1
# define cpu_has_mips64r1 (cpu_data[0].isa_level & MIPS_CPU_ISA_M64R1)
# endif
# ifndef cpu_has_mips64r2
# define cpu_has_mips64r2 (cpu_data[0].isa_level & MIPS_CPU_ISA_M64R2)
# endif
+# ifndef cpu_has_mips64r6
+# define cpu_has_mips64r6 (cpu_data[0].isa_level & MIPS_CPU_ISA_M64R6)
+# endif
/*
* Shortcuts ...
*/
-#define cpu_has_mips32 (cpu_has_mips32r1 | cpu_has_mips32r2)
-#define cpu_has_mips64 (cpu_has_mips64r1 | cpu_has_mips64r2)
+#define cpu_has_mips32 (cpu_has_mips32r1 | cpu_has_mips32r2 | cpu_has_mips32r6)
+#define cpu_has_mips64 (cpu_has_mips64r1 | cpu_has_mips64r2 | cpu_has_mips64r6)
#define cpu_has_mips_r1 (cpu_has_mips32r1 | cpu_has_mips64r1)
#define cpu_has_mips_r2 (cpu_has_mips32r2 | cpu_has_mips64r2)
+#define cpu_has_mips_r6 (cpu_has_mips32r6 | cpu_has_mips64r6)
#define cpu_has_mips_r (cpu_has_mips32r1 | cpu_has_mips32r2 | \
- cpu_has_mips64r1 | cpu_has_mips64r2)
+ cpu_has_mips64r1 | cpu_has_mips64r2 | \
+ cpu_has_mips32r6 | cpu_has_mips64r6)
#ifndef cpu_has_mips_r2_exec_hazard
-#define cpu_has_mips_r2_exec_hazard cpu_has_mips_r2
+#define cpu_has_mips_r2_exec_hazard (cpu_has_mips_r2 | cpu_has_mips_r6)
#endif
/*
* MIPS32, MIPS64, VR5500, IDT32332, IDT32334 and maybe a few other
- * pre-MIPS32/MIPS53 processors have CLO, CLZ. The IDT RC64574 is 64-bit and
+ * pre-MIPS32/MIPS64 processors have CLO, CLZ. The IDT RC64574 is 64-bit and
* has CLO and CLZ but not DCLO nor DCLZ. For 64-bit kernels
* cpu_has_clo_clz also indicates the availability of DCLO and DCLZ.
*/
@@ -186,6 +250,13 @@
# define cpu_has_clo_clz cpu_has_mips_r
# endif
+#ifdef CONFIG_CPU_MIPSR6
+
+#define cpu_has_dsp 0
+#define cpu_has_dsp2 0
+
+#else /* !CONFIG_CPU_MIPSR6 */
+
#ifndef cpu_has_dsp
#define cpu_has_dsp (cpu_data[0].ases & MIPS_ASE_DSP)
#endif
@@ -194,6 +265,8 @@
#define cpu_has_dsp2 (cpu_data[0].ases & MIPS_ASE_DSP2P)
#endif
+#endif /* CONFIG_CPU_MIPSR6 */
+
#ifndef cpu_has_mipsmt
#define cpu_has_mipsmt (cpu_data[0].ases & MIPS_ASE_MIPSMT)
#endif
@@ -202,6 +275,14 @@
#define cpu_has_userlocal (cpu_data[0].options & MIPS_CPU_ULRI)
#endif
+#ifndef cpu_has_segments
+#define cpu_has_segments (cpu_data[0].options & MIPS_CPU_SEGMENTS)
+#endif
+
+#ifndef cpu_has_eva
+#define cpu_has_eva (cpu_data[0].options & MIPS_CPU_EVA)
+#endif
+
#ifdef CONFIG_32BIT
# ifndef cpu_has_nofpuex
# define cpu_has_nofpuex (cpu_data[0].options & MIPS_CPU_NOFPUEX)
@@ -261,6 +342,10 @@
#define cpu_has_inclusive_pcaches (cpu_data[0].options & MIPS_CPU_INCLUSIVE_CACHES)
#endif
+#ifndef cpu_has_cm3_inclusive_pcaches
+#define cpu_has_cm3_inclusive_pcaches (cpu_data[0].options2 & MIPS_CPU_CM3_INCLUSIVE_CACHES)
+#endif
+
#ifndef cpu_dcache_line_size
#define cpu_dcache_line_size() cpu_data[0].dcache.linesz
#endif
@@ -283,4 +368,10 @@
#define cpu_has_vz (cpu_data[0].ases & MIPS_ASE_VZ)
#endif
+#if defined(CONFIG_CPU_HAS_MSA) && !defined(cpu_has_msa)
+# define cpu_has_msa (cpu_data[0].ases & MIPS_ASE_MSA)
+#elif !defined(cpu_has_msa)
+# define cpu_has_msa 0
+#endif
+
#endif /* __ASM_CPU_FEATURES_H */
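The new predicates follow the existing pattern: board code may pre-define them, otherwise they test the options/options2/ases bits probed at boot. A hypothetical boot-time report using them:

    /* Hypothetical fragment, e.g. in a platform setup file. */
    if (cpu_has_mips_r6)
            pr_info("MIPS R6 ISA (%s)\n",
                    cpu_has_mips64r6 ? "64-bit" : "32-bit");
    if (cpu_has_msa)
            pr_info("MSA SIMD present, MSAIR=%08x\n", cpu_data[0].msa_id);
    if (cpu_has_maar)
            pr_info("MAAR available\n");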
diff --git a/arch/mips/include/asm/cpu-info.h b/arch/mips/include/asm/cpu-info.h
index 41401d8..10846b69 100644
--- a/arch/mips/include/asm/cpu-info.h
+++ b/arch/mips/include/asm/cpu-info.h
@@ -41,22 +41,28 @@
struct cpuinfo_mips {
unsigned int udelay_val;
unsigned int asid_cache;
+ unsigned int htw_level;
/*
* Capability and feature descriptor structure for MIPS CPU
*/
unsigned long options;
+ unsigned long options2;
unsigned long ases;
unsigned int processor_id;
unsigned int fpu_id;
+ unsigned int msa_id;
unsigned int cputype;
int isa_level;
int tlbsize;
- struct cache_desc icache; /* Primary I-cache */
- struct cache_desc dcache; /* Primary D or combined I/D cache */
- struct cache_desc scache; /* Secondary cache */
- struct cache_desc tcache; /* Tertiary/split secondary cache */
- int srsets; /* Shadow register sets */
+ int tlbsizevtlb;
+ int tlbsizeftlbsets;
+ int tlbsizeftlbways;
+ struct cache_desc icache; /* Primary I-cache */
+ struct cache_desc dcache; /* Primary D or combined I/D cache */
+ struct cache_desc scache; /* Secondary cache */
+ struct cache_desc tcache; /* Tertiary/split secondary cache */
+ int srsets; /* Shadow register sets */
int core; /* physical core number */
#ifdef CONFIG_64BIT
int vmbits; /* Virtual memory size in bits */
@@ -68,7 +74,8 @@
* exception resources, ASID spaces, etc, are common
* to all TCs within the same VPE.
*/
- int vpe_id; /* Virtual Processor number */
+ int vpe_id; /* Virtual Processor number in Core */
+ int g_vpe; /* Global Virtual Processor number in CM3 */
#endif
#ifdef CONFIG_MIPS_MT_SMTC
int tc_id; /* Thread Context number */
@@ -79,6 +86,9 @@
#define NUM_WATCH_REGS 4
u16 watch_reg_masks[NUM_WATCH_REGS];
unsigned int kscratch_mask; /* Usable KScratch mask. */
+ unsigned int segctl0; /* Memory Segmentation Control 0 */
+ unsigned int segctl1; /* Memory Segmentation Control 1 */
+ unsigned int segctl2; /* Memory Segmentation Control 2 */
} __attribute__((aligned(SMP_CACHE_BYTES)));
extern struct cpuinfo_mips cpu_data[];
@@ -89,6 +99,6 @@
extern void cpu_report(void);
extern const char *__cpu_name[];
-#define cpu_name_string() __cpu_name[smp_processor_id()]
+#define cpu_name_string() __cpu_name[raw_smp_processor_id()]
#endif /* __ASM_CPU_INFO_H */
diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h
index dd86ab2..764fca8 100644
--- a/arch/mips/include/asm/cpu.h
+++ b/arch/mips/include/asm/cpu.h
@@ -79,6 +79,7 @@
* These are the PRID's for when 23:16 == PRID_COMP_MIPS
*/
+#define PRID_IMP_QEMU 0x0000
#define PRID_IMP_4KC 0x8000
#define PRID_IMP_5KC 0x8100
#define PRID_IMP_20KC 0x8200
@@ -97,6 +98,13 @@
#define PRID_IMP_1074K 0x9a00
#define PRID_IMP_M14KC 0x9c00
#define PRID_IMP_M14KEC 0x9e00
+#define PRID_IMP_INTERAPTIV_UP 0xa000
+#define PRID_IMP_INTERAPTIV_MP 0xa100
+#define PRID_IMP_PROAPTIV_UP 0xa200
+#define PRID_IMP_PROAPTIV_MP 0xa300
+#define PRID_IMP_VIRTUOSO 0xa700
+#define PRID_IMP_P5600 0xa800
+#define PRID_IMP_SAMURAI_UP 0xa900
/*
* These are the PRID's for when 23:16 == PRID_COMP_SIBYTE
@@ -265,18 +273,21 @@
CPU_4KC, CPU_4KEC, CPU_4KSC, CPU_24K, CPU_34K, CPU_1004K, CPU_74K,
CPU_ALCHEMY, CPU_PR4450, CPU_BMIPS32, CPU_BMIPS3300, CPU_BMIPS4350,
CPU_BMIPS4380, CPU_BMIPS5000, CPU_JZRISC, CPU_LOONGSON1, CPU_M14KC,
- CPU_M14KEC,
+ CPU_M14KEC, CPU_PROAPTIV, CPU_INTERAPTIV, CPU_VIRTUOSO,
+ CPU_P5600,
/*
* MIPS64 class processors
*/
CPU_5KC, CPU_5KE, CPU_20KC, CPU_25KF, CPU_SB1, CPU_SB1A, CPU_LOONGSON2,
CPU_CAVIUM_OCTEON, CPU_CAVIUM_OCTEON_PLUS, CPU_CAVIUM_OCTEON2,
- CPU_XLR, CPU_XLP,
+ CPU_XLR, CPU_XLP, CPU_SAMURAI,
+ CPU_QEMU,
CPU_LAST
};
+#define MIPS_FTLB_CAPABLE 0x1
/*
* ISA Level encodings
@@ -291,11 +302,14 @@
#define MIPS_CPU_ISA_M32R2 0x00000040
#define MIPS_CPU_ISA_M64R1 0x00000080
#define MIPS_CPU_ISA_M64R2 0x00000100
+#define MIPS_CPU_ISA_M32R6 0x00000200
+#define MIPS_CPU_ISA_M64R6 0x00000400
#define MIPS_CPU_ISA_32BIT (MIPS_CPU_ISA_I | MIPS_CPU_ISA_II | \
- MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2)
+ MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M32R6)
#define MIPS_CPU_ISA_64BIT (MIPS_CPU_ISA_III | MIPS_CPU_ISA_IV | \
- MIPS_CPU_ISA_V | MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)
+ MIPS_CPU_ISA_V | MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2 | \
+ MIPS_CPU_ISA_M64R6)
/*
* CPU Option encodings
@@ -325,6 +339,24 @@
#define MIPS_CPU_PCI 0x00400000 /* CPU has Perf Ctr Int indicator */
#define MIPS_CPU_RIXI 0x00800000 /* CPU has TLB Read/eXec Inhibit */
#define MIPS_CPU_MICROMIPS 0x01000000 /* CPU has microMIPS capability */
+#define MIPS_CPU_SEGMENTS 0x02000000 /* CPU supports memory segmentation */
+#define MIPS_CPU_EVA 0x04000000 /* CPU supports EVA functionality */
+#define MIPS_CPU_TLBINV 0x08000000 /* CPU supports TLBINV/F */
+#define MIPS_CPU_CM2 0x10000000 /* CPU has CM2 */
+#define MIPS_CPU_CM2_L2SYNC 0x20000000 /* CPU has CM2 L2-only SYNC feature */
+#define MIPS_CPU_TLBINV_FULL 0x40000000 /* CPU supports single TLBINV/F for full V/FTLB */
+#define MIPS_CPU_RIXI_EXCEPT 0x80000000 /* CPU has TLB Read/eXec Inhibit exceptions */
+
+/*
+ * CPU Option2 encodings
+ */
+#define MIPS_CPU_MAAR 0x00000001 /* MAAR exists */
+#define MIPS_CPU_HIMEM 0x00000002 /* MIPS32: PA bits exceed PTE space */
+#define MIPS_CPU_FRE 0x00000004 /* CPU has FRE support */
+#define MIPS_CPU_HTW 0x00000008 /* CPU supports Hardware Page Table Walker */
+#define MIPS_CPU_L2C 0x00000010 /* CPU has _NO_ L2 description in Config2 */
+#define MIPS_CPU_VC 0x00000020 /* CPU has VC-MT support: GNR, CM3 virtual GIC etc */
+#define MIPS_CPU_CM3_INCLUSIVE_CACHES 0x00000040 /* L1D is included in L2 */
/*
* CPU ASE encodings
@@ -337,5 +369,6 @@
#define MIPS_ASE_MIPSMT 0x00000020 /* CPU supports MIPS MT */
#define MIPS_ASE_DSP2P 0x00000040 /* Signal Processing ASE Rev 2 */
#define MIPS_ASE_VZ 0x00000080 /* Virtualization ASE */
+#define MIPS_ASE_MSA 0x00000100 /* MIPS SIMD Architecture */
#endif /* _ASM_CPU_H */
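The new PRID values plug into the usual cpu_probe() switch; a hedged sketch of how the decode might look (PRID_IMP_MASK, 0xff00, is already defined in this header; the exact upstream wiring may differ):

    /* Hypothetical cpu_probe() fragment for the new cores. */
    switch (c->processor_id & PRID_IMP_MASK) {
    case PRID_IMP_PROAPTIV_UP:
    case PRID_IMP_PROAPTIV_MP:
            c->cputype = CPU_PROAPTIV;
            break;
    case PRID_IMP_INTERAPTIV_UP:
    case PRID_IMP_INTERAPTIV_MP:
            c->cputype = CPU_INTERAPTIV;
            break;
    case PRID_IMP_P5600:
            c->cputype = CPU_P5600;
            break;
    }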
diff --git a/arch/mips/include/asm/cputime.h b/arch/mips/include/asm/cputime.h
deleted file mode 100644
index c00eacb..0000000
--- a/arch/mips/include/asm/cputime.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __MIPS_CPUTIME_H
-#define __MIPS_CPUTIME_H
-
-#include <asm-generic/cputime.h>
-
-#endif /* __MIPS_CPUTIME_H */
diff --git a/arch/mips/include/asm/current.h b/arch/mips/include/asm/current.h
deleted file mode 100644
index 4c51401..0000000
--- a/arch/mips/include/asm/current.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/current.h>
diff --git a/arch/mips/include/asm/dsp.h b/arch/mips/include/asm/dsp.h
index 7bfad05..175517e 100644
--- a/arch/mips/include/asm/dsp.h
+++ b/arch/mips/include/asm/dsp.h
@@ -15,6 +15,8 @@
#include <asm/hazards.h>
#include <asm/mipsregs.h>
+#ifndef CONFIG_CPU_MIPSR6
+
#define DSP_DEFAULT 0x00000000
#define DSP_MASK 0x3f
@@ -82,4 +84,15 @@
tsk->thread.dsp.dspr; \
})
+#else
+
+#define __init_dsp() do { } while (0)
+#define init_dsp() do { } while (0)
+#define save_dsp(tsk) do { } while (0)
+#define restore_dsp(tsk) do { } while (0)
+#define __save_dsp(tsk) do { } while (0)
+#define __restore_dsp(tsk) do { } while (0)
+
+#endif /* CONFIG_CPU_MIPSR6 */
+
#endif /* _ASM_DSP_H */
diff --git a/arch/mips/include/asm/edac.h b/arch/mips/include/asm/edac.h
index 4da0c1f..5c5cbec 100644
--- a/arch/mips/include/asm/edac.h
+++ b/arch/mips/include/asm/edac.h
@@ -18,14 +18,17 @@
*/
__asm__ __volatile__ (
+#ifdef CONFIG_CPU_MIPSR6
+ " .set mips64r6 \n"
+#else
" .set mips2 \n"
+#endif
"1: ll %0, %1 # atomic_scrub \n"
" addu %0, $0 \n"
" sc %0, %1 \n"
" beqz %0, 1b \n"
" .set mips0 \n"
- : "=&r" (temp), "=m" (*virt_addr)
- : "m" (*virt_addr));
+ : "=&r" (temp), "+m" (*virt_addr));
virt_addr++;
}
diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
index cf3ae24..e493e22 100644
--- a/arch/mips/include/asm/elf.h
+++ b/arch/mips/include/asm/elf.h
@@ -8,6 +8,8 @@
#ifndef _ASM_ELF_H
#define _ASM_ELF_H
+#include <linux/fs.h>
+#include <uapi/linux/elf.h>
/* ELF header e_flags defines. */
/* MIPS architecture level. */
@@ -20,6 +22,8 @@
#define EF_MIPS_ARCH_64 0x60000000 /* MIPS64 code. */
#define EF_MIPS_ARCH_32R2 0x70000000 /* MIPS32 R2 code. */
#define EF_MIPS_ARCH_64R2 0x80000000 /* MIPS64 R2 code. */
+#define EF_MIPS_ARCH_32R6 0x90000000 /* MIPS32 R6 code. */
+#define EF_MIPS_ARCH_64R6 0xa0000000 /* MIPS64 R6 code. */
/* The ABI of a file. */
#define EF_MIPS_ABI_O32 0x00001000 /* O32 ABI. */
@@ -28,6 +32,7 @@
#define PT_MIPS_REGINFO 0x70000000
#define PT_MIPS_RTPROC 0x70000001
#define PT_MIPS_OPTIONS 0x70000002
+#define PT_MIPS_ABIFLAGS 0x70000003
/* Flags in the e_flags field of the header */
#define EF_MIPS_NOREORDER 0x00000001
@@ -36,6 +41,7 @@
#define EF_MIPS_ABI2 0x00000020
#define EF_MIPS_OPTIONS_FIRST 0x00000080
#define EF_MIPS_32BITMODE 0x00000100
+#define EF_MIPS_32BITMODE_FP64 0x00000200
#define EF_MIPS_ABI 0x0000f000
#define EF_MIPS_ARCH 0xf0000000
@@ -173,6 +179,36 @@
typedef double elf_fpreg_t;
typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
+struct mips_elf_abiflags_v0 {
+ uint16_t version; /* Version of flags structure */
+ uint8_t isa_level; /* The level of the ISA: 1-5, 32, 64 */
+ uint8_t isa_rev; /* The revision of ISA: 0 for MIPS V and below,
+ 1-n otherwise */
+ uint8_t gpr_size; /* The size of general purpose registers */
+ uint8_t cpr1_size; /* The size of co-processor 1 registers */
+ uint8_t cpr2_size; /* The size of co-processor 2 registers */
+ uint8_t fp_abi; /* The floating-point ABI */
+ uint32_t isa_ext; /* Mask of processor-specific extensions */
+ uint32_t ases; /* Mask of ASEs used */
+ uint32_t flags1; /* Mask of general flags */
+ uint32_t flags2;
+};
+
+#define MIPS_ABI_FP_UNKNOWN (-1) /* Unknown FP ABI (kernel internal) */
+#define MIPS_ABI_FP_NXX (-2) /* any nXX ABI */
+#define FP_DOUBLE_64A (-3) /* DOUBLE + 64A */
+#define FP_ERROR (-4) /* ERROR in match */
+
+#define MIPS_ABI_FP_ANY 0 /* FP ABI doesn't matter */
+#define MIPS_ABI_FP_DOUBLE 1 /* -mdouble-float */
+#define MIPS_ABI_FP_SINGLE 2 /* -msingle-float */
+#define MIPS_ABI_FP_SOFT 3 /* -msoft-float */
+#define MIPS_ABI_FP_OLD_64 4 /* -mips32r2 -mfp64 */
+#define MIPS_ABI_FP_XX 5 /* -mfpxx */
+#define MIPS_ABI_FP_64 6 /* -mips32r2 -mfp64 */
+#define MIPS_ABI_FP_64A 7 /* -mips32r2 -mfp64 -mno-odd-spreg */
+#define MIPS_ABI_FP_MAX 8
+
#ifdef CONFIG_32BIT
/*
@@ -228,6 +264,32 @@
#endif /* CONFIG_64BIT */
/*
+ * Ensure we don't load the incompatible architecture lib via uselib() -
+ * - verify FPU model.
+ */
+#define elf_lib_check_arch(hdr) \
+({ \
+ int __res = 1; \
+ struct elfhdr *__h = (hdr); \
+ \
+ if (!test_thread_local_flags(LTIF_FPU_FR)) { \
+ if ((__h->e_flags & EF_MIPS_ABI2) != 0) \
+ __res = 0; \
+ if (((__h->e_flags & EF_MIPS_ABI) != 0) && \
+ ((__h->e_flags & EF_MIPS_ABI) != EF_MIPS_ABI_O32)) \
+ __res = 0; \
+ if (__h->e_flags & EF_MIPS_32BITMODE_FP64) \
+ __res = 0; \
+ } else { \
+ if (((__h->e_flags & EF_MIPS_ABI) == 0) || \
+ ((__h->e_flags & EF_MIPS_ABI) == EF_MIPS_ABI_O32)) \
+ if (!(__h->e_flags & EF_MIPS_32BITMODE_FP64)) \
+ __res = 0; \
+ } \
+ __res; \
+})
+
+/*
* These are used to set parameters in the core dumps.
*/
#ifdef __MIPSEB__
@@ -247,11 +309,13 @@
#ifdef CONFIG_32BIT
-#define SET_PERSONALITY(ex) \
+#define SET_PERSONALITY2(ex, state) \
do { \
if (personality(current->personality) != PER_LINUX) \
set_personality(PER_LINUX); \
\
+ mips_set_personality_fp(state); \
+ \
current->thread.abi = &mips_abi; \
} while (0)
@@ -262,6 +326,9 @@
#ifdef CONFIG_MIPS32_N32
#define __SET_PERSONALITY32_N32() \
do { \
+ set_thread_local_flags(LTIF_FPU_FR); \
+ clear_thread_local_flags(LTIF_FPU_FRE); \
+ clear_thread_flag(TIF_32BIT_REGS); \
set_thread_flag(TIF_32BIT_ADDR); \
current->thread.abi = &mips_abi_n32; \
} while (0)
@@ -271,39 +338,50 @@
#endif
#ifdef CONFIG_MIPS32_O32
-#define __SET_PERSONALITY32_O32() \
+#define __SET_PERSONALITY32_O32(ex, state) \
do { \
+ if ((ex).e_flags & EF_MIPS_32BITMODE_FP64) \
+ set_thread_local_flags(LTIF_FPU_FR); \
+ else \
+ clear_thread_local_flags(LTIF_FPU_FR); \
+ clear_thread_local_flags(LTIF_FPU_FRE); \
+ \
set_thread_flag(TIF_32BIT_REGS); \
set_thread_flag(TIF_32BIT_ADDR); \
+ \
+ mips_set_personality_fp(state); \
+ \
current->thread.abi = &mips_abi_32; \
} while (0)
#else
-#define __SET_PERSONALITY32_O32() \
+#define __SET_PERSONALITY32_O32(ex, state) \
do { } while (0)
#endif
#ifdef CONFIG_MIPS32_COMPAT
-#define __SET_PERSONALITY32(ex) \
+#define __SET_PERSONALITY32(ex, state) \
do { \
if ((((ex).e_flags & EF_MIPS_ABI2) != 0) && \
((ex).e_flags & EF_MIPS_ABI) == 0) \
__SET_PERSONALITY32_N32(); \
else \
- __SET_PERSONALITY32_O32(); \
+ __SET_PERSONALITY32_O32(ex, state); \
} while (0)
#else
-#define __SET_PERSONALITY32(ex) do { } while (0)
+#define __SET_PERSONALITY32(ex, state) do { } while (0)
#endif
-#define SET_PERSONALITY(ex) \
+#define SET_PERSONALITY2(ex, state) \
do { \
unsigned int p; \
\
- clear_thread_flag(TIF_32BIT_REGS); \
+ set_thread_local_flags(LTIF_FPU_FR); \
+ clear_thread_local_flags(LTIF_FPU_FRE); \
+ clear_thread_flag(TIF_32BIT_REGS); \
clear_thread_flag(TIF_32BIT_ADDR); \
\
if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \
- __SET_PERSONALITY32(ex); \
+ __SET_PERSONALITY32(ex, state); \
else \
current->thread.abi = &mips_abi; \
\
@@ -337,7 +415,9 @@
instruction set this cpu supports. This could be done in userspace,
but it's not easy, and we've already done it here. */
-#define ELF_HWCAP (0)
+#define ELF_HWCAP (elf_hwcap)
+extern unsigned int elf_hwcap;
+#include <asm/hwcap.h>
/*
* This yields a string that ld.so will use to load implementation
@@ -381,4 +461,27 @@
extern unsigned long arch_randomize_brk(struct mm_struct *mm);
#define arch_randomize_brk arch_randomize_brk
+/* duplicate of the MIPS_ABI_FP_UNKNOWN definition above */
+#define MIPS_ABI_FP_UNKNOWN (-1) /* Unknown FP ABI (kernel internal) */
+
+struct arch_elf_state {
+ int fp_abi;
+ int interp_fp_abi;
+ int overall_abi;
+};
+
+#define INIT_ARCH_ELF_STATE { \
+ .fp_abi = MIPS_ABI_FP_UNKNOWN, \
+ .interp_fp_abi = MIPS_ABI_FP_UNKNOWN, \
+ .overall_abi = MIPS_ABI_FP_UNKNOWN, \
+}
+
+extern int arch_elf_pt_proc(void *ehdr, void *phdr, struct file *elf,
+ bool is_interp, struct arch_elf_state *state);
+
+extern int arch_check_elf(void *ehdr, bool has_interpreter,
+ struct arch_elf_state *state);
+
+extern void mips_set_personality_fp(struct arch_elf_state *state);
+
#endif /* _ASM_ELF_H */
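arch_elf_pt_proc() is the hook the ELF loader calls per program header so the FP ABI can be recorded before SET_PERSONALITY2() runs. A sketch of the expected shape, assuming the implementation lives in arch/mips/kernel/elf.c and that kernel_read() pulls in the abiflags segment; the real error handling may differ:

    /* Hypothetical sketch of the PT_MIPS_ABIFLAGS consumer. */
    int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf,
                         bool is_interp, struct arch_elf_state *state)
    {
            struct elf_phdr *phdr = _phdr;
            struct mips_elf_abiflags_v0 abiflags;

            if (phdr->p_type != PT_MIPS_ABIFLAGS)
                    return 0;
            if (phdr->p_filesz < sizeof(abiflags))
                    return -EINVAL;
            if (kernel_read(elf, phdr->p_offset, (char *)&abiflags,
                            sizeof(abiflags)) != sizeof(abiflags))
                    return -EIO;

            if (is_interp)
                    state->interp_fp_abi = abiflags.fp_abi;
            else
                    state->fp_abi = abiflags.fp_abi;
            return 0;
    }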
diff --git a/arch/mips/include/asm/emergency-restart.h b/arch/mips/include/asm/emergency-restart.h
deleted file mode 100644
index 108d8c4..0000000
--- a/arch/mips/include/asm/emergency-restart.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_EMERGENCY_RESTART_H
-#define _ASM_EMERGENCY_RESTART_H
-
-#include <asm-generic/emergency-restart.h>
-
-#endif /* _ASM_EMERGENCY_RESTART_H */
diff --git a/arch/mips/include/asm/fixmap.h b/arch/mips/include/asm/fixmap.h
index dfaaf493..7d05241 100644
--- a/arch/mips/include/asm/fixmap.h
+++ b/arch/mips/include/asm/fixmap.h
@@ -20,6 +20,7 @@
#include <asm/kmap_types.h>
#endif
+#ifndef CONFIG_EVA_3GB
/*
* Here we define all the compile-time 'special' virtual
* addresses. The point is to have a constant address at
@@ -46,7 +47,19 @@
* fix-mapped?
*/
enum fixed_addresses {
+
+/*
+ * Must be <= 8: last_pkmap_nr_arr[] is initialized with 8 elements.
+ * Keep the total L1 size <= 512KB with 4 ways.
+ */
+#ifdef CONFIG_PAGE_SIZE_64KB
+#define FIX_N_COLOURS 2
+#endif
+#ifdef CONFIG_PAGE_SIZE_32KB
+#define FIX_N_COLOURS 4
+#endif
+#ifndef FIX_N_COLOURS
#define FIX_N_COLOURS 8
+#endif
+
FIX_CMAP_BEGIN,
#ifdef CONFIG_MIPS_MT_SMTC
FIX_CMAP_END = FIX_CMAP_BEGIN + (FIX_N_COLOURS * NR_CPUS * 2),
@@ -56,7 +69,7 @@
#ifdef CONFIG_HIGHMEM
/* reserved pte's for temporary kernel mappings */
FIX_KMAP_BEGIN = FIX_CMAP_END + 1,
- FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
+ FIX_KMAP_END = FIX_KMAP_BEGIN+(8*NR_CPUS*FIX_N_COLOURS)-1,
#endif
__end_of_fixed_addresses
};
@@ -115,3 +128,4 @@
#endif
+#endif
diff --git a/arch/mips/include/asm/fpregdef.h b/arch/mips/include/asm/fpregdef.h
index 429481f..f184ba0 100644
--- a/arch/mips/include/asm/fpregdef.h
+++ b/arch/mips/include/asm/fpregdef.h
@@ -14,6 +14,20 @@
#include <asm/sgidefs.h>
+/*
+ * Starting with binutils 2.24.51.20140729, MIPS binutils warn about mixing
+ * hardfloat and softfloat object files. The kernel build uses soft-float by
+ * default, so we also need to pass -msoft-float along to GAS if it supports it.
+ * But this in turn causes assembler errors in files which access hardfloat
+ * registers. We detect if GAS supports "-msoft-float" in the Makefile and
+ * explicitly put ".set hardfloat" where floating point registers are touched.
+ */
+#ifdef GAS_HAS_SET_HARDFLOAT
+#define SET_HARDFLOAT .set hardfloat
+#else
+#define SET_HARDFLOAT
+#endif
+
#if _MIPS_SIM == _MIPS_SIM_ABI32
/*
diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h
index d088e5d..519359e 100644
--- a/arch/mips/include/asm/fpu.h
+++ b/arch/mips/include/asm/fpu.h
@@ -20,6 +20,7 @@
#include <asm/hazards.h>
#include <asm/processor.h>
#include <asm/current.h>
+#include <asm/msa.h>
#ifdef CONFIG_MIPS_MT_FPAFF
#include <asm/mips_mt.h>
@@ -28,36 +29,21 @@
struct sigcontext;
struct sigcontext32;
-extern void fpu_emulator_init_fpu(void);
-extern void _init_fpu(void);
+extern void fpu_emulator_init_fpu(struct task_struct *target);
+extern int _init_fpu(void);
extern void _save_fp(struct task_struct *);
extern void _restore_fp(struct task_struct *);
-#define __enable_fpu() \
+/*
+ * This macro is used only to obtain the FIR from the FPU; there seems
+ * to be a bug on the 34K when a single FPU has affinity to VPE0.
+ */
+#define __enable_fpu() \
do { \
set_c0_status(ST0_CU1); \
enable_fpu_hazard(); \
} while (0)
-#define __disable_fpu() \
-do { \
- clear_c0_status(ST0_CU1); \
- disable_fpu_hazard(); \
-} while (0)
-
-#define enable_fpu() \
-do { \
- if (cpu_has_fpu) \
- __enable_fpu(); \
-} while (0)
-
-#define disable_fpu() \
-do { \
- if (cpu_has_fpu) \
- __disable_fpu(); \
-} while (0)
-
-
#define clear_fpu_owner() clear_thread_flag(TIF_USEDFPU)
static inline int __is_fpu_owner(void)
@@ -70,52 +56,177 @@
return cpu_has_fpu && __is_fpu_owner();
}
-static inline void __own_fpu(void)
+static inline void thread_fpu_flags_update(void)
{
- __enable_fpu();
- KSTK_STATUS(current) |= ST0_CU1;
- set_thread_flag(TIF_USEDFPU);
+ if (current->mm)
+ change_thread_local_flags(LTIF_FPU_FR|LTIF_FPU_FRE,
+ current->mm->context.thread_flags & (LTIF_FPU_FR|LTIF_FPU_FRE));
}
-static inline void own_fpu_inatomic(int restore)
+static inline int __own_fpu(void)
{
+ int ret = 0;
+
+#if defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS32_R6) || defined(CONFIG_CPU_MIPS64)
+ u32 status;
+
+ thread_fpu_flags_update();
+
+ if (!test_thread_local_flags(LTIF_FPU_FR)) {
+ status = change_c0_status(ST0_CU1|ST0_FR, ST0_CU1);
+ enable_fpu_hazard();
+ if (read_c0_status() & ST0_FR) {
+ if (cpu_has_fre) {
+ set_c0_config5(MIPS_CONF5_FRE);
+ back_to_back_c0_hazard();
+ KSTK_STATUS(current) |= ST0_CU1|ST0_FR;
+ } else {
+ write_c0_status(status & ~ST0_CU1);
+ disable_fpu_hazard();
+ return SIGFPE;
+ }
+ } else {
+ if (test_thread_local_flags(LTIF_FPU_FRE)) {
+ if (cpu_has_fre) {
+ set_c0_config5(MIPS_CONF5_FRE);
+ back_to_back_c0_hazard();
+ } else {
+ write_c0_status(status & ~ST0_CU1);
+ disable_fpu_hazard();
+ return SIGFPE;
+ }
+ }
+ KSTK_STATUS(current) = (KSTK_STATUS(current) & ~ST0_FR) | ST0_CU1;
+ }
+ } else {
+ status = set_c0_status(ST0_CU1|ST0_FR);
+ enable_fpu_hazard();
+ if (!(read_c0_status() & ST0_FR)) {
+ write_c0_status(status & ~ST0_CU1);
+ disable_fpu_hazard();
+ return SIGFPE;
+ }
+ if (cpu_has_fre) {
+ if (test_thread_local_flags(LTIF_FPU_FRE)) {
+ set_c0_config5(MIPS_CONF5_FRE);
+ back_to_back_c0_hazard();
+ } else {
+ clear_c0_config5(MIPS_CONF5_FRE);
+ back_to_back_c0_hazard();
+ }
+ } else if (test_thread_local_flags(LTIF_FPU_FRE)) {
+ write_c0_status(status & ~ST0_CU1);
+ disable_fpu_hazard();
+ return SIGFPE;
+ }
+ KSTK_STATUS(current) |= ST0_CU1|ST0_FR;
+ }
+#else
+ thread_fpu_flags_update();
+
+ if (test_thread_local_flags(LTIF_FPU_FR))
+ return SIGFPE; /* core has no 64-bit FPU */
+
+ set_c0_status(ST0_CU1);
+ KSTK_STATUS(current) |= ST0_CU1;
+ enable_fpu_hazard();
+#endif
+ set_thread_flag(TIF_USEDFPU);
+ return ret;
+}
+
+static inline int own_fpu_inatomic(int restore)
+{
+ int ret = 0;
+
if (cpu_has_fpu && !__is_fpu_owner()) {
- __own_fpu();
- if (restore)
+ ret = __own_fpu();
+ if (restore && !ret)
_restore_fp(current);
}
+ return ret;
}
-static inline void own_fpu(int restore)
+static inline int own_fpu(int restore)
{
+ int ret;
+
preempt_disable();
- own_fpu_inatomic(restore);
+ ret = own_fpu_inatomic(restore);
preempt_enable();
+
+ return ret;
+}
+
+static inline unsigned int fpu_get_fcr31(void)
+{
+ unsigned int cp1status = read_32bit_cp1_register(CP1_STATUS);
+
+#ifdef CONFIG_CPU_MIPSR6
+ cp1status |= (current->thread.fpu.fcr31 &
+ (FPU_CSR_COND0|FPU_CSR_COND1|FPU_CSR_COND2|FPU_CSR_COND3|
+ FPU_CSR_COND4|FPU_CSR_COND5|FPU_CSR_COND6|FPU_CSR_COND7));
+#endif
+ return cp1status;
+}
+
+static inline void lose_fpu_inatomic(int save)
+{
+ if (is_msa_enabled()) {
+ if (save) {
+ save_msa(current);
+ current->thread.fpu.fcr31 = fpu_get_fcr31();
+ }
+ disable_msa();
+ clear_thread_flag(TIF_USEDMSA);
+ } else if (is_fpu_owner()) {
+ if (save)
+ _save_fp(current);
+ }
+ clear_c0_status(ST0_CU1);
+ disable_fpu_hazard();
+ KSTK_STATUS(current) &= ~ST0_CU1;
+ clear_thread_flag(TIF_USEDFPU);
}
static inline void lose_fpu(int save)
{
preempt_disable();
- if (is_fpu_owner()) {
- if (save)
- _save_fp(current);
- KSTK_STATUS(current) &= ~ST0_CU1;
- clear_thread_flag(TIF_USEDFPU);
- __disable_fpu();
- }
+ lose_fpu_inatomic(save);
preempt_enable();
}
-static inline void init_fpu(void)
+static inline int init_fpu(void)
{
+ int ret = 0;
+
preempt_disable();
- if (cpu_has_fpu) {
- __own_fpu();
+ if (cpu_has_fpu && !(ret = __own_fpu()))
_init_fpu();
- } else {
- fpu_emulator_init_fpu();
- }
+ else
+ fpu_emulator_init_fpu(current);
+
preempt_enable();
+
+ set_used_math();
+
+ return ret;
+}
+
+static inline void init_fp_ctx(struct task_struct *target)
+{
+ /* If FP has been used then the target already has context */
+ if (used_math())
+ return;
+
+ fpu_emulator_init_fpu(target);
+
+ /*
+ * Record that the target has "used" math, such that the context
+ * just initialised, and any modifications made by the caller,
+ * aren't discarded.
+ */
+ set_used_math();
}
static inline void save_fp(struct task_struct *tsk)
@@ -130,7 +241,7 @@
_restore_fp(tsk);
}
-static inline fpureg_t *get_fpu_regs(struct task_struct *tsk)
+static inline union fpureg *get_fpu_regs(struct task_struct *tsk)
{
if (tsk == current) {
preempt_disable();
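Since __own_fpu()/own_fpu() can now fail (the requested FR/FRE mode may be unsupported by the hardware), callers have to propagate the error instead of assuming ownership succeeded. A hedged sketch of a coprocessor-unusable handler fragment; the real caller lives in arch/mips/kernel/traps.c:

    /* Hypothetical: enable and restore the FPU for the faulting task,
     * delivering SIGFPE if the required FPU mode is unavailable. */
    if (own_fpu(1))
            force_sig(SIGFPE, current);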
diff --git a/arch/mips/include/asm/fpu_emulator.h b/arch/mips/include/asm/fpu_emulator.h
index 2abb587..c2506ff 100644
--- a/arch/mips/include/asm/fpu_emulator.h
+++ b/arch/mips/include/asm/fpu_emulator.h
@@ -36,6 +36,8 @@
local_t cp1ops;
local_t cp1xops;
local_t errors;
+ local_t sprecision;
+ local_t dprecision;
};
DECLARE_PER_CPU(struct mips_fpu_emulator_stats, fpuemustats);
@@ -52,7 +54,7 @@
#endif /* CONFIG_DEBUG_FS */
extern int mips_dsemul(struct pt_regs *regs, mips_instruction ir,
- unsigned long cpc);
+ unsigned long cpc, unsigned long bpc, unsigned long r31);
extern int do_dsemulret(struct pt_regs *xcp);
extern int fpu_emulator_cop1Handler(struct pt_regs *xcp,
struct mips_fpu_struct *ctx, int has_fpu,
@@ -61,6 +63,13 @@
int mm_isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
unsigned long *contpc);
+#ifdef CONFIG_MIPS_INCOMPATIBLE_ARCH_EMULATION
+extern int mipsr2_emulation;
+extern const unsigned int fpucondbit[];
+int mipsr2_decoder(struct pt_regs *regs, u32 instruction);
+#else
+#define mipsr2_emulation 0
+#endif
/*
* Instruction inserted following the badinst to further tag the sequence
*/
diff --git a/arch/mips/include/asm/futex.h b/arch/mips/include/asm/futex.h
index 6ea1581..5660a6e 100644
--- a/arch/mips/include/asm/futex.h
+++ b/arch/mips/include/asm/futex.h
@@ -16,6 +16,40 @@
#include <asm/errno.h>
#include <asm/war.h>
+#ifndef CONFIG_EVA
+#ifdef CONFIG_CPU_MIPSR6
+#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
+{ \
+ if (cpu_has_llsc) { \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set noat \n" \
+ " .set mips64r6 \n" \
+ "1: ll %1, %4 # __futex_atomic_op \n" \
+ " .set mips0 \n" \
+ " " insn " \n" \
+ " .set mips64r6 \n" \
+ "2: sc $1, %2 \n" \
+ " beqz $1, 1b \n" \
+ __WEAK_LLSC_MB \
+ "3: \n" \
+ " .set pop \n" \
+ " .set mips0 \n" \
+ " .section .fixup,\"ax\" \n" \
+ "4: li %0, %6 \n" \
+ " j 3b \n" \
+ " .previous \n" \
+ " .section __ex_table,\"a\" \n" \
+ " "__UA_ADDR "\t1b, 4b \n" \
+ " "__UA_ADDR "\t2b, 4b \n" \
+ " .previous \n" \
+ : "=r" (ret), "=&r" (oldval), "=R" (*uaddr) \
+ : "0" (0), "R" (*uaddr), "Jr" (oparg), "i" (-EFAULT) \
+ : "memory"); \
+ } else \
+ ret = -ENOSYS; \
+}
+#else /* !CONFIG_CPU_MIPSR6 */
#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
{ \
if (cpu_has_llsc && R10000_LLSC_WAR) { \
@@ -73,6 +107,68 @@
} else \
ret = -ENOSYS; \
}
+#endif /* CONFIG_CPU_MIPSR6 */
+#else
+#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
+{ \
+ if (cpu_has_llsc && R10000_LLSC_WAR) { \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set noat \n" \
+ " .set mips3 \n" \
+ "1: ll %1, %4 # __futex_atomic_op \n" \
+ " .set mips0 \n" \
+ " " insn " \n" \
+ " .set mips3 \n" \
+ "2: sc $1, %2 \n" \
+ " beqzl $1, 1b \n" \
+ __WEAK_LLSC_MB \
+ "3: \n" \
+ " .insn \n" \
+ " .set pop \n" \
+ " .set mips0 \n" \
+ " .section .fixup,\"ax\" \n" \
+ "4: li %0, %6 \n" \
+ " j 3b \n" \
+ " .previous \n" \
+ " .section __ex_table,\"a\" \n" \
+ " "__UA_ADDR "\t1b, 4b \n" \
+ " "__UA_ADDR "\t2b, 4b \n" \
+ " .previous \n" \
+ : "=r" (ret), "=&r" (oldval), "=R" (*uaddr) \
+ : "0" (0), "R" (*uaddr), "Jr" (oparg), "i" (-EFAULT) \
+ : "memory"); \
+ } else if (cpu_has_llsc) { \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set noat \n" \
+ " .set eva \n" \
+ "1: lle %1, %4 \n" \
+ " .set mips0 \n" \
+ " " insn " \n" \
+ " .set eva \n" \
+ "2: sce $1, %2 \n" \
+ " beqz $1, 1b \n" \
+ __WEAK_LLSC_MB \
+ "3: \n" \
+ " .insn \n" \
+ " .set pop \n" \
+ " .set mips0 \n" \
+ " .section .fixup,\"ax\" \n" \
+ "4: li %0, %6 \n" \
+ " j 3b \n" \
+ " .previous \n" \
+ " .section __ex_table,\"a\" \n" \
+ " "__UA_ADDR "\t1b, 4b \n" \
+ " "__UA_ADDR "\t2b, 4b \n" \
+ " .previous \n" \
+ : "=r" (ret), "=&r" (oldval), "=R" (*uaddr) \
+ : "0" (0), "R" (*uaddr), "Jr" (oparg), "i" (-EFAULT) \
+ : "memory"); \
+ } else \
+ ret = -ENOSYS; \
+}
+#endif
static inline int
futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
@@ -131,6 +227,7 @@
return ret;
}
+#ifndef CONFIG_EVA
static inline int
futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
u32 oldval, u32 newval)
@@ -173,12 +270,20 @@
"# futex_atomic_cmpxchg_inatomic \n"
" .set push \n"
" .set noat \n"
+#ifdef CONFIG_CPU_MIPSR6
+ " .set mips64r6 \n"
+#else
" .set mips3 \n"
+#endif
"1: ll %1, %3 \n"
" bne %1, %z4, 3f \n"
" .set mips0 \n"
" move $1, %z5 \n"
+#ifdef CONFIG_CPU_MIPSR6
+ " .set mips64r6 \n"
+#else
" .set mips3 \n"
+#endif
"2: sc $1, %2 \n"
" beqz $1, 1b \n"
__WEAK_LLSC_MB
@@ -201,6 +306,80 @@
*uval = val;
return ret;
}
+#else
+static inline int
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+ u32 oldval, u32 newval)
+{
+ int ret = 0;
+ u32 val;
+
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+ return -EFAULT;
+
+ if (cpu_has_llsc && R10000_LLSC_WAR) {
+ __asm__ __volatile__(
+ "# futex_atomic_cmpxchg_inatomic \n"
+ " .set push \n"
+ " .set noat \n"
+ " .set mips3 \n"
+ "1: ll %1, %3 \n"
+ " bne %1, %z4, 3f \n"
+ " .set mips0 \n"
+ " move $1, %z5 \n"
+ " .set mips3 \n"
+ "2: sc $1, %2 \n"
+ " beqzl $1, 1b \n"
+ __WEAK_LLSC_MB
+ "3: \n"
+ " .insn \n"
+ " .set pop \n"
+ " .section .fixup,\"ax\" \n"
+ "4: li %0, %6 \n"
+ " j 3b \n"
+ " .previous \n"
+ " .section __ex_table,\"a\" \n"
+ " "__UA_ADDR "\t1b, 4b \n"
+ " "__UA_ADDR "\t2b, 4b \n"
+ " .previous \n"
+ : "+r" (ret), "=&r" (val), "=R" (*uaddr)
+ : "R" (*uaddr), "Jr" (oldval), "Jr" (newval), "i" (-EFAULT)
+ : "memory");
+ } else if (cpu_has_llsc) {
+ __asm__ __volatile__(
+ "# futex_atomic_cmpxchg_inatomic \n"
+ " .set push \n"
+ " .set noat \n"
+ " .set eva \n"
+ "1: lle %1, %3 \n"
+ " bne %1, %z4, 3f \n"
+ " .set mips0 \n"
+ " move $1, %z5 \n"
+ " .set eva \n"
+ "2: sce $1, %2 \n"
+ " beqz $1, 1b \n"
+ __WEAK_LLSC_MB
+ "3: \n"
+ " .insn \n"
+ " .set pop \n"
+ " .section .fixup,\"ax\" \n"
+ "4: li %0, %6 \n"
+ " j 3b \n"
+ " .previous \n"
+ " .section __ex_table,\"a\" \n"
+ " "__UA_ADDR "\t1b, 4b \n"
+ " "__UA_ADDR "\t2b, 4b \n"
+ " .previous \n"
+ : "+r" (ret), "=&r" (val), "=R" (*uaddr)
+ : "R" (*uaddr), "Jr" (oldval), "Jr" (newval), "i" (-EFAULT)
+ : "memory");
+ } else
+ return -ENOSYS;
+
+ *uval = val;
+ return ret;
+}
+#endif
#endif
#endif /* _ASM_FUTEX_H */
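The insn argument is spliced between the ll/lle and sc/sce, so each futex operation supplies one instruction computing $1 from the loaded value %1 and the operand %z5. A sketch of how the macro gets instantiated, mirroring the FUTEX_OP_ADD case of futex_atomic_op_inuser() above; the stand-alone wrapper is hypothetical:

    static inline int futex_add_sketch(u32 __user *uaddr, int oparg)
    {
            int ret, oldval;

            pagefault_disable();
            /* "addu $1, %1, %z5": add oparg to the loaded value. */
            __futex_atomic_op("addu $1, %1, %z5", ret, oldval, uaddr, oparg);
            pagefault_enable();

            return ret ? ret : oldval;
    }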
diff --git a/arch/mips/include/asm/fw/fw.h b/arch/mips/include/asm/fw/fw.h
index d6c50a7..c632702 100644
--- a/arch/mips/include/asm/fw/fw.h
+++ b/arch/mips/include/asm/fw/fw.h
@@ -14,11 +14,12 @@
fw_dontuse,
fw_code,
fw_free,
+ fw_inuse,
};
typedef struct {
unsigned long base; /* Within KSEG0 */
- unsigned int size; /* bytes */
+ unsigned long size; /* bytes */
enum fw_memtypes type; /* fw_memtypes */
} fw_memblock_t;
diff --git a/arch/mips/include/asm/gcmpregs.h b/arch/mips/include/asm/gcmpregs.h
index a7359f7..aae3dc1 100644
--- a/arch/mips/include/asm/gcmpregs.h
+++ b/arch/mips/include/asm/gcmpregs.h
@@ -27,14 +27,32 @@
#define GCMPGCBOFS(reg) GCMPOFS(GCB, GCB, reg)
#define GCMPGCBOFSn(reg, n) GCMPOFSn(GCB, GCB, reg, n)
#define GCMPCLCBOFS(reg) GCMPOFS(CLCB, CCB, reg)
+#define GCMPCLCBOFSn(reg, n) GCMPOFSn(CLCB, CCB, reg, n)
#define GCMPCOCBOFS(reg) GCMPOFS(COCB, CCB, reg)
+#define GCMPCOCBOFSn(reg, n) GCMPOFSn(COCB, CCB, reg, n)
#define GCMPGDBOFS(reg) GCMPOFS(GDB, GDB, reg)
/* GCMP register access */
#define GCMPGCB(reg) REGP(_gcmp_base, GCMPGCBOFS(reg))
+#define GCMPGCBhi(reg) REGP(_gcmp_base, (GCMPGCBOFS(reg) + 4))
+#define GCMPGCBlo(reg) GCMPGCB(reg)
+#if defined(CONFIG_64BIT) || defined(CONFIG_64BIT_PHYS_ADDR)
+#define GCMPGCBaddr(reg) ((((phys_addr_t)(GCMPGCBhi(reg))) << 32) | \
+ GCMPGCBlo(reg))
+#define GCMPGCBaddrWrite(reg,val) (GCMPGCBhi(reg) = (u32)((phys_addr_t)(val) >> 32), \
+ GCMPGCBlo(reg) = (u32)(val))
+#else
+#define GCMPGCBaddr(reg) GCMPGCB(reg)
+#define GCMPGCBaddrWrite(reg,val) (GCMPGCB(reg) = (u32)(val))
+#endif
#define GCMPGCBn(reg, n) REGP(_gcmp_base, GCMPGCBOFSn(reg, n))
#define GCMPCLCB(reg) REGP(_gcmp_base, GCMPCLCBOFS(reg))
+#define GCMPCLCBn(reg, n) REGP(_gcmp_base, GCMPCLCBOFSn(reg, n))
#define GCMPCOCB(reg) REGP(_gcmp_base, GCMPCOCBOFS(reg))
+#define GCMPCOCBhi(reg) REGP(_gcmp_base, (GCMPCOCBOFS(reg) + 4))
+#define GCMPCOCBlo(reg) GCMPCOCB(reg)
+#define GCMPCOCBn(reg, n) REGP(_gcmp_base, GCMPCOCBOFSn(reg, n))
#define GCMPGDB(reg) REGP(_gcmp_base, GCMPGDBOFS(reg))
/* Mask generation */
@@ -51,33 +69,79 @@
#define GCMP_GCB_GC_NUMCORES_MSK GCMPGCBMSK(GC_NUMCORES, 8)
#define GCMP_GCB_GCMPB_OFS 0x0008 /* Global GCMP Base */
#define GCMP_GCB_GCMPB_GCMPBASE_SHF 15
-#define GCMP_GCB_GCMPB_GCMPBASE_MSK GCMPGCBMSK(GCMPB_GCMPBASE, 17)
+#ifndef CONFIG_64BIT
+#define GCMP_GCB_GCMPB_GCMPBASE_MSK GCMPGCBMSK(GCMPB_GCMPBASE, 17)
+#else
+#define GCMP_GCB_GCMPB_GCMPBASE_MSK GCMPGCBMSK(GCMPB_GCMPBASE, 32)
+#endif
#define GCMP_GCB_GCMPB_CMDEFTGT_SHF 0
#define GCMP_GCB_GCMPB_CMDEFTGT_MSK GCMPGCBMSK(GCMPB_CMDEFTGT, 2)
#define GCMP_GCB_GCMPB_CMDEFTGT_DISABLED 0
#define GCMP_GCB_GCMPB_CMDEFTGT_MEM 1
#define GCMP_GCB_GCMPB_CMDEFTGT_IOCU1 2
#define GCMP_GCB_GCMPB_CMDEFTGT_IOCU2 3
-#define GCMP_GCB_CCMC_OFS 0x0010 /* Global CM Control */
+#define GCMP_GCB_GCMC_OFS 0x0010 /* Global CM Control */
+#define GCMP_GCB_GCMC2_OFS 0x0018 /* Global CM Control2/CM3 Alt Control */
#define GCMP_GCB_GCSRAP_OFS 0x0020 /* Global CSR Access Privilege */
#define GCMP_GCB_GCSRAP_CMACCESS_SHF 0
#define GCMP_GCB_GCSRAP_CMACCESS_MSK GCMPGCBMSK(GCSRAP_CMACCESS, 8)
#define GCMP_GCB_GCMPREV_OFS 0x0030 /* GCMP Revision Register */
+#define GCMP_GCB_GCMPREV_MAJOR_SHF 8
+#define GCMP_GCB_GCMPREV_MAJOR_MSK GCMPGCBMSK(GCMPREV_MAJOR, 8)
+#define GCMP_GCB_GCMPREV_MINOR_SHF 0
+#define GCMP_GCB_GCMPREV_MINOR_MSK GCMPGCBMSK(GCMPREV_MINOR, 8)
+#define GCMP_GCB_GCMECTL_OFS 0x0038 /* Global CM3 Error Control */
#define GCMP_GCB_GCMEM_OFS 0x0040 /* Global CM Error Mask */
#define GCMP_GCB_GCMEC_OFS 0x0048 /* Global CM Error Cause */
#define GCMP_GCB_GMEC_ERROR_TYPE_SHF 27
#define GCMP_GCB_GMEC_ERROR_TYPE_MSK GCMPGCBMSK(GMEC_ERROR_TYPE, 5)
#define GCMP_GCB_GMEC_ERROR_INFO_SHF 0
-#define GCMP_GCB_GMEC_ERROR_INFO_MSK GCMPGCBMSK(GMEC_ERROR_INFO, 27)
+#define GCMP_GCB_GMEC_ERROR_INFO_MSK GCMPGCBMSK(GMEC_ERROR_INFO, 27)
#define GCMP_GCB_GCMEA_OFS 0x0050 /* Global CM Error Address */
#define GCMP_GCB_GCMEO_OFS 0x0058 /* Global CM Error Multiple */
#define GCMP_GCB_GMEO_ERROR_2ND_SHF 0
#define GCMP_GCB_GMEO_ERROR_2ND_MSK GCMPGCBMSK(GMEO_ERROR_2ND, 5)
-#define GCMP_GCB_GICBA_OFS 0x0080 /* Global Interrupt Controller Base Address */
+#define GCMP_GCB_GCMCUS_OFS 0x0060 /* GCR Custom Base */
+#define GCMP_GCB_GCMCST_OFS 0x0068 /* GCR Custom Status */
+#define GCMP_GCB_GCML2S_OFS 0x0070 /* Global L2 only Sync Register */
+#define GCMP_GCB_GCML2S_EN_SHF 0
+#define GCMP_GCB_GCML2S_EN_MSK GCMPGCBMSK(GCML2S_EN, 1)
+#define GCMP_GCB_GICBA_OFS 0x0080 /* Global Interrupt Controller Base Address */
#define GCMP_GCB_GICBA_BASE_SHF 17
#define GCMP_GCB_GICBA_BASE_MSK GCMPGCBMSK(GICBA_BASE, 15)
#define GCMP_GCB_GICBA_EN_SHF 0
#define GCMP_GCB_GICBA_EN_MSK GCMPGCBMSK(GICBA_EN, 1)
+#define GCMP_GCB_CPCBA_OFS 0x0088 /* CPC Base Address */
+#define GCMP_GCB_CPCBA_SHF 15
+#define GCMP_GCB_CPCBA_MSK GCMPGCBMSK(CPCBA, 17)
+#define GCMP_GCB_CPCBA_EN_SHF 0
+#define GCMP_GCB_CPCBA_EN_MSK GCMPGCBMSK(CPCBA_EN, 1)
+
+#define GCMP_GCB_GICST_OFS 0x00D0 /* Global Interrupt Controller Status */
+#define GCMP_GCB_GICST_EN_SHF 0
+#define GCMP_GCB_GICST_EN_MSK GCMPGCBMSK(GICST_EN, 1)
+#define GCMP_GCB_GCSHREV_OFS 0x00E0 /* Cache Revision */
+#define GCMP_GCB_CPCST_OFS 0x00F0 /* CPC Status */
+#define GCMP_GCB_CPCST_EN_SHF 0
+#define GCMP_GCB_CPCST_EN_MSK GCMPGCBMSK(CPCST_EN, 1)
+
+#define GCMP_GCB_IOCBASE_OFS 0x0100 /* IOCU Base Address */
+#define GCMP_GCB_IOST_OFS 0x0108 /* IOMMU Status */
+#define GCMP_GCB_G3CSRAP_OFS 0x0120 /* CM3 CSR Access Privilege Register */
+#define GCMP_GCB_L2CONFIG_OFS 0x0130 /* CM3 L3 Config */
+#define GCMP_GCB_L2CONFIG_ASSOC_SHF 0
+#define GCMP_GCB_L2CONFIG_ASSOC_MASK GCMPGCBMSK(L2CONFIG_ASSOC, 8)
+#define GCMP_GCB_L2CONFIG_LSIZE_SHF 8
+#define GCMP_GCB_L2CONFIG_LSIZE_MASK GCMPGCBMSK(L2CONFIG_LSIZE, 4)
+#define GCMP_GCB_L2CONFIG_SSIZE_SHF 12
+#define GCMP_GCB_L2CONFIG_SSIZE_MASK GCMPGCBMSK(L2CONFIG_SSIZE, 4)
+#define GCMP_GCB_L2CONFIG_BYPASS_SHF 20
+#define GCMP_GCB_L2CONFIG_BYPASS_MASK GCMPGCBMSK(L2CONFIG_BYPASS, 1)
+
+#define GCMP_GCB_SYSCONF_OFS 0x0140 /* CM3 SYS Config */
+#define GCMP_GCB_SYSCONF2_OFS 0x0150 /* CM3 SYS Config2 */
+#define GCMP_GCB_SYSCONF2_VPWIDTH_SHF 0
+#define GCMP_GCB_SYSCONF2_VPWIDTH_MASK GCMPGCBMSK(SYSCONF2_VPWIDTH, 4)
/* GCB Regions */
#define GCMP_GCB_CMxBASE_OFS(n) (0x0090+16*(n)) /* Global Region[0-3] Base Address */
@@ -93,15 +157,53 @@
#define GCMP_GCB_CMxMASK_CMREGTGT_IOCU1 2
#define GCMP_GCB_CMxMASK_CMREGTGT_IOCU2 3
+#define GCMP_GCB_GAOR0BA_OFS 0x0190 /* Attribute-Only Region0 Base Address */
+#define GCMP_GCB_GAOR0MASK_OFS 0x0198 /* Attribute-Only Region0 Mask */
+#define GCMP_GCB_GAOR1BA_OFS 0x01A0 /* Attribute-Only Region1 Base Address */
+#define GCMP_GCB_GAOR1MASK_OFS 0x01A8 /* Attribute-Only Region1 Mask */
+
+#define GCMP_GCB_IOCUREV_OFS 0x0200 /* IOCU Revision */
+
+#define GCMP_GCB_GAOR2BA_OFS 0x0210 /* Attribute-Only Region2 Base Address */
+#define GCMP_GCB_GAOR2MASK_OFS 0x0218 /* Attribute-Only Region2 Mask */
+#define GCMP_GCB_GAOR3BA_OFS 0x0220 /* Attribute-Only Region3 Base Address */
+#define GCMP_GCB_GAOR3MASK_OFS 0x0228 /* Attribute-Only Region3 Mask */
+#define GCMP_GCB_L2RAMCONF_OFS 0x0240 /* CM3 L2 RAM Configuration */
+#define GCMP_GCB_SCRATCH0_OFS 0x0280 /* CM3 Scratch 0 */
+#define GCMP_GCB_SCRATCH1_OFS 0x0288 /* CM3 Scratch 1 */
+#define GCMP_GCB_GCML2P_OFS 0x0300 /* L2 Prefetch Control */
+#define GCMP_GCB_GCML2P_PAGE_MASK 0xfffff000 /* ... page mask */
+#define GCMP_GCB_GCML2P_PFTEN 0x00000100 /* L2 Prefetch Enable */
+#define GCMP_GCB_GCML2P_NPFT 0x000000ff /* Number of L2 prefetches */
+#define GCMP_GCB_GCML2PB_OFS 0x0308 /* L2 Prefetch Control B */
+#define GCMP_GCB_GCML2PB_CODE_PFTEN 0x00000100 /* L2 Code Prefetch Enable */
+#define GCMP_GCB_L2PREF_OFS 0x0320 /* CM3 L2 Prefetch Tuning */
+#define GCMP_GCB_L2PREFAT0_OFS 0x0340 /* CM3 L2 Prefetch Tuning A Tier 0 */
+#define GCMP_GCB_L2PREFBT0_OFS 0x0348 /* CM3 L2 Prefetch Tuning B Tier 0 */
+#define GCMP_GCB_L2PREFAT1_OFS 0x0360 /* CM3 L2 Prefetch Tuning A Tier 1 */
+#define GCMP_GCB_L2PREFBT1_OFS 0x0368 /* CM3 L2 Prefetch Tuning B Tier 1 */
+#define GCMP_GCB_L2PREFAT2_OFS 0x0380 /* CM3 L2 Prefetch Tuning A Tier 2 */
+#define GCMP_GCB_L2PREFBT2_OFS 0x0388 /* CM3 L2 Prefetch Tuning B Tier 2 */
+#define GCMP_GCB_L2PREFAT3_OFS 0x03A0 /* CM3 L2 Prefetch Tuning A Tier 3 */
+#define GCMP_GCB_L2PREFBT3_OFS 0x03A8 /* CM3 L2 Prefetch Tuning B Tier 3 */
+#define GCMP_GCB_L2PREFAT4_OFS 0x03C0 /* CM3 L2 Prefetch Tuning A Tier 4 */
+#define GCMP_GCB_L2PREFBT4_OFS 0x03C8 /* CM3 L2 Prefetch Tuning B Tier 4 */
+#define GCMP_GCB_L2TRCADDR_OFS 0x0600 /* CM3 L2 Tag RAM Cache OP Addr */
+#define GCMP_GCB_L2TRCST_OFS 0x0608 /* CM3 L2 Tag RAM Cache OP State */
+#define GCMP_GCB_L2DRCOP_OFS 0x0610 /* CM3 L2 DATA RAM Cache OP */
+#define GCMP_GCB_L2TDECCOP_OFS 0x0618 /* CM3 L2 Tag and DATA ECC Cache OP */
+#define GCMP_GCB_BEVBASE_OFS 0x0680 /* CM3 BEV Base */
/* Core local/Core other control block registers */
#define GCMP_CCB_RESETR_OFS 0x0000 /* Reset Release */
#define GCMP_CCB_RESETR_INRESET_SHF 0
#define GCMP_CCB_RESETR_INRESET_MSK GCMPCCBMSK(RESETR_INRESET, 16)
+#define GCMP_CCB_COHENB_OFS 0x0008 /* CM3 Coherence Enable */
#define GCMP_CCB_COHCTL_OFS 0x0008 /* Coherence Control */
#define GCMP_CCB_COHCTL_DOMAIN_SHF 0
#define GCMP_CCB_COHCTL_DOMAIN_MSK GCMPCCBMSK(COHCTL_DOMAIN, 8)
-#define GCMP_CCB_CFG_OFS 0x0010 /* Config */
+#define GCMP_CCB_COHCTL_DOMAIN_ENABLE (GCMP_CCB_COHCTL_DOMAIN_MSK)
+#define GCMP_CCB_CFG_OFS 0x0010 /* Config */
#define GCMP_CCB_CFG_IOCUTYPE_SHF 10
#define GCMP_CCB_CFG_IOCUTYPE_MSK GCMPCCBMSK(CFG_IOCUTYPE, 2)
#define GCMP_CCB_CFG_IOCUTYPE_CPU 0
@@ -115,11 +217,31 @@
#define GCMP_CCB_RESETBASE_OFS 0x0020 /* Reset Exception Base */
#define GCMP_CCB_RESETBASE_BEV_SHF 12
#define GCMP_CCB_RESETBASE_BEV_MSK GCMPCCBMSK(RESETBASE_BEV, 20)
+#define GCMP_CCB_RESETBASEEXT_OFS 0x0030 /* Reset Exception Base Extension */
+#define GCMP_CCB_RESETEXTBASE_BEV_SHF 20
+#define GCMP_CCB_RESETEXTBASE_BEV_MASK_MSK GCMPCCBMSK(RESETEXTBASE_BEV, 8)
+#define GCMP_CCB_RESETEXTBASE_LOWBITS_SHF 0
+#define GCMP_CCB_RESETEXTBASE_BEV_MASK_LOWBITS GCMPCCBMSK(RESETEXTBASE_LOWBITS, 20)
#define GCMP_CCB_ID_OFS 0x0028 /* Identification */
#define GCMP_CCB_DINTGROUP_OFS 0x0030 /* DINT Group Participate */
#define GCMP_CCB_DBGGROUP_OFS 0x0100 /* DebugBreak Group */
+#define GCMP_CCB_TCIDxPRI_OFS(n) (0x0040+8*(n)) /* TCID x PRIORITY */
+
extern int __init gcmp_probe(unsigned long, unsigned long);
-extern int __init gcmp_niocu(void);
extern void __init gcmp_setregion(int, unsigned long, unsigned long, int);
+#ifdef CONFIG_MIPS_CMP
+extern int __init gcmp_niocu(void);
+extern int gcmp_present;
+extern int gcmp3_present;
+#define GCR3_OTHER(core,vpe) ((core << 8) | vpe)
+#define GCR3_OTHER_CPU_DATA(cpu) ((cpu_data[cpu].core << 8) | cpu_data[cpu].vpe_id)
+#else
+#define gcmp_niocu(x) (0)
+#define gcmp_present (0)
+#define gcmp3_present (0)
+#endif
+extern unsigned long _gcmp_base;
+#define GCMP_L2SYNC_OFFSET 0x8100
+
#endif /* _ASM_GCMPREGS_H */
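GCMPGCBaddr()/GCMPGCBaddrWrite() hide the 32/64-bit split: on 64-bit (or 64-bit-phys) kernels a GCR register is accessed as a hi/lo pair, otherwise as one 32-bit word. A hedged usage sketch, assuming gcmp_probe() has set _gcmp_base; the GIC physical base is an assumed platform address:

    /* Hypothetical: place and enable the GIC through the GCR. */
    phys_addr_t gic_phys = 0x1bdc0000; /* assumed platform address */

    GCMPGCBaddrWrite(GICBA, gic_phys | GCMP_GCB_GICBA_EN_MSK);
    if (GCMPGCB(GICST) & GCMP_GCB_GICST_EN_MSK)
            pr_info("GIC enabled\n");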
diff --git a/arch/mips/include/asm/gic.h b/arch/mips/include/asm/gic.h
index 7153b32..168b230 100644
--- a/arch/mips/include/asm/gic.h
+++ b/arch/mips/include/asm/gic.h
@@ -21,14 +21,18 @@
#define GIC_NUM_INTRS (24 + NR_CPUS * 2)
-#define MSK(n) ((1 << (n)) - 1)
+#define MSK(n) ((1UL << (n)) - 1)
#define REG32(addr) (*(volatile unsigned int *) (addr))
+#define REGUL(addr) (*(volatile unsigned long *) (addr))
#define REG(base, offs) REG32((unsigned long)(base) + offs##_##OFS)
#define REGP(base, phys) REG32((unsigned long)(base) + (phys))
+#define REGA(base, phys) REGUL((unsigned long)(base) + (phys))
/* Accessors */
#define GIC_REG(segment, offset) \
REG32(_gic_base + segment##_##SECTION_OFS + offset##_##OFS)
+#define GIC_REGhi(segment, offset) \
+ REG32(_gic_base + segment##_##SECTION_OFS + offset##_##OFS + 4)
#define GIC_REG_ADDR(segment, offset) \
REG32(_gic_base + segment##_##SECTION_OFS + offset)
@@ -159,6 +163,15 @@
(GIC_SH_INTR_MAP_TO_VPE_BASE_OFS + (32 * (intr)) + (((vpe) / 32) * 4))
#define GIC_SH_MAP_TO_VPE_REG_BIT(vpe) (1 << ((vpe) % 32))
+#define GIC_DINT_OFS 0x6000
+#define GIC_EJTAG_OFS 0x6010
+#define GIC_DTLOW_OFS 0x6020
+#define GIC_DTHI_OFS 0x6028
+#define GIC_DTEXT_OFS 0x6070
+#define GIC_DMODECONF_OFS 0x6080
+#define GIC_DINTGRP_OFS 0x6090
+#define GIC_DMSTATUS_OFS 0x60A0
+
/* Convert an interrupt number to a byte offset/bit for multi-word registers */
#define GIC_INTR_OFS(intr) (((intr) / 32)*4)
#define GIC_INTR_BIT(intr) ((intr) % 32)
@@ -194,10 +207,12 @@
#define GIC_VPE_WD_MAP_OFS 0x0040
#define GIC_VPE_COMPARE_MAP_OFS 0x0044
#define GIC_VPE_TIMER_MAP_OFS 0x0048
+#define GIC_VPE_FDEBUG_MAP_OFS 0x004c
#define GIC_VPE_PERFCTR_MAP_OFS 0x0050
#define GIC_VPE_SWINT0_MAP_OFS 0x0054
#define GIC_VPE_SWINT1_MAP_OFS 0x0058
#define GIC_VPE_OTHER_ADDR_OFS 0x0080
+#define GIC_VPE_ID_OFS 0x0088
#define GIC_VPE_WD_CONFIG0_OFS 0x0090
#define GIC_VPE_WD_COUNT0_OFS 0x0094
#define GIC_VPE_WD_INITIAL0_OFS 0x0098
@@ -208,7 +223,9 @@
#define GIC_VPE_EIC_SS(intr) \
(GIC_VPE_EIC_SHADOW_SET_BASE + (4 * intr))
-#define GIC_VPE_EIC_VEC_BASE 0x0800
+#define GIC_VPE_GCTR_OFFSET_OFS 0x0200
+
+#define GIC_VPE_EIC_VEC_BASE 0x0800
#define GIC_VPE_EIC_VEC(intr) \
(GIC_VPE_EIC_VEC_BASE + (4 * intr))
@@ -217,6 +234,9 @@
#define GIC_VPE_TENABLE_INT_31_0_OFS 0x1080
#define GIC_VPE_TENABLE_INT_63_32_OFS 0x1084
+#define GIC_VPE_DINT_OFS 0x3000
+#define GIC_VPE_DEBUG_BREAK_OFS 0x3080
+
/* User Mode Visible Section Register Map */
#define GIC_UMV_SH_COUNTER_31_00_OFS 0x0000
#define GIC_UMV_SH_COUNTER_63_32_OFS 0x0004
@@ -351,7 +371,7 @@
/* Local GIC interrupts. */
#define GIC_INT_TMR (GIC_CPU_INT5)
-#define GIC_INT_PERFCTR (GIC_CPU_INT5)
+#define GIC_INT_PERFCTR (GIC_CPU_INT4)
/* Add 2 to convert non-EIC hardware interrupt to EIC vector number. */
#define GIC_CPU_TO_VEC_OFFSET (2)
@@ -359,6 +379,8 @@
/* Mapped interrupt to pin X, then GIC will generate the vector (X+1). */
#define GIC_PIN_TO_VEC_OFFSET (1)
+#define GIC_ID(cpu) (cpu_data[cpu].g_vpe)
+
#include <linux/clocksource.h>
#include <linux/irq.h>
@@ -388,4 +410,5 @@
extern void gic_irq_ack(struct irq_data *d);
extern void gic_finish_irq(struct irq_data *d);
extern void gic_platform_init(int irqs, struct irq_chip *irq_controller);
+extern void vpe_gic_setup(void);
#endif /* _ASM_GICREGS_H */
diff --git a/arch/mips/include/asm/gt64120.h b/arch/mips/include/asm/gt64120.h
index 2e72abb..8aeb687 100644
--- a/arch/mips/include/asm/gt64120.h
+++ b/arch/mips/include/asm/gt64120.h
@@ -24,7 +24,7 @@
#include <asm/addrspace.h>
#include <asm/byteorder.h>
-#define MSK(n) ((1 << (n)) - 1)
+#define MSK(n) ((1UL << (n)) - 1)
/*
* Register offset addresses
diff --git a/arch/mips/include/asm/hazards.h b/arch/mips/include/asm/hazards.h
index e3ee92d..5593482 100644
--- a/arch/mips/include/asm/hazards.h
+++ b/arch/mips/include/asm/hazards.h
@@ -21,7 +21,7 @@
/*
* TLB hazards
*/
-#if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_CPU_CAVIUM_OCTEON)
+#if defined(CONFIG_CPU_MIPSR6) || (defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_CPU_CAVIUM_OCTEON))
/*
* MIPSR2 defines ehb for hazard avoidance
@@ -52,7 +52,41 @@
* rsp. 64-bit code, so can't be used without conditional compilation.
 * The alternative is switching the assembler to 64-bit code which happens
* to work right even for 32-bit code ...
+ *
+ * LY22: Bad news - newer binutils begin to warn when DLA is used with
+ * ABI=32, even on MIPS64, hence the multiple variants below.
*/
+#if defined(CONFIG_CPU_MIPSR6) && defined(CONFIG_64BIT)
+#define instruction_hazard() \
+do { \
+ unsigned long tmp; \
+ \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set mips64r6 \n" \
+ " dla %0, 1f \n" \
+ " jr.hb %0 \n" \
+ " .set pop \n" \
+ "1: \n" \
+ : "=r" (tmp)); \
+} while (0)
+#else
+#ifdef CONFIG_CPU_MIPSR6
+#define instruction_hazard() \
+do { \
+ unsigned long tmp; \
+ \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set mips64r6 \n" \
+ " la %0, 1f \n" \
+ " jr.hb %0 \n" \
+ " .set pop \n" \
+ "1: \n" \
+ : "=r" (tmp)); \
+} while (0)
+#else /* !CONFIG_CPU_MIPSR6 */
+#ifdef CONFIG_64BIT
#define instruction_hazard() \
do { \
unsigned long tmp; \
@@ -65,6 +99,22 @@
"1: \n" \
: "=r" (tmp)); \
} while (0)
+#else
+#define instruction_hazard() \
+do { \
+ unsigned long tmp; \
+ \
+ __asm__ __volatile__( \
+ " .set mips64r2 \n" \
+ " la %0, 1f \n" \
+ " jr.hb %0 \n" \
+ " .set mips0 \n" \
+ "1: \n" \
+ : "=r" (tmp)); \
+} while (0)
+#endif
+#endif
+#endif
#elif (defined(CONFIG_CPU_MIPSR1) && !defined(CONFIG_MIPS_ALCHEMY)) || \
defined(CONFIG_CPU_BMIPS)
@@ -132,7 +182,7 @@
#define instruction_hazard() \
do { \
- if (cpu_has_mips_r2) \
+ if (cpu_has_mips_r2 || cpu_has_mips_r6) \
__instruction_hazard(); \
} while (0)
@@ -240,7 +290,7 @@
#define __disable_fpu_hazard
-#elif defined(CONFIG_CPU_MIPSR2)
+#elif defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
#define __enable_fpu_hazard \
___ehb
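instruction_hazard() is the heavyweight barrier for code that has just been modified: the jr.hb to the next line clears the hazard between the write and the subsequent instruction fetch. A typical (hypothetical) caller after patching kernel text:

    /* Hypothetical: make freshly written instructions visible. */
    flush_icache_range(start, end); /* write back D-cache, invalidate I-cache */
    instruction_hazard();           /* clear the instruction hazard */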
diff --git a/arch/mips/include/asm/highmem.h b/arch/mips/include/asm/highmem.h
index b0dd0c8..d0310dc0 100644
--- a/arch/mips/include/asm/highmem.h
+++ b/arch/mips/include/asm/highmem.h
@@ -37,11 +37,38 @@
* easily, subsequent pte tables have to be allocated in one physical
* chunk of RAM.
*/
+
+/* Page sizes below use 8 colours */
+#ifdef CONFIG_PAGE_SIZE_4KB
+#define LAST_PKMAP 4096
+#endif
+#ifdef CONFIG_PAGE_SIZE_8KB
+#define LAST_PKMAP 2048
+#endif
+#ifdef CONFIG_PAGE_SIZE_16KB
#define LAST_PKMAP 1024
+#endif
+
+/* 32KB and 64KB pages use 4 and 2 colours respectively to keep the space under control */
+#ifndef LAST_PKMAP
+#define LAST_PKMAP 1024
+#endif
+
#define LAST_PKMAP_MASK (LAST_PKMAP-1)
#define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
+#define ARCH_PKMAP_COLORING 1
+#define set_pkmap_color(pg, cl) do { (cl) = ((unsigned long)lowmem_page_address(pg) \
+ >> PAGE_SHIFT) & (FIX_N_COLOURS - 1); } while (0)
+#define get_last_pkmap_nr(p, cl) (last_pkmap_nr_arr[cl])
+#define get_next_pkmap_nr(p, cl) (last_pkmap_nr_arr[cl] = \
+ (((p) + FIX_N_COLOURS) & LAST_PKMAP_MASK))
+#define is_no_more_pkmaps(p, cl) ((p) < FIX_N_COLOURS)
+#define get_next_pkmap_counter(c, cl) ((c) - FIX_N_COLOURS)
+extern unsigned int last_pkmap_nr_arr[];
+
+
extern void * kmap_high(struct page *page);
extern void kunmap_high(struct page *page);
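The ARCH_PKMAP_COLORING hooks let the generic pkmap allocator in mm/highmem.c step through slots of one colour only, so a highmem page is always mapped at a virtual address congruent to its physical one. A hedged sketch of the coloured scan (pkmap_count is the generic allocator's table; the real integration into map_new_virtual() differs in detail):

    /* Hypothetical coloured scan inside map_new_virtual(). */
    unsigned int color, last;

    set_pkmap_color(page, color);              /* colour from physical address */
    last = get_last_pkmap_nr(0, color);        /* resume point for this colour */
    for (;;) {
            last = get_next_pkmap_nr(last, color); /* advance by FIX_N_COLOURS */
            if (is_no_more_pkmaps(last, color))
                    break;                     /* wrapped: caller must flush */
            if (!pkmap_count[last])
                    return PKMAP_ADDR(last);   /* free slot, right colour */
    }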
diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h
index b7e5985..4bbea8d 100644
--- a/arch/mips/include/asm/io.h
+++ b/arch/mips/include/asm/io.h
@@ -162,6 +162,9 @@
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt
+#define phys_to_bus(x) ((dma_addr_t)(x))
+#define bus_to_phys(x) ((phys_t)(x))
+
/*
* Change "struct page" to physical address.
*/
@@ -170,6 +173,11 @@
extern void __iomem * __ioremap(phys_t offset, phys_t size, unsigned long flags);
extern void __iounmap(const volatile void __iomem *addr);
+#ifndef CONFIG_PCI
+struct pci_dev;
+static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr) {}
+#endif
+
static inline void __iomem * __ioremap_mode(phys_t offset, unsigned long size,
unsigned long flags)
{
diff --git a/arch/mips/include/asm/irq_cpu.h b/arch/mips/include/asm/irq_cpu.h
index 3f11fdb..dbcadab 100644
--- a/arch/mips/include/asm/irq_cpu.h
+++ b/arch/mips/include/asm/irq_cpu.h
@@ -22,5 +22,6 @@
extern int mips_cpu_intc_init(struct device_node *of_node,
struct device_node *parent);
#endif
+extern unsigned mips_smp_c0_status_mask;
#endif /* _ASM_IRQ_CPU_H */
diff --git a/arch/mips/include/asm/irqflags.h b/arch/mips/include/asm/irqflags.h
index 45c0095..73df9bc 100644
--- a/arch/mips/include/asm/irqflags.h
+++ b/arch/mips/include/asm/irqflags.h
@@ -17,7 +17,7 @@
#include <linux/stringify.h>
#include <asm/hazards.h>
-#if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC)
+#if (defined(CONFIG_CPU_MIPSR6) || defined(CONFIG_CPU_MIPSR2)) && !defined(CONFIG_MIPS_MT_SMTC)
static inline void arch_local_irq_disable(void)
{
@@ -118,7 +118,7 @@
unsigned long arch_local_irq_save(void);
void arch_local_irq_restore(unsigned long flags);
void __arch_local_irq_restore(unsigned long flags);
-#endif /* if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC) */
+#endif /* if (defined(CONFIG_CPU_MIPSR6) || defined(CONFIG_CPU_MIPSR2)) && !defined(CONFIG_MIPS_MT_SMTC) */
extern void smtc_ipi_replay(void);
@@ -141,7 +141,7 @@
" ori $1, 0x400 \n"
" xori $1, 0x400 \n"
" mtc0 $1, $2, 1 \n"
-#elif defined(CONFIG_CPU_MIPSR2)
+#elif defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
" ei \n"
#else
" mfc0 $1,$12 \n"
diff --git a/arch/mips/include/asm/kspd.h b/arch/mips/include/asm/kspd.h
deleted file mode 100644
index ec68329..0000000
--- a/arch/mips/include/asm/kspd.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved.
- *
- * This program is free software; you can distribute it and/or modify it
- * under the terms of the GNU General Public License (Version 2) as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- *
- */
-
-#ifndef _ASM_KSPD_H
-#define _ASM_KSPD_H
-
-struct kspd_notifications {
- void (*kspd_sp_exit)(int sp_id);
-
- struct list_head list;
-};
-
-static inline void kspd_notify(struct kspd_notifications *notify)
-{
-}
-
-#endif
diff --git a/arch/mips/include/asm/local.h b/arch/mips/include/asm/local.h
index d44622c..c74b048 100644
--- a/arch/mips/include/asm/local.h
+++ b/arch/mips/include/asm/local.h
@@ -35,10 +35,10 @@
__asm__ __volatile__(
" .set mips3 \n"
"1:" __LL "%1, %2 # local_add_return \n"
- " addu %0, %1, %3 \n"
+ __ADDU "%0, %1, %3 \n"
__SC "%0, %2 \n"
" beqzl %0, 1b \n"
- " addu %0, %1, %3 \n"
+ __ADDU "%0, %1, %3 \n"
" .set mips0 \n"
: "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
: "Ir" (i), "m" (l->a.counter)
@@ -47,12 +47,16 @@
unsigned long temp;
__asm__ __volatile__(
- " .set mips3 \n"
- "1:" __LL "%1, %2 # local_add_return \n"
- " addu %0, %1, %3 \n"
+#ifdef CONFIG_CPU_MIPSR6
+ " .set mips64r6 \n"
+#else
+ " .set mips3 \n"
+#endif
+ "1:" __LL "%1, %2 # local_add_return \n"
+ __ADDU "%0, %1, %3 \n"
__SC "%0, %2 \n"
" beqz %0, 1b \n"
- " addu %0, %1, %3 \n"
+ __ADDU "%0, %1, %3 \n"
" .set mips0 \n"
: "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
: "Ir" (i), "m" (l->a.counter)
@@ -80,10 +84,10 @@
__asm__ __volatile__(
" .set mips3 \n"
"1:" __LL "%1, %2 # local_sub_return \n"
- " subu %0, %1, %3 \n"
+ __SUBU "%0, %1, %3 \n"
__SC "%0, %2 \n"
" beqzl %0, 1b \n"
- " subu %0, %1, %3 \n"
+ __SUBU "%0, %1, %3 \n"
" .set mips0 \n"
: "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
: "Ir" (i), "m" (l->a.counter)
@@ -92,12 +96,16 @@
unsigned long temp;
__asm__ __volatile__(
+#ifdef CONFIG_CPU_MIPSR6
+ " .set mips64r6 \n"
+#else
" .set mips3 \n"
- "1:" __LL "%1, %2 # local_sub_return \n"
- " subu %0, %1, %3 \n"
+#endif
+ "1:" __LL "%1, %2 # local_sub_return \n"
+ __SUBU "%0, %1, %3 \n"
__SC "%0, %2 \n"
" beqz %0, 1b \n"
- " subu %0, %1, %3 \n"
+ __SUBU "%0, %1, %3 \n"
" .set mips0 \n"
: "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
: "Ir" (i), "m" (l->a.counter)
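The __ADDU/__SUBU substitution lets one asm body serve both 32-bit and 64-bit counters; semantically the LL/SC retry loop is an atomic fetch-add that returns the new value. A C11 model of that semantic (not the MIPS implementation):

/*
 * C11 model of the LL/SC loop in local_add_return(): the assembly retries
 * the load-linked/store-conditional pair until the store succeeds, which
 * is semantically an atomic fetch-add returning the new value.
 */
#include <stdio.h>
#include <stdatomic.h>

static long local_add_return_model(long i, atomic_long *counter)
{
	/* fetch_add returns the old value; the macro returns the new one. */
	return atomic_fetch_add(counter, i) + i;
}

int main(void)
{
	atomic_long counter = 40;

	printf("new value: %ld\n", local_add_return_model(2, &counter));
	return 0;
}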
diff --git a/arch/mips/include/asm/local64.h b/arch/mips/include/asm/local64.h
deleted file mode 100644
index 36c93b5..0000000
--- a/arch/mips/include/asm/local64.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/local64.h>
diff --git a/arch/mips/include/asm/mach-ar7/spaces.h b/arch/mips/include/asm/mach-ar7/spaces.h
index ac28f27..660ab64 100644
--- a/arch/mips/include/asm/mach-ar7/spaces.h
+++ b/arch/mips/include/asm/mach-ar7/spaces.h
@@ -14,8 +14,11 @@
* This handles the memory map.
* We handle pages at KSEG0 for kernels with 32 bit address space.
*/
-#define PAGE_OFFSET 0x94000000UL
-#define PHYS_OFFSET 0x14000000UL
+#define PAGE_OFFSET _AC(0x94000000, UL)
+#define PHYS_OFFSET _AC(0x14000000, UL)
+
+#define UNCAC_BASE _AC(0xb4000000, UL) /* 0xa0000000 + PHYS_OFFSET */
+#define IO_BASE UNCAC_BASE
#include <asm/mach-generic/spaces.h>
diff --git a/arch/mips/include/asm/mach-generic/dma-coherence.h b/arch/mips/include/asm/mach-generic/dma-coherence.h
index fe23034..faa0144 100644
--- a/arch/mips/include/asm/mach-generic/dma-coherence.h
+++ b/arch/mips/include/asm/mach-generic/dma-coherence.h
@@ -14,19 +14,19 @@
static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr,
size_t size)
{
- return virt_to_phys(addr);
+ return virt_to_bus(addr);
}
static inline dma_addr_t plat_map_dma_mem_page(struct device *dev,
struct page *page)
{
- return page_to_phys(page);
+ return phys_to_bus(page_to_phys(page));
}
static inline unsigned long plat_dma_addr_to_phys(struct device *dev,
dma_addr_t dma_addr)
{
- return dma_addr;
+ return bus_to_phys(dma_addr);
}
static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr,
@@ -62,7 +62,7 @@
#ifdef CONFIG_DMA_COHERENT
return 1;
#else
- return coherentio;
+ return (coherentio > 0);
#endif
}
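plat_map_dma_mem() now hands out bus addresses instead of raw physical addresses; on this generic platform phys_to_bus()/bus_to_phys() are identity casts, but the hooks let a board with an offset bus space substitute a real translation. A userspace sketch of the layering, with an assumed PAGE_OFFSET and a zero bus offset matching the generic code:

/*
 * Userspace sketch of the virt -> phys -> bus translation layering used by
 * plat_map_dma_mem() above. PAGE_OFFSET and BUS_OFFSET are assumed values
 * for illustration; the generic platform uses an identity phys<->bus map
 * (BUS_OFFSET == 0).
 */
#include <stdio.h>
#include <stdint.h>

#define PAGE_OFFSET	0x80000000UL	/* assumed cached kernel base */
#define BUS_OFFSET	0x00000000UL	/* identity map, as in the generic code */

typedef uint64_t dma_addr_t;

static unsigned long virt_to_phys_model(unsigned long vaddr)
{
	return vaddr - PAGE_OFFSET;
}

static dma_addr_t phys_to_bus_model(unsigned long paddr)
{
	return (dma_addr_t)(paddr + BUS_OFFSET);
}

static unsigned long bus_to_phys_model(dma_addr_t baddr)
{
	return (unsigned long)(baddr - BUS_OFFSET);
}

int main(void)
{
	unsigned long vaddr = 0x80123000UL;	/* some cached kernel address */
	dma_addr_t dma = phys_to_bus_model(virt_to_phys_model(vaddr));

	printf("virt %#lx -> dma %#llx -> phys %#lx\n",
	       vaddr, (unsigned long long)dma, bus_to_phys_model(dma));
	return 0;
}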
diff --git a/arch/mips/include/asm/mach-generic/spaces.h b/arch/mips/include/asm/mach-generic/spaces.h
index 5b2f2e6..5e6eb21 100644
--- a/arch/mips/include/asm/mach-generic/spaces.h
+++ b/arch/mips/include/asm/mach-generic/spaces.h
@@ -6,6 +6,7 @@
* Copyright (C) 1994 - 1999, 2000, 03, 04 Ralf Baechle
* Copyright (C) 2000, 2002 Maciej W. Rozycki
* Copyright (C) 1990, 1999, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2015 Imagination Technologies
*/
#ifndef _ASM_MACH_GENERIC_SPACES_H
#define _ASM_MACH_GENERIC_SPACES_H
@@ -25,8 +26,12 @@
#else
#define CAC_BASE _AC(0x80000000, UL)
#endif
+#ifndef IO_BASE
#define IO_BASE _AC(0xa0000000, UL)
+#endif
+#ifndef UNCAC_BASE
#define UNCAC_BASE _AC(0xa0000000, UL)
+#endif
#ifndef MAP_BASE
#ifdef CONFIG_KVM_GUEST
@@ -49,7 +54,9 @@
#ifndef CAC_BASE
#ifdef CONFIG_DMA_NONCOHERENT
+#ifndef CONFIG_MIPS_CMP
#define CAC_BASE _AC(0x9800000000000000, UL)
+#endif
#else
#define CAC_BASE _AC(0xa800000000000000, UL)
#endif
@@ -93,4 +100,19 @@
#define FIXADDR_TOP ((unsigned long)(long)(int)0xfffe0000)
#endif
+#ifndef in_module
+/*
+ * If the Instruction Pointer is in module space (0xc0000000), return true;
+ * otherwise, it is in kernel space (0x80000000), return false.
+ *
+ * FIXME: This will not work when the kernel space and module space are the
+ * same. If they are the same, we need to modify scripts/recordmcount.pl,
+ * ftrace_make_nop/call() and the other related parts to ensure the
+ * enabling/disabling of the calling site to _mcount is right for both kernel
+ * and module.
+ *
+ */
+#define in_module(ip) (((unsigned long)ip) & 0x40000000)
+#endif
+
#endif /* __ASM_MACH_GENERIC_SPACES_H */
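in_module() relies on module space sitting at 0xc0000000 and kernel space at 0x80000000, so bit 30 alone separates the two. A small sketch of the test, with made-up sample addresses:

/*
 * Demonstration of the in_module() bit test above: with the kernel at
 * 0x80000000 and modules at 0xc0000000, bit 30 alone tells the two apart.
 * The sample addresses are invented for illustration.
 */
#include <stdio.h>

#define in_module(ip) (((unsigned long)(ip)) & 0x40000000)

int main(void)
{
	unsigned long kernel_ip = 0x803a1c40UL;	/* kernel text */
	unsigned long module_ip = 0xc0123456UL;	/* module text */

	printf("%#lx in module space: %s\n", kernel_ip,
	       in_module(kernel_ip) ? "yes" : "no");
	printf("%#lx in module space: %s\n", module_ip,
	       in_module(module_ip) ? "yes" : "no");
	return 0;
}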
diff --git a/arch/mips/include/asm/mach-goldfish/hardware.h b/arch/mips/include/asm/mach-goldfish/hardware.h
new file mode 100644
index 0000000..cdde417
--- /dev/null
+++ b/arch/mips/include/asm/mach-goldfish/hardware.h
@@ -0,0 +1,36 @@
+/* include/asm-mips/mach-goldfish/hardware.h
+**
+** Copyright (C) 2007 Google, Inc.
+**
+** This software is licensed under the terms of the GNU General Public
+** License version 2, as published by the Free Software Foundation, and
+** may be copied, distributed, and modified under those terms.
+**
+** This program is distributed in the hope that it will be useful,
+** but WITHOUT ANY WARRANTY; without even the implied warranty of
+** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+** GNU General Public License for more details.
+**
+*/
+
+#ifndef __ASM_MACH_GOLDFISH_HARDWARE_H
+#define __ASM_MACH_GOLDFISH_HARDWARE_H
+
+/*
+ * Where in virtual memory the IO devices (timers, system controllers
+ * and so on) are mapped
+ */
+#define IO_SIZE 0x00800000 // How much?
+#define IO_START 0x00000000 // PA of IO
+
+#define IO_ADDRESS(x) (ioremap(IO_START, IO_SIZE) + (x))
+
+/* For now... */
+#define dma_alloc_writecombine(dev, size, handle, gfp) \
+ dma_alloc_coherent(dev, size, handle, gfp)
+
+
+#define dma_free_writecombine(dev, size, cpu_addr, handle) \
+ dma_free_coherent(dev, size, cpu_addr, handle)
+
+#endif
diff --git a/arch/mips/include/asm/mach-goldfish/irq.h b/arch/mips/include/asm/mach-goldfish/irq.h
new file mode 100644
index 0000000..d211140
--- /dev/null
+++ b/arch/mips/include/asm/mach-goldfish/irq.h
@@ -0,0 +1,33 @@
+/* include/asm-mips/mach-goldfish/irq.h
+**
+** Copyright (C) 2007 Google, Inc.
+**
+** This software is licensed under the terms of the GNU General Public
+** License version 2, as published by the Free Software Foundation, and
+** may be copied, distributed, and modified under those terms.
+**
+** This program is distributed in the hope that it will be useful,
+** but WITHOUT ANY WARRANTY; without even the implied warranty of
+** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+** GNU General Public License for more details.
+**
+*/
+
+#ifndef __ASM_MACH_GOLDFISH_IRQ_H
+#define __ASM_MACH_GOLDFISH_IRQ_H
+
+/* 0..7 MIPS CPU interrupts */
+#define MIPS_CPU_IRQ_BASE 0
+#define MIPS_CPU_IRQ_PIC 2
+#define MIPS_CPU_IRQ_FIQ 3 /* Not used? */
+#define MIPS_CPU_IRQ_COMPARE 7
+
+/* 8..39 Cascaded Goldfish PIC interrupts */
+#define GOLDFISH_IRQ_BASE 8
+#define GOLDFISH_IRQ_PBUS 1
+#define GOLDFISH_IRQ_RTC 3
+#define GOLDFISH_IRQ_TTY 4
+
+#define NR_IRQS 40
+
+#endif
diff --git a/arch/mips/include/asm/mach-goldfish/time.h b/arch/mips/include/asm/mach-goldfish/time.h
new file mode 100644
index 0000000..d8de2b4
--- /dev/null
+++ b/arch/mips/include/asm/mach-goldfish/time.h
@@ -0,0 +1,18 @@
+/* include/asm-mips/mach-goldfish/time.h
+**
+** Copyright (C) 2008 MIPS Technologies, Inc.
+**
+** This software is licensed under the terms of the GNU General Public
+** License version 2, as published by the Free Software Foundation, and
+** may be copied, distributed, and modified under those terms.
+**
+** This program is distributed in the hope that it will be useful,
+** but WITHOUT ANY WARRANTY; without even the implied warranty of
+** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+** GNU General Public License for more details.
+**
+*/
+
+struct timespec;
+extern void save_time_delta(struct timespec *delta, struct timespec *rtc);
+extern void restore_time_delta(struct timespec *delta, struct timespec *rtc);
diff --git a/arch/mips/include/asm/mach-goldfish/war.h b/arch/mips/include/asm/mach-goldfish/war.h
new file mode 100644
index 0000000..e3b9820
--- /dev/null
+++ b/arch/mips/include/asm/mach-goldfish/war.h
@@ -0,0 +1,25 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2002, 2004, 2007 by Ralf Baechle <ralf@linux-mips.org>
+ */
+#ifndef __ASM_MIPS_MACH_GOLDFISH_WAR_H
+#define __ASM_MIPS_MACH_GOLDFISH_WAR_H
+
+#define R4600_V1_INDEX_ICACHEOP_WAR 0
+#define R4600_V1_HIT_CACHEOP_WAR 0
+#define R4600_V2_HIT_CACHEOP_WAR 0
+#define R5432_CP0_INTERRUPT_WAR 0
+#define BCM1250_M3_WAR 0
+#define SIBYTE_1956_WAR 0
+#define MIPS4K_ICACHE_REFILL_WAR 0
+#define MIPS_CACHE_SYNC_WAR 0
+#define TX49XX_ICACHE_INDEX_INV_WAR 0
+#define RM9000_CDEX_SMP_WAR 0
+#define ICACHE_REFILLS_WORKAROUND_WAR 0
+#define R10000_LLSC_WAR 0
+#define MIPS34K_MISSED_ITLB_WAR 0
+
+#endif /* __ASM_MIPS_MACH_GOLDFISH_WAR_H */
diff --git a/arch/mips/include/asm/mach-ip28/spaces.h b/arch/mips/include/asm/mach-ip28/spaces.h
index 5edf05d..5d6a764 100644
--- a/arch/mips/include/asm/mach-ip28/spaces.h
+++ b/arch/mips/include/asm/mach-ip28/spaces.h
@@ -11,11 +11,14 @@
#ifndef _ASM_MACH_IP28_SPACES_H
#define _ASM_MACH_IP28_SPACES_H
-#define CAC_BASE 0xa800000000000000
+#define CAC_BASE _AC(0xa800000000000000, UL)
-#define HIGHMEM_START (~0UL)
+#define HIGHMEM_START (~0UL)
-#define PHYS_OFFSET _AC(0x20000000, UL)
+#define PHYS_OFFSET _AC(0x20000000, UL)
+
+#define UNCAC_BASE _AC(0xc0000000, UL) /* 0xa0000000 + PHYS_OFFSET */
+#define IO_BASE UNCAC_BASE
#include <asm/mach-generic/spaces.h>
diff --git a/arch/mips/include/asm/mach-malta/kernel-entry-init.h b/arch/mips/include/asm/mach-malta/kernel-entry-init.h
index 0b793e7..1cfcf31 100644
--- a/arch/mips/include/asm/mach-malta/kernel-entry-init.h
+++ b/arch/mips/include/asm/mach-malta/kernel-entry-init.h
@@ -4,11 +4,144 @@
* for more details.
*
* Chris Dearman (chris@mips.com)
- * Copyright (C) 2007 Mips Technologies, Inc.
+ * Leonid Yegoshin (yegoshin@mips.com)
+ * Copyright (C) 2012 Mips Technologies, Inc.
*/
#ifndef __ASM_MACH_MIPS_KERNEL_ENTRY_INIT_H
#define __ASM_MACH_MIPS_KERNEL_ENTRY_INIT_H
+ .macro eva_entry t1 t2 t0
+ andi \t1, 0x7 /* Config.K0 == CCA */
+ move \t2, \t1
+ ins \t2, \t1, 16, 3
+
+#ifdef CONFIG_EVA_OLD_MALTA_MAP
+
+#ifdef CONFIG_EVA_3GB
+ li \t0, ((MIPS_SEGCFG_UK << MIPS_SEGCFG_AM_SHIFT) | \
+ (0 << MIPS_SEGCFG_PA_SHIFT) | (2 << MIPS_SEGCFG_C_SHIFT) | \
+ (1 << MIPS_SEGCFG_EU_SHIFT)) | \
+ (((MIPS_SEGCFG_MK << MIPS_SEGCFG_AM_SHIFT) | \
+ (0 << MIPS_SEGCFG_PA_SHIFT) | \
+ (1 << MIPS_SEGCFG_EU_SHIFT)) << 16)
+ ins \t0, \t1, 16, 3
+ mtc0 \t0, $5, 2
+#ifdef CONFIG_SMP
+ li \t0, ((MIPS_SEGCFG_MUSUK << MIPS_SEGCFG_AM_SHIFT) | \
+ (0 << MIPS_SEGCFG_PA_SHIFT) | \
+ (1 << MIPS_SEGCFG_EU_SHIFT)) | \
+ (((MIPS_SEGCFG_MUSUK << MIPS_SEGCFG_AM_SHIFT) | \
+ (0 << MIPS_SEGCFG_PA_SHIFT) | \
+ (1 << MIPS_SEGCFG_EU_SHIFT)) << 16)
+#else
+ li \t0, ((MIPS_SEGCFG_MUSUK << MIPS_SEGCFG_AM_SHIFT) | \
+ (0 << MIPS_SEGCFG_PA_SHIFT) | \
+ (1 << MIPS_SEGCFG_EU_SHIFT)) | \
+ (((MIPS_SEGCFG_MUSUK << MIPS_SEGCFG_AM_SHIFT) | \
+ (4 << MIPS_SEGCFG_PA_SHIFT) | \
+ (1 << MIPS_SEGCFG_EU_SHIFT)) << 16)
+#endif /* CONFIG_SMP */
+ or \t0, \t2
+ mtc0 \t0, $5, 3
+#else /* !CONFIG_EVA_3GB */
+ li \t0, ((MIPS_SEGCFG_MK << MIPS_SEGCFG_AM_SHIFT) | \
+ (0 << MIPS_SEGCFG_PA_SHIFT) | \
+ (1 << MIPS_SEGCFG_EU_SHIFT)) | \
+ (((MIPS_SEGCFG_MK << MIPS_SEGCFG_AM_SHIFT) | \
+ (0 << MIPS_SEGCFG_PA_SHIFT) | \
+ (1 << MIPS_SEGCFG_EU_SHIFT)) << 16)
+ or \t0, \t2
+ mtc0 \t0, $5, 2
+#ifdef CONFIG_SMP
+ li \t0, ((MIPS_SEGCFG_MUSUK << MIPS_SEGCFG_AM_SHIFT) | \
+ (0 << MIPS_SEGCFG_PA_SHIFT) | (2 << MIPS_SEGCFG_C_SHIFT) | \
+ (1 << MIPS_SEGCFG_EU_SHIFT)) | \
+ (((MIPS_SEGCFG_MUSUK << MIPS_SEGCFG_AM_SHIFT) | \
+ (0 << MIPS_SEGCFG_PA_SHIFT) | \
+ (1 << MIPS_SEGCFG_EU_SHIFT)) << 16)
+#else
+ li \t0, ((MIPS_SEGCFG_MUSUK << MIPS_SEGCFG_AM_SHIFT) | \
+ (0 << MIPS_SEGCFG_PA_SHIFT) | (2 << MIPS_SEGCFG_C_SHIFT) | \
+ (1 << MIPS_SEGCFG_EU_SHIFT)) | \
+ (((MIPS_SEGCFG_MUSUK << MIPS_SEGCFG_AM_SHIFT) | \
+ (4 << MIPS_SEGCFG_PA_SHIFT) | \
+ (1 << MIPS_SEGCFG_EU_SHIFT)) << 16)
+#endif /* CONFIG_SMP */
+ ins \t0, \t1, 16, 3
+ mtc0 \t0, $5, 3
+#endif /* CONFIG_EVA_3GB */
+ li \t0, ((MIPS_SEGCFG_MUSUK << MIPS_SEGCFG_AM_SHIFT) | \
+ (6 << MIPS_SEGCFG_PA_SHIFT) | \
+ (1 << MIPS_SEGCFG_EU_SHIFT)) | \
+ (((MIPS_SEGCFG_MUSUK << MIPS_SEGCFG_AM_SHIFT) | \
+ (4 << MIPS_SEGCFG_PA_SHIFT) | \
+ (1 << MIPS_SEGCFG_EU_SHIFT)) << 16)
+
+#else /* !CONFIG_EVA_OLD_MALTA_MAP */
+
+#ifdef CONFIG_EVA_3GB
+ li \t0, ((MIPS_SEGCFG_UK << MIPS_SEGCFG_AM_SHIFT) | \
+ (0 << MIPS_SEGCFG_PA_SHIFT) | (2 << MIPS_SEGCFG_C_SHIFT) | \
+ (1 << MIPS_SEGCFG_EU_SHIFT)) | \
+ (((MIPS_SEGCFG_MK << MIPS_SEGCFG_AM_SHIFT) | \
+ (0 << MIPS_SEGCFG_PA_SHIFT) | \
+ (1 << MIPS_SEGCFG_EU_SHIFT)) << 16)
+ ins \t0, \t1, 16, 3
+ mtc0 \t0, $5, 2
+ li \t0, ((MIPS_SEGCFG_MUSUK << MIPS_SEGCFG_AM_SHIFT) | \
+ (6 << MIPS_SEGCFG_PA_SHIFT) | \
+ (1 << MIPS_SEGCFG_EU_SHIFT)) | \
+ (((MIPS_SEGCFG_MUSUK << MIPS_SEGCFG_AM_SHIFT) | \
+ (5 << MIPS_SEGCFG_PA_SHIFT) | \
+ (1 << MIPS_SEGCFG_EU_SHIFT)) << 16)
+ or \t0, \t2
+ mtc0 \t0, $5, 3
+ li \t0, ((MIPS_SEGCFG_MUSUK << MIPS_SEGCFG_AM_SHIFT) | \
+ (3 << MIPS_SEGCFG_PA_SHIFT) | \
+ (1 << MIPS_SEGCFG_EU_SHIFT)) | \
+ (((MIPS_SEGCFG_MUSUK << MIPS_SEGCFG_AM_SHIFT) | \
+ (1 << MIPS_SEGCFG_PA_SHIFT) | \
+ (1 << MIPS_SEGCFG_EU_SHIFT)) << 16)
+#else /* !CONFIG_EVA_3GB */
+ li \t0, ((MIPS_SEGCFG_MK << MIPS_SEGCFG_AM_SHIFT) | \
+ (0 << MIPS_SEGCFG_PA_SHIFT) | \
+ (1 << MIPS_SEGCFG_EU_SHIFT)) | \
+ (((MIPS_SEGCFG_MK << MIPS_SEGCFG_AM_SHIFT) | \
+ (0 << MIPS_SEGCFG_PA_SHIFT) | \
+ (1 << MIPS_SEGCFG_EU_SHIFT)) << 16)
+ or \t0, \t2
+ mtc0 \t0, $5, 2
+ li \t0, ((MIPS_SEGCFG_UK << MIPS_SEGCFG_AM_SHIFT) | \
+ (0 << MIPS_SEGCFG_PA_SHIFT) | (2 << MIPS_SEGCFG_C_SHIFT) | \
+ (1 << MIPS_SEGCFG_EU_SHIFT)) | \
+ (((MIPS_SEGCFG_UK << MIPS_SEGCFG_AM_SHIFT) | \
+ (0 << MIPS_SEGCFG_PA_SHIFT) | \
+ (1 << MIPS_SEGCFG_EU_SHIFT)) << 16)
+ ins \t0, \t1, 16, 3
+ mtc0 \t0, $5, 3
+ li \t0, ((MIPS_SEGCFG_MUSUK << MIPS_SEGCFG_AM_SHIFT) | \
+ (2 << MIPS_SEGCFG_PA_SHIFT) | \
+ (1 << MIPS_SEGCFG_EU_SHIFT)) | \
+ (((MIPS_SEGCFG_MUSUK << MIPS_SEGCFG_AM_SHIFT) | \
+ (0 << MIPS_SEGCFG_PA_SHIFT) | \
+ (1 << MIPS_SEGCFG_EU_SHIFT)) << 16)
+#endif /* CONFIG_EVA_3GB */
+
+#endif /* CONFIG_EVA_OLD_MALTA_MAP */
+
+ or \t0, \t2
+ mtc0 \t0, $5, 4
+ jal mips_ihb
+
+ mfc0 \t0, $16, 5
+ li \t2, 0x40000000 /* K bit */
+ or \t0, \t0, \t2
+ mtc0 \t0, $16, 5
+ sync
+ jal mips_ihb
+ .endm
+
+
.macro kernel_entry_setup
#ifdef CONFIG_MIPS_MT_SMTC
mfc0 t0, CP0_CONFIG
@@ -40,13 +173,65 @@
.asciz "SMTC kernel requires the MT ASE to run\n"
__FINIT
0:
-#endif
+#endif /* CONFIG_MIPS_MT_SMTC */
+
+#ifdef CONFIG_EVA
+ sync
+ ehb
+
+ mfc0 t1, CP0_CONFIG
+ bgez t1, 9f
+ mfc0 t0, CP0_CONFIG, 1
+ bgez t0, 9f
+ mfc0 t0, CP0_CONFIG, 2
+ bgez t0, 9f
+ mfc0 t0, CP0_CONFIG, 3
+ sll t0, t0, 6 /* SC bit */
+ bgez t0, 9f
+
+ eva_entry t1 t2 t0
+ PTR_LA t0, mips_cca
+ sw t1, 0(t0)
+ b 0f
+
+9:
+ /* Assume we came from YAMON... */
+ PTR_LA v0, 0x9fc00534 /* YAMON print */
+ lw v0, (v0)
+ move a0, zero
+ PTR_LA a1, nonsc_processor
+ jal v0
+
+ PTR_LA v0, 0x9fc00520 /* YAMON exit */
+ lw v0, (v0)
+ li a0, 1
+ jal v0
+
+1: b 1b
+ nop
+
+ __INITDATA
+nonsc_processor:
+ .asciz "Kernel requires the Segment/EVA to run\n"
+ __FINIT
+#endif /* CONFIG_EVA */
+
+0:
.endm
/*
* Do SMP slave processor setup necessary before we can safely execute C code.
*/
.macro smp_slave_setup
+
+#ifdef CONFIG_EVA
+
+ sync
+ ehb
+ mfc0 t1, CP0_CONFIG
+ eva_entry t1 t2 t0
+#endif /* CONFIG_EVA */
+
.endm
#endif /* __ASM_MACH_MIPS_KERNEL_ENTRY_INIT_H */
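Each li immediate in eva_entry packs two 16-bit segment configurations into one SegCtl register, with the CCA from Config.K0 merged in afterwards via ins/or. The sketch below recomputes one of those immediates in C, using the MIPS_SEGCFG_* field layout this patch adds to mipsregs.h:

/*
 * Userspace recomputation of one eva_entry SegCtl immediate, using the
 * MIPS_SEGCFG_* field layout added to mipsregs.h later in this patch.
 * Each SegCtl register holds two segment configurations, one per 16-bit
 * half; the CCA bits are left at 0 here, as in the li, since eva_entry
 * merges them from Config.K0 with ins/or afterwards.
 */
#include <stdio.h>
#include <stdint.h>

#define MIPS_SEGCFG_PA_SHIFT	9
#define MIPS_SEGCFG_AM_SHIFT	4
#define MIPS_SEGCFG_EU_SHIFT	3
#define MIPS_SEGCFG_C_SHIFT	0

#define MIPS_SEGCFG_MUSUK	4U	/* mapped user, unmapped kernel */

static uint32_t segcfg(uint32_t am, uint32_t pa, uint32_t eu, uint32_t c)
{
	return (am << MIPS_SEGCFG_AM_SHIFT) | (pa << MIPS_SEGCFG_PA_SHIFT) |
	       (eu << MIPS_SEGCFG_EU_SHIFT) | (c << MIPS_SEGCFG_C_SHIFT);
}

int main(void)
{
	/* lower half: MUSUK, PA=0, EU=1; upper half: MUSUK, PA=4, EU=1 */
	uint32_t lo = segcfg(MIPS_SEGCFG_MUSUK, 0, 1, 0);
	uint32_t hi = segcfg(MIPS_SEGCFG_MUSUK, 4, 1, 0);

	printf("SegCtl immediate: %#010x\n", (unsigned)((hi << 16) | lo));
	return 0;
}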
diff --git a/arch/mips/include/asm/mach-malta/spaces.h b/arch/mips/include/asm/mach-malta/spaces.h
new file mode 100644
index 0000000..ea3c928
--- /dev/null
+++ b/arch/mips/include/asm/mach-malta/spaces.h
@@ -0,0 +1,155 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Author: Leonid Yegoshin (yegoshin@mips.com)
+ * Copyright (C) 2012 MIPS Technologies, Inc.
+ */
+
+#ifndef _ASM_MALTA_SPACES_H
+#define _ASM_MALTA_SPACES_H
+
+#ifdef CONFIG_EVA
+
+#ifdef CONFIG_EVA_OLD_MALTA_MAP
+
+/* Classic (old) Malta board EVA memory map with system controller RocIt2-1.418
+
+   This memory map is traditional, but the IOCU works only with the mirrored
+   256MB of memory and so effectively can't be used with EVA.
+
+   Phys memory - 80000000 to ffffffff - 2GB (last 64KB reserved to keep
+	the HIGHMEM macro arithmetic correct)
+   KV memory - 0 - 7fffffff (2GB) or even higher
+   Kernel code is located in the same place (80000000) just to keep
+	YAMON and other stuff unchanged, so KSEG0 is used "illegally",
+	using the Malta mirroring of the 1st 256MB (see also __pa_symbol below)
+	for SMP kernels and a direct map to 80000000 for non-SMP.
+	The SMP kernel requires this because of the way YAMON starts
+	secondary cores.
+   IO/UNCAC_ADDR ... well, even KSEG1 or KSEG3, but physaddr is 0UL (256MB-512MB)
+   CAC_ADDR ... it is used to revert the effect of UNCAC_ADDR
+   VMALLOC is cut to C0000000 to E0000000 (KSEG2)
+   PKMAP/kmap_coherent should not be used - no HIGHMEM
+
+   PCI bus:
+	PCI devices are located in 256MB-512MB of PCI space,
+	the phys memory window is located in 2GB-4GB of PCI space.
+
+   Note: CONFIG_EVA_3GB actually gives only 2GB, but it shifts the IO space
+	up to KSEG3. This is done as preparation work for non-Malta boards.
+ */
+
+#define PAGE_OFFSET _AC(0x0, UL)
+#define PHYS_OFFSET _AC(0x80000000, UL)
+#define HIGHMEM_START _AC(0xffff0000, UL)
+
+/* Trick definition: use kernel symbols from KSEG0 but keep all dynamic
+   memory in EVA's MUSUK KUSEG segment (=0x80000000), rather than moving
+   the kernel code from 80000000 to zero.
+   Don't copy this for other boards; each is likely to have a different
+   kernel location. */
+#define __pa_symbol(x) (RELOC_HIDE((unsigned long)(x), 0))
+
+#define YAMON_BASE _AC(0x80000000, UL)
+
+#else /* !CONFIG_EVA_OLD_MALTA_MAP */
+
+/* New Malta board EVA memory map basics:
+
+   This map is designed to work with the IOCU on Malta.
+   The IOCU on Malta can't support the mirroring of the first 256MB used by the old map.
+
+   Phys memory - 00000000 to ffffffff - up to 4GB
+	(last 64KB reserved to keep the HIGHMEM macro arithmetic correct,
+	the memory hole 256M-512M is for I/O registers and PCI)
+	For EVA_3GB the first 512MB are not used, so let's use 4GB of memory.
+   KV memory - 0 - 7fffffff (2GB) or even higher
+   Kernel code is located in the same place (80000000) just to keep
+	YAMON and other stuff unchanged, at least for now.
+	It needs to be copied for 3GB configurations.
+   IO/UNCAC_ADDR ... well, even KSEG1 or KSEG3, but physaddr is 0UL (256MB-512MB)
+   CAC_ADDR ... it is used to revert the effect of UNCAC_ADDR
+   VMALLOC is cut to C0000000 to E0000000 (KSEG2)
+   PKMAP/kmap_coherent should not be used - no HIGHMEM
+
+   PCI bus:
+	PCI devices are located in 2GB + (256MB-512MB) of PCI space (non-transparent)
+	the phys memory window is located in 0GB-2GB of PCI space.
+
+   Note: the 3GB configuration doesn't work until the PCI bridge loop problem is
+	fixed, and that code is not finished yet (the loop was discovered later)
+ */
+
+#define PAGE_OFFSET _AC(0x0, UL)
+
+#ifdef CONFIG_EVA_3GB
+/* skip first 512MB */
+#define PHYS_OFFSET _AC(0x20000000, UL)
+#else
+#define PHYS_OFFSET _AC(0x0, UL)
+#define YAMON_BASE _AC(0x80000000, UL)
+#endif
+
+#define HIGHMEM_START _AC(0xffff0000, UL)
+
+/* Trick definition: use kernel symbols from KSEG0 but keep all dynamic
+   memory in EVA's MUSUK KUSEG segment, rather than moving the kernel
+   code from 80000000 to zero.
+   Don't copy this for other boards; each is likely to have a different
+   kernel location. */
+#define __pa_symbol(x) __pa(CPHYSADDR(RELOC_HIDE((unsigned long)(x), 0)))
+
+#endif /* CONFIG_EVA_OLD_MALTA_MAP */
+
+/* I put INDEX_BASE here to underline the fact that in EVA mode the kernel
+   may be located somewhere other than CKSEG0, so CKSEG0 may have a
+   "surprise" location and index-based CACHE ops may give unexpected results */
+#define INDEX_BASE CKSEG0
+
+/*
+ * If the Instruction Pointer is in module space (0xc0000000), return true;
+ * otherwise, it is in kernel space (0x80000000), return false.
+ *
+ * FIXME: This will not work when the kernel space and module space are the
+ * same. If they are the same, we need to modify scripts/recordmcount.pl,
+ * ftrace_make_nop/call() and the other related parts to ensure the
+ * enabling/disabling of the calling site to _mcount is right for both kernel
+ * and module.
+ *
+ * It must be changed for the 3.5GB memory map. LY22
+ */
+#define in_module(ip) (((unsigned long)ip) & 0x40000000)
+
+#ifdef CONFIG_EVA_3GB
+
+#define UNCAC_BASE _AC(0xe0000000, UL)
+#define IO_BASE UNCAC_BASE
+
+#define KSEG
+#define KUSEG 0x00000000
+#define KSEG0 0x80000000
+#define KSEG3 0xa0000000
+#define KSEG2 0xc0000000
+#define KSEG1 0xe0000000
+
+#define CKUSEG 0x00000000
+#define CKSEG0 0x80000000
+#define CKSEG3 0xa0000000
+#define CKSEG2 _AC(0xc0000000, UL)
+#define CKSEG1 0xe0000000
+
+#define MAP_BASE CKSEG2
+#define VMALLOC_END (MAP_BASE + _AC(0x20000000, UL) - 2*PAGE_SIZE)
+
+#endif /* CONFIG_EVA_3GB */
+
+#define IO_SIZE _AC(0x10000000, UL)
+#define IO_SHIFT _AC(0x10000000, UL)
+
+#endif /* CONFIG_EVA */
+
+#include <asm/mach-generic/spaces.h>
+
+#endif /* __ASM_MALTA_SPACES_H */
diff --git a/arch/mips/include/asm/mips-boards/generic.h b/arch/mips/include/asm/mips-boards/generic.h
index bd9746f..dc7cfbc 100644
--- a/arch/mips/include/asm/mips-boards/generic.h
+++ b/arch/mips/include/asm/mips-boards/generic.h
@@ -24,12 +24,6 @@
#define ASCII_DISPLAY_POS_BASE 0x1f000418
/*
- * Reset register.
- */
-#define SOFTRES_REG 0x1f000500
-#define GORESET 0x42
-
-/*
* Revision register.
*/
#define MIPS_REVISION_REG 0x1fc00010
@@ -75,6 +69,9 @@
#ifdef CONFIG_OF
extern struct boot_param_header __dtb_start;
+#ifdef CONFIG_MIPS_APPENDED_DTB
+extern struct boot_param_header __appended_dtb;
+#endif
#endif
#ifdef CONFIG_PCI
diff --git a/arch/mips/include/asm/mips-boards/malta.h b/arch/mips/include/asm/mips-boards/malta.h
index 722bc88..9750dce 100644
--- a/arch/mips/include/asm/mips-boards/malta.h
+++ b/arch/mips/include/asm/mips-boards/malta.h
@@ -54,8 +54,8 @@
/*
* GCMP Specific definitions
*/
-#define GCMP_BASE_ADDR 0x1fbf8000
-#define GCMP_ADDRSPACE_SZ (256 * 1024)
+#define GCMP_BASE_ADDR_MALTA 0x1fbf8000
+#define GCMP_ADDRSPACE_SZ_MALTA (64 * 1024)
/*
* GIC Specific definitions
@@ -64,6 +64,12 @@
#define GIC_ADDRSPACE_SZ (128 * 1024)
/*
+ * CPC Specific definitions
+ */
+#define CPC_BASE_ADDR_MALTA 0x1bde0000
+#define CPC_ADDRSPACE_SZ_MALTA (32 * 1024)
+
+/*
* MSC01 BIU Specific definitions
* FIXME : These should be elsewhere ?
*/
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index 87e6207..698b24b 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -139,6 +139,12 @@
* and should be written as zero.
*/
#define FPU_CSR_RSVD 0x001c0000
+/* ... but FPU2 uses those bits */
+#define FPU_CSR_NAN2008 0x00040000
+#define FPU_CSR_ABS2008 0x00080000
+#define FPU_CSR_MAC2008 0x00100000
+
+#define FPU_CSR_DEFAULT 0x00000000
/*
* X the exception cause indicator
@@ -264,6 +270,8 @@
#define PG_XIE (_ULCAST_(1) << 30)
#define PG_ELPA (_ULCAST_(1) << 29)
#define PG_ESP (_ULCAST_(1) << 28)
+#define PG_IEC (_ULCAST_(1) << 27)
+#define PG_MCCAUSE (_ULCAST_(0x1f) << 0)
/*
* R4x00 interrupt enable / cause bits
@@ -414,6 +422,7 @@
#define ST0_BEV 0x00400000
#define ST0_RE 0x02000000
#define ST0_FR 0x04000000
+#define _ST0_FR (26)
#define ST0_CU 0xf0000000
#define ST0_CU0 0x10000000
#define ST0_CU1 0x20000000
@@ -573,7 +582,9 @@
#define MIPS_CONF1_IA (_ULCAST_(7) << 16)
#define MIPS_CONF1_IL (_ULCAST_(7) << 19)
#define MIPS_CONF1_IS (_ULCAST_(7) << 22)
-#define MIPS_CONF1_TLBS (_ULCAST_(63)<< 25)
+#define MIPS_CONF1_TLBS_SHIFT (25)
+#define MIPS_CONF1_TLBS_SIZE (6)
+#define MIPS_CONF1_TLBS (_ULCAST_(63)<< MIPS_CONF1_TLBS_SHIFT)
#define MIPS_CONF2_SA (_ULCAST_(15)<< 0)
#define MIPS_CONF2_SL (_ULCAST_(15)<< 4)
@@ -587,28 +598,80 @@
#define MIPS_CONF3_TL (_ULCAST_(1) << 0)
#define MIPS_CONF3_SM (_ULCAST_(1) << 1)
#define MIPS_CONF3_MT (_ULCAST_(1) << 2)
+#define MIPS_CONF3_CDMM (_ULCAST_(1) << 3)
#define MIPS_CONF3_SP (_ULCAST_(1) << 4)
#define MIPS_CONF3_VINT (_ULCAST_(1) << 5)
#define MIPS_CONF3_VEIC (_ULCAST_(1) << 6)
#define MIPS_CONF3_LPA (_ULCAST_(1) << 7)
+#define MIPS_CONF3_ITL (_ULCAST_(1) << 8)
+#define MIPS_CONF3_CTXTC (_ULCAST_(1) << 9)
#define MIPS_CONF3_DSP (_ULCAST_(1) << 10)
#define MIPS_CONF3_DSP2P (_ULCAST_(1) << 11)
#define MIPS_CONF3_RXI (_ULCAST_(1) << 12)
#define MIPS_CONF3_ULRI (_ULCAST_(1) << 13)
#define MIPS_CONF3_ISA (_ULCAST_(3) << 14)
-#define MIPS_CONF3_ISA_OE (_ULCAST_(3) << 16)
+#define MIPS_CONF3_ISA_OE (_ULCAST_(1) << 16)
+#define MIPS_CONF3_MCU (_ULCAST_(1) << 17)
+#define MIPS_CONF3_MMAR (_ULCAST_(7) << 18)
+#define MIPS_CONF3_IPLW (_ULCAST_(3) << 21)
#define MIPS_CONF3_VZ (_ULCAST_(1) << 23)
+#define MIPS_CONF3_PW (_ULCAST_(1) << 24)
+#define MIPS_CONF3_SC (_ULCAST_(1) << 25)
+#define MIPS_CONF3_BI (_ULCAST_(1) << 26)
+#define MIPS_CONF3_BP (_ULCAST_(1) << 27)
+#define MIPS_CONF3_MSA (_ULCAST_(1) << 28)
+#define MIPS_CONF3_CMGCR (_ULCAST_(1) << 29)
+#define MIPS_CONF3_BPG (_ULCAST_(1) << 30)
+#define MIPS_CONF4_MMUSIZEEXT_SHIFT (0)
#define MIPS_CONF4_MMUSIZEEXT (_ULCAST_(255) << 0)
+#define MIPS_CONF4_FTLBSETS_SHIFT (0)
+#define MIPS_CONF4_FTLBSETS (_ULCAST_(15) << MIPS_CONF4_FTLBSETS_SHIFT)
+#define MIPS_CONF4_FTLBWAYS_SHIFT (4)
+#define MIPS_CONF4_FTLBWAYS (_ULCAST_(15) << MIPS_CONF4_FTLBWAYS_SHIFT)
+#define MIPS_CONF4_FTLBPAGESIZE_SHIFT (8)
+#define MIPS_CONF4_FTLBPAGESIZE (_ULCAST_(31) << MIPS_CONF4_FTLBPAGESIZE_SHIFT)
#define MIPS_CONF4_MMUEXTDEF (_ULCAST_(3) << 14)
#define MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT (_ULCAST_(1) << 14)
+#define MIPS_CONF4_MMUEXTDEF_FTLBSIZEEXT (_ULCAST_(2) << 14)
+#define MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT (_ULCAST_(3) << 14)
+#define MIPS_CONF4_KSCREXIST (_ULCAST_(255) << 16)
+#define MIPS_CONF4_VTLBSIZEEXT_SHIFT (24)
+#define MIPS_CONF4_VTLBSIZEEXT (_ULCAST_(15) << MIPS_CONF4_VTLBSIZEEXT_SHIFT)
+#define MIPS_CONF4_AE (_ULCAST_(1) << 28)
+#define MIPS_CONF4_IE (_ULCAST_(3) << 29)
+#define MIPS_CONF4_TLBINV (_ULCAST_(2) << 29)
+#define MIPS_CONF4_TLBINV_FULL (_ULCAST_(1) << 29)
+
+#define MIPS_CONF5_NF (_ULCAST_(1) << 0)
+#define MIPS_CONF5_UFR (_ULCAST_(1) << 2)
+#define MIPS_CONF5_MRP (_ULCAST_(1) << 3)
+#define MIPS_CONF5_VC (_ULCAST_(1) << 7)
+#define MIPS_CONF5_FRE (_ULCAST_(1) << 8)
+#define MIPS_CONF5_UFE (_ULCAST_(1) << 9)
+#define MIPS_CONF5_L2C (_ULCAST_(1) << 10)
+#define MIPS_CONF5_MSAEN (_ULCAST_(1) << 27)
+#define MIPS_CONF5_EVA (_ULCAST_(1) << 28)
+#define MIPS_CONF5_CV (_ULCAST_(1) << 29)
+#define MIPS_CONF5_K (_ULCAST_(1) << 30)
+
+#define MIPS_CONF6_JRCD (_ULCAST_(1) << 0)
+#define MIPS_CONF6_JRCP (_ULCAST_(1) << 1)
#define MIPS_CONF6_SYND (_ULCAST_(1) << 13)
+#define MIPS_CONF6_SPCD (_ULCAST_(1) << 14)
+#define MIPS_CONF6_FTLBEN (_ULCAST_(1) << 15)
#define MIPS_CONF7_WII (_ULCAST_(1) << 31)
-
+#define MIPS_CONF7_AR (_ULCAST_(1) << 16)
+#define MIPS_CONF7_IAR (_ULCAST_(1) << 10)
#define MIPS_CONF7_RPS (_ULCAST_(1) << 2)
+/* EntryHI bit definition */
+#define MIPS_EHINV (_ULCAST_(1) << 10)
+
+/* HTW PTWCTL base definition - not yet for HUGE pages */
+#define HTW_PWCTL_BASE (_ULCAST_(7) << 26) /* XK+XS+XU */
/*
* Bits in the MIPS32/64 coprocessor 1 (FPU) revision register.
@@ -620,6 +683,83 @@
#define MIPS_FPIR_W (_ULCAST_(1) << 20)
#define MIPS_FPIR_L (_ULCAST_(1) << 21)
#define MIPS_FPIR_F64 (_ULCAST_(1) << 22)
+/* additional bits in MIPS32/64 coprocessor 2 (FPU) */
+#define MIPS_FPIR_HAS2008 (_ULCAST_(1) << 23)
+#define MIPS_FPIR_FC (_ULCAST_(1) << 24)
+#define MIPS_FPIR_FREP (_ULCAST_(1) << 29)
+
+/*
+ * Bits in the MIPS32 Memory Segmentation registers.
+ */
+#define MIPS_SEGCFG_PA_SHIFT 9
+#define MIPS_SEGCFG_PA (_ULCAST_(127) << MIPS_SEGCFG_PA_SHIFT)
+#define MIPS_SEGCFG_AM_SHIFT 4
+#define MIPS_SEGCFG_AM (_ULCAST_(7) << MIPS_SEGCFG_AM_SHIFT)
+#define MIPS_SEGCFG_EU_SHIFT 3
+#define MIPS_SEGCFG_EU (_ULCAST_(1) << MIPS_SEGCFG_EU_SHIFT)
+#define MIPS_SEGCFG_C_SHIFT 0
+#define MIPS_SEGCFG_C (_ULCAST_(7) << MIPS_SEGCFG_C_SHIFT)
+
+#define MIPS_SEGCFG_UUSK _ULCAST_(7)
+#define MIPS_SEGCFG_USK _ULCAST_(5)
+#define MIPS_SEGCFG_MUSUK _ULCAST_(4)
+#define MIPS_SEGCFG_MUSK _ULCAST_(3)
+#define MIPS_SEGCFG_MSK _ULCAST_(2)
+#define MIPS_SEGCFG_MK _ULCAST_(1)
+#define MIPS_SEGCFG_UK _ULCAST_(0)
+
+/* ebase register bit definition */
+#define MIPS_EBASE_WG (_ULCAST_(1) << 11)
+
+/* MAAR bits definitions */
+#define MIPS_MAAR_V (_ULCAST_(1))
+#define MIPS_MAAR_S (_ULCAST_(1) << 1)
+#define MIPS_MAAR_HI_V (_ULCAST_(1) << 31)
+
+#define MIPS_MAAR_MAX 64
+
+#define MIPS_PWFIELD_BDI_SHIFT 32
+#define MIPS_PWFIELD_BDI_MASK 0x3f00000000UL
+#define MIPS_PWFIELD_GDI_SHIFT 24
+#define MIPS_PWFIELD_GDI_MASK 0x3f000000
+#define MIPS_PWFIELD_UDI_SHIFT 18
+#define MIPS_PWFIELD_UDI_MASK 0x00fc0000
+#define MIPS_PWFIELD_MDI_SHIFT 12
+#define MIPS_PWFIELD_MDI_MASK 0x0003f000
+#define MIPS_PWFIELD_PTI_SHIFT 6
+#define MIPS_PWFIELD_PTI_MASK 0x00000fc0
+#define MIPS_PWFIELD_PTEI_SHIFT 0
+#define MIPS_PWFIELD_PTEI_MASK 0x0000003f
+
+#define MIPS_PWSIZE_BDW_SHIFT 32
+#define MIPS_PWSIZE_BDW_MASK 0x3f00000000UL
+#define MIPS_PWSIZE_PS 0x40000000
+#define MIPS_PWSIZE_GDW_SHIFT 24
+#define MIPS_PWSIZE_GDW_MASK 0x3f000000
+#define MIPS_PWSIZE_UDW_SHIFT 18
+#define MIPS_PWSIZE_UDW_MASK 0x00fc0000
+#define MIPS_PWSIZE_MDW_SHIFT 12
+#define MIPS_PWSIZE_MDW_MASK 0x0003f000
+#define MIPS_PWSIZE_PTW_SHIFT 6
+#define MIPS_PWSIZE_PTW_MASK 0x00000fc0
+#define MIPS_PWSIZE_PTEW_SHIFT 0
+#define MIPS_PWSIZE_PTEW_MASK 0x0000003f
+
+#define MIPS_PWCTL_PWEN_SHIFT 31
+#define MIPS_PWCTL_PWEN_MASK 0x80000000
+#define MIPS_PWCTL_DPH_SHIFT 7
+#define MIPS_PWCTL_DPH_MASK 0x00000080
+#define MIPS_PWCTL_HUGEPG_SHIFT 6
+#define MIPS_PWCTL_HUGEPG_MASK 0x00000060
+#define MIPS_PWCTL_PSN_SHIFT 0
+#define MIPS_PWCTL_PSN_MASK 0x0000003f
+
+#define MIPS_GNR_VPID_SHIFT (0)
+#define MIPS_GNR_VPID (_ULCAST_(7) << MIPS_GNR_VPID_SHIFT)
+#define MIPS_GNR_CORE_SHIFT (8)
+#define MIPS_GNR_CORE (_ULCAST_(15) << MIPS_GNR_CORE_SHIFT)
+#define MIPS_GNR_CLUSTER_SHIFT (16)
+#define MIPS_GNR_CLUSTER (_ULCAST_(15) << MIPS_GNR_CLUSTER_SHIFT)
#ifndef __ASSEMBLY__
@@ -861,6 +1001,8 @@
#define read_c0_entrylo1() __read_ulong_c0_register($3, 0)
#define write_c0_entrylo1(val) __write_ulong_c0_register($3, 0, val)
+#define read_c0_gnr() __read_32bit_c0_register($3, 1)
+
#define read_c0_conf() __read_32bit_c0_register($3, 0)
#define write_c0_conf(val) __write_32bit_c0_register($3, 0, val)
@@ -930,6 +1072,8 @@
#define write_c0_epc(val) __write_ulong_c0_register($14, 0, val)
#define read_c0_prid() __read_32bit_c0_register($15, 0)
+#define read_c0_cmgcrbase() __read_ulong_c0_register($15, 3)
+#define read_c0_bevva() __read_ulong_c0_register($15, 4)
#define read_c0_config() __read_32bit_c0_register($16, 0)
#define read_c0_config1() __read_32bit_c0_register($16, 1)
@@ -948,6 +1092,16 @@
#define write_c0_config6(val) __write_32bit_c0_register($16, 6, val)
#define write_c0_config7(val) __write_32bit_c0_register($16, 7, val)
+#define read_c0_lladdr() __read_ulong_c0_register($17, 0)
+#define write_c0_lladdr(val) __write_ulong_c0_register($17, 0, val)
+/*
+ * MAAR registers
+ */
+#define read_c0_maar() __read_ulong_c0_register($17, 1)
+#define write_c0_maar(val) __write_ulong_c0_register($17, 1, val)
+#define read_c0_maarindex() __read_32bit_c0_register($17, 2)
+#define write_c0_maarindex(val) __write_32bit_c0_register($17, 2, val)
+
/*
* The WatchLo register. There may be up to 8 of them.
*/
@@ -1092,9 +1246,36 @@
#define read_c0_srsmap() __read_32bit_c0_register($12, 3)
#define write_c0_srsmap(val) __write_32bit_c0_register($12, 3, val)
+#ifdef CONFIG_CPU_MIPS64_R6
+#define read_c0_ebase() __read_ulong_c0_register($15, 1)
+#define write_c0_ebase(val) __write_ulong_c0_register($15, 1, val)
+#else
#define read_c0_ebase() __read_32bit_c0_register($15, 1)
#define write_c0_ebase(val) __write_32bit_c0_register($15, 1, val)
+#endif
+/* MIPSR3 */
+#define read_c0_segctl0() __read_32bit_c0_register($5, 2)
+#define write_c0_segctl0(val) __write_32bit_c0_register($5, 2, val)
+
+#define read_c0_segctl1() __read_32bit_c0_register($5, 3)
+#define write_c0_segctl1(val) __write_32bit_c0_register($5, 3, val)
+
+#define read_c0_segctl2() __read_32bit_c0_register($5, 4)
+#define write_c0_segctl2(val) __write_32bit_c0_register($5, 4, val)
+
+/* Hardware Page Table Walker */
+#define read_c0_pwbase() __read_ulong_c0_register($5, 5)
+#define write_c0_pwbase(val) __write_ulong_c0_register($5, 5, val)
+
+#define read_c0_pwfield() __read_ulong_c0_register($5, 6)
+#define write_c0_pwfield(val) __write_ulong_c0_register($5, 6, val)
+
+#define read_c0_pwsize() __read_ulong_c0_register($5, 7)
+#define write_c0_pwsize(val) __write_ulong_c0_register($5, 7, val)
+
+#define read_c0_pwctl() __read_32bit_c0_register($6, 6)
+#define write_c0_pwctl(val) __write_32bit_c0_register($6, 6, val)
/* Cavium OCTEON (cnMIPS) */
#define read_c0_cvmcount() __read_ulong_c0_register($9, 6)
@@ -1163,7 +1344,7 @@
/*
* Macros to access the floating point coprocessor control registers
*/
-#define read_32bit_cp1_register(source) \
+#define _read_32bit_cp1_register(source, gas_hardfloat) \
({ \
int __res; \
\
@@ -1173,12 +1354,47 @@
" # gas fails to assemble cfc1 for some archs, \n" \
" # like Octeon. \n" \
" .set mips1 \n" \
+ " "STR(gas_hardfloat)" \n" \
" cfc1 %0,"STR(source)" \n" \
" .set pop \n" \
: "=r" (__res)); \
__res; \
})
+#ifdef GAS_HAS_SET_HARDFLOAT
+#define read_32bit_cp1_register(source) \
+ _read_32bit_cp1_register(source, .set hardfloat)
+#else
+#define read_32bit_cp1_register(source) \
+ _read_32bit_cp1_register(source, )
+#endif
+
+#define _write_32bit_cp1_register(dest, value, gas_hardfloat)		\
+({									\
+	__asm__ __volatile__(						\
+	"	.set push						\n" \
+	"	.set reorder						\n" \
+	"	# gas fails to assemble ctc1 for some archs,		\n" \
+	"	# like Octeon.						\n" \
+	"	.set mips1						\n" \
+	"	"STR(gas_hardfloat)"					\n" \
+	"	ctc1 %0,"STR(dest)"					\n" \
+	"	.set pop						\n" \
+	:: "r" (value));						\
+})
+
+#ifdef GAS_HAS_SET_HARDFLOAT
+#define write_32bit_cp1_register(dest, value) \
+ _write_32bit_cp1_register(dest, value, .set hardfloat)
+#else
+#define write_32bit_cp1_register(dest, value) \
+ _write_32bit_cp1_register(dest, value, )
+#endif
+
+#ifndef CONFIG_CPU_MIPSR6
+/*
+ * Macros to access the DSP ASE registers
+ */
#ifdef HAVE_AS_DSP
#define rddsp(mask) \
({ \
@@ -1564,6 +1780,7 @@
#endif /* CONFIG_CPU_MICROMIPS */
#endif
+#endif /* CONFIG_CPU_MIPSR6 */
/*
* TLB operations.
@@ -1631,6 +1848,12 @@
".set reorder");
}
+static inline void tlbinvf(void)
+{
+	__asm__ __volatile__(
+		".word 0x42000004");	/* encoding of tlbinvf */
+}
+
/*
* Manipulate bits in a c0 register.
*/
@@ -1723,7 +1946,8 @@
{
__asm__ __volatile__(
" .set mips32r2 \n"
- " ehb \n" " .set mips0 \n");
+ " ehb \n"
+ " .set mips0 \n");
}
/*
@@ -1795,6 +2019,7 @@
__BUILD_SET_C0(status)
__BUILD_SET_C0(cause)
__BUILD_SET_C0(config)
+__BUILD_SET_C0(config5)
__BUILD_SET_C0(intcontrol)
__BUILD_SET_C0(intctl)
__BUILD_SET_C0(srsmap)
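The new Config4 masks above carve the FTLB sets, ways and page-size fields out of adjacent bit ranges. A short decode of a sample value (0x1f3 is invented; real values come from read_c0_config4() on hardware with an FTLB):

/*
 * Decoding the Config4 FTLB fields added above from a sample register
 * value, using the same shifts and masks as the patch.
 */
#include <stdio.h>
#include <stdint.h>

#define MIPS_CONF4_FTLBSETS_SHIFT	0
#define MIPS_CONF4_FTLBSETS		(15U << MIPS_CONF4_FTLBSETS_SHIFT)
#define MIPS_CONF4_FTLBWAYS_SHIFT	4
#define MIPS_CONF4_FTLBWAYS		(15U << MIPS_CONF4_FTLBWAYS_SHIFT)
#define MIPS_CONF4_FTLBPAGESIZE_SHIFT	8
#define MIPS_CONF4_FTLBPAGESIZE		(31U << MIPS_CONF4_FTLBPAGESIZE_SHIFT)

int main(void)
{
	uint32_t config4 = 0x1f3;	/* hypothetical sample value */

	unsigned sets = (config4 & MIPS_CONF4_FTLBSETS) >>
			MIPS_CONF4_FTLBSETS_SHIFT;
	unsigned ways = (config4 & MIPS_CONF4_FTLBWAYS) >>
			MIPS_CONF4_FTLBWAYS_SHIFT;
	unsigned psz  = (config4 & MIPS_CONF4_FTLBPAGESIZE) >>
			MIPS_CONF4_FTLBPAGESIZE_SHIFT;

	printf("FTLB: sets field %u, ways field %u, pagesize field %u\n",
	       sets, ways, psz);
	return 0;
}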
diff --git a/arch/mips/include/asm/mmu.h b/arch/mips/include/asm/mmu.h
index c436138..260f2c0 100644
--- a/arch/mips/include/asm/mmu.h
+++ b/arch/mips/include/asm/mmu.h
@@ -3,7 +3,11 @@
typedef struct {
unsigned long asid[NR_CPUS];
+ unsigned long vdso_asid[NR_CPUS];
+ struct page *vdso_page[NR_CPUS];
void *vdso;
+ struct vm_area_struct *vdso_vma;
+ unsigned long thread_flags;
} mm_context_t;
#endif /* __ASM_MMU_H */
diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h
index 516e6e9..3906ed4 100644
--- a/arch/mips/include/asm/mmu_context.h
+++ b/arch/mips/include/asm/mmu_context.h
@@ -24,6 +24,14 @@
#endif /* SMTC */
#include <asm-generic/mm_hooks.h>
+#define htw_set_pwbase(pgd) \
+do { \
+ if (cpu_has_htw) { \
+ write_c0_pwbase(pgd); \
+ back_to_back_c0_hazard(); \
+ } \
+} while (0)
+
#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
#define TLBMISS_HANDLER_SETUP_PGD(pgd) \
@@ -34,12 +42,14 @@
tlbmiss_handler_setup_pgd = \
(__typeof__(tlbmiss_handler_setup_pgd)) tlbmiss_handler_setup_pgd_array; \
tlbmiss_handler_setup_pgd((unsigned long)(pgd)); \
+ htw_set_pwbase((unsigned long)pgd); \
} while (0)
#define TLBMISS_HANDLER_SETUP() \
do { \
TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir); \
write_c0_xcontext((unsigned long) smp_processor_id() << 51); \
+ back_to_back_c0_hazard(); \
} while (0)
#else /* CONFIG_MIPS_PGD_C0_CONTEXT: using pgd_current*/
@@ -51,20 +61,26 @@
*/
extern unsigned long pgd_current[];
-#define TLBMISS_HANDLER_SETUP_PGD(pgd) \
- pgd_current[smp_processor_id()] = (unsigned long)(pgd)
-
#ifdef CONFIG_32BIT
+#define TLBMISS_HANDLER_SETUP_PGD(pgd) \
+ pgd_current[smp_processor_id()] = (unsigned long)(pgd); \
+ htw_set_pwbase((unsigned long)pgd);
+
#define TLBMISS_HANDLER_SETUP() \
write_c0_context((unsigned long) smp_processor_id() << 25); \
back_to_back_c0_hazard(); \
- TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir)
+ TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir);
#endif
#ifdef CONFIG_64BIT
+#define TLBMISS_HANDLER_SETUP_PGD(pgd) \
+ pgd_current[2 * smp_processor_id()] = (unsigned long)(pgd); \
+ pgd_current[(2 *smp_processor_id())+1] = (unsigned long)(swapper_pg_dir); \
+ htw_set_pwbase((unsigned long)pgd);
+
#define TLBMISS_HANDLER_SETUP() \
- write_c0_context((unsigned long) smp_processor_id() << 26); \
+ write_c0_context((unsigned long) smp_processor_id() << 27); \
back_to_back_c0_hazard(); \
- TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir)
+ TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir);
#endif
#endif /* CONFIG_MIPS_PGD_C0_CONTEXT*/
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
@@ -164,6 +180,7 @@
#else /* Not SMTC */
local_irq_save(flags);
#endif /* CONFIG_MIPS_MT_SMTC */
+ htw_stop();
/* Check if our ASID is of an older version and thus invalid */
if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & ASID_VERSION_MASK)
@@ -203,6 +220,7 @@
*/
cpumask_clear_cpu(cpu, mm_cpumask(prev));
cpumask_set_cpu(cpu, mm_cpumask(next));
+ htw_start();
local_irq_restore(flags);
}
@@ -234,6 +252,7 @@
#endif /* CONFIG_MIPS_MT_SMTC */
local_irq_save(flags);
+ htw_stop();
/* Unconditionally get a new ASID. */
get_new_mmu_context(next, cpu);
@@ -260,6 +279,7 @@
/* mark mmu ownership change */
cpumask_clear_cpu(cpu, mm_cpumask(prev));
cpumask_set_cpu(cpu, mm_cpumask(next));
+ htw_start();
local_irq_restore(flags);
}
@@ -280,6 +300,7 @@
#endif /* CONFIG_MIPS_MT_SMTC */
local_irq_save(flags);
+ htw_stop();
if (cpumask_test_cpu(cpu, mm_cpumask(mm))) {
get_new_mmu_context(mm, cpu);
@@ -315,6 +336,7 @@
}
#endif /* CONFIG_MIPS_MT_SMTC */
}
+ htw_start();
local_irq_restore(flags);
}
diff --git a/arch/mips/include/asm/module.h b/arch/mips/include/asm/module.h
index 44b705d..6202276 100644
--- a/arch/mips/include/asm/module.h
+++ b/arch/mips/include/asm/module.h
@@ -92,6 +92,10 @@
#define MODULE_PROC_FAMILY "MIPS64_R1 "
#elif defined CONFIG_CPU_MIPS64_R2
#define MODULE_PROC_FAMILY "MIPS64_R2 "
+#elif defined CONFIG_CPU_MIPS32_R6
+#define MODULE_PROC_FAMILY "MIPS32_R6 "
+#elif defined CONFIG_CPU_MIPS64_R6
+#define MODULE_PROC_FAMILY "MIPS64_R6 "
#elif defined CONFIG_CPU_R3000
#define MODULE_PROC_FAMILY "R3000 "
#elif defined CONFIG_CPU_TX39XX
diff --git a/arch/mips/include/asm/msa.h b/arch/mips/include/asm/msa.h
new file mode 100644
index 0000000..1168a56
--- /dev/null
+++ b/arch/mips/include/asm/msa.h
@@ -0,0 +1,220 @@
+/*
+ * Copyright (C) 2013 Imagination Technologies
+ * Author: Paul Burton <paul.burton@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+#ifndef _ASM_MSA_H
+#define _ASM_MSA_H
+
+#include <asm/mipsregs.h>
+
+#ifndef __ASSEMBLY__
+
+extern void _save_msa(struct task_struct *);
+extern void _restore_msa(struct task_struct *);
+extern void _init_msa_upper(void);
+extern void _restore_msa_uppers_from_thread(union fpureg *);
+
+static inline void enable_msa(void)
+{
+ if (cpu_has_msa) {
+ set_c0_config5(MIPS_CONF5_MSAEN);
+ enable_fpu_hazard();
+ }
+}
+
+static inline void disable_msa(void)
+{
+ if (cpu_has_msa) {
+ clear_c0_config5(MIPS_CONF5_MSAEN);
+ disable_fpu_hazard();
+ }
+}
+
+static inline int is_msa_enabled(void)
+{
+ if (!cpu_has_msa)
+ return 0;
+
+ return read_c0_config5() & MIPS_CONF5_MSAEN;
+}
+
+static inline int thread_msa_context_live(void)
+{
+ /*
+ * Check cpu_has_msa only if it's a constant. This will allow the
+ * compiler to optimise out code for CPUs without MSA without adding
+ * an extra redundant check for CPUs with MSA.
+ */
+ if (__builtin_constant_p(cpu_has_msa) && !cpu_has_msa)
+ return 0;
+
+ return test_thread_flag(TIF_MSA_CTX_LIVE);
+}
+
+static inline void save_msa(struct task_struct *t)
+{
+ if (cpu_has_msa)
+ _save_msa(t);
+}
+
+static inline void restore_msa(struct task_struct *t)
+{
+ if (cpu_has_msa)
+ _restore_msa(t);
+}
+
+#ifdef TOOLCHAIN_SUPPORTS_MSA
+
+#define __BUILD_MSA_CTL_REG(name, cs) \
+static inline unsigned int read_msa_##name(void) \
+{ \
+ unsigned int reg; \
+ __asm__ __volatile__( \
+ " .set push\n" \
+ " .set fp=64\n" \
+ " .set msa\n" \
+ " cfcmsa %0, $" #cs "\n" \
+ " .set pop\n" \
+ : "=r"(reg)); \
+ return reg; \
+} \
+ \
+static inline void write_msa_##name(unsigned int val) \
+{ \
+ __asm__ __volatile__( \
+ " .set push\n" \
+ " .set fp=64\n" \
+ " .set msa\n" \
+ " ctcmsa $" #cs ", %0\n" \
+ " .set pop\n" \
+ : : "r"(val)); \
+}
+
+#else /* !TOOLCHAIN_SUPPORTS_MSA */
+
+/*
+ * Define functions using .word for the c[ft]cmsa instructions in order to
+ * allow compilation with toolchains that do not support MSA. Once all
+ * toolchains in use support MSA these can be removed.
+ */
+#ifdef CONFIG_CPU_MICROMIPS
+#define CFC_MSA_INSN 0x587e0056
+#define CTC_MSA_INSN 0x583e0816
+#else
+#define CFC_MSA_INSN 0x787e0059
+#define CTC_MSA_INSN 0x783e0819
+#endif
+
+#define __BUILD_MSA_CTL_REG(name, cs) \
+static inline unsigned int read_msa_##name(void) \
+{ \
+ unsigned int reg; \
+ __asm__ __volatile__( \
+ " .set push\n" \
+ " .set noat\n" \
+ " .insn\n" \
+ " .word %1 | (" #cs " << 11)\n" \
+ " move %0, $1\n" \
+ " .set pop\n" \
+ : "=r"(reg) : "i"(CFC_MSA_INSN)); \
+ return reg; \
+} \
+ \
+static inline void write_msa_##name(unsigned int val) \
+{ \
+ __asm__ __volatile__( \
+ " .set push\n" \
+ " .set noat\n" \
+ " move $1, %0\n" \
+ " .insn\n" \
+ " .word %1 | (" #cs " << 6)\n" \
+ " .set pop\n" \
+ : : "r"(val), "i"(CTC_MSA_INSN)); \
+}
+
+#endif /* !TOOLCHAIN_SUPPORTS_MSA */
+
+__BUILD_MSA_CTL_REG(ir, 0)
+__BUILD_MSA_CTL_REG(csr, 1)
+__BUILD_MSA_CTL_REG(access, 2)
+__BUILD_MSA_CTL_REG(save, 3)
+__BUILD_MSA_CTL_REG(modify, 4)
+__BUILD_MSA_CTL_REG(request, 5)
+__BUILD_MSA_CTL_REG(map, 6)
+__BUILD_MSA_CTL_REG(unmap, 7)
+
+#endif /* !__ASSEMBLY__ */
+
+#define MSA_IR 0
+#define MSA_CSR 1
+#define MSA_ACCESS 2
+#define MSA_SAVE 3
+#define MSA_MODIFY 4
+#define MSA_REQUEST 5
+#define MSA_MAP 6
+#define MSA_UNMAP 7
+
+/* MSA Implementation Register (MSAIR) */
+#define MSA_IR_REVB 0
+#define MSA_IR_REVF (_ULCAST_(0xff) << MSA_IR_REVB)
+#define MSA_IR_PROCB 8
+#define MSA_IR_PROCF (_ULCAST_(0xff) << MSA_IR_PROCB)
+#define MSA_IR_WRPB 16
+#define MSA_IR_WRPF (_ULCAST_(0x1) << MSA_IR_WRPB)
+
+/* MSA Control & Status Register (MSACSR) */
+#define MSA_CSR_RMB 0
+#define MSA_CSR_RMF (_ULCAST_(0x3) << MSA_CSR_RMB)
+#define MSA_CSR_RM_NEAREST 0
+#define MSA_CSR_RM_TO_ZERO 1
+#define MSA_CSR_RM_TO_POS 2
+#define MSA_CSR_RM_TO_NEG 3
+#define MSA_CSR_FLAGSB 2
+#define MSA_CSR_FLAGSF (_ULCAST_(0x1f) << MSA_CSR_FLAGSB)
+#define MSA_CSR_FLAGS_IB 2
+#define MSA_CSR_FLAGS_IF (_ULCAST_(0x1) << MSA_CSR_FLAGS_IB)
+#define MSA_CSR_FLAGS_UB 3
+#define MSA_CSR_FLAGS_UF (_ULCAST_(0x1) << MSA_CSR_FLAGS_UB)
+#define MSA_CSR_FLAGS_OB 4
+#define MSA_CSR_FLAGS_OF (_ULCAST_(0x1) << MSA_CSR_FLAGS_OB)
+#define MSA_CSR_FLAGS_ZB 5
+#define MSA_CSR_FLAGS_ZF (_ULCAST_(0x1) << MSA_CSR_FLAGS_ZB)
+#define MSA_CSR_FLAGS_VB 6
+#define MSA_CSR_FLAGS_VF (_ULCAST_(0x1) << MSA_CSR_FLAGS_VB)
+#define MSA_CSR_ENABLESB 7
+#define MSA_CSR_ENABLESF (_ULCAST_(0x1f) << MSA_CSR_ENABLESB)
+#define MSA_CSR_ENABLES_IB 7
+#define MSA_CSR_ENABLES_IF (_ULCAST_(0x1) << MSA_CSR_ENABLES_IB)
+#define MSA_CSR_ENABLES_UB 8
+#define MSA_CSR_ENABLES_UF (_ULCAST_(0x1) << MSA_CSR_ENABLES_UB)
+#define MSA_CSR_ENABLES_OB 9
+#define MSA_CSR_ENABLES_OF (_ULCAST_(0x1) << MSA_CSR_ENABLES_OB)
+#define MSA_CSR_ENABLES_ZB 10
+#define MSA_CSR_ENABLES_ZF (_ULCAST_(0x1) << MSA_CSR_ENABLES_ZB)
+#define MSA_CSR_ENABLES_VB 11
+#define MSA_CSR_ENABLES_VF (_ULCAST_(0x1) << MSA_CSR_ENABLES_VB)
+#define MSA_CSR_CAUSEB 12
+#define MSA_CSR_CAUSEF (_ULCAST_(0x3f) << MSA_CSR_CAUSEB)
+#define MSA_CSR_CAUSE_IB 12
+#define MSA_CSR_CAUSE_IF (_ULCAST_(0x1) << MSA_CSR_CAUSE_IB)
+#define MSA_CSR_CAUSE_UB 13
+#define MSA_CSR_CAUSE_UF (_ULCAST_(0x1) << MSA_CSR_CAUSE_UB)
+#define MSA_CSR_CAUSE_OB 14
+#define MSA_CSR_CAUSE_OF (_ULCAST_(0x1) << MSA_CSR_CAUSE_OB)
+#define MSA_CSR_CAUSE_ZB 15
+#define MSA_CSR_CAUSE_ZF (_ULCAST_(0x1) << MSA_CSR_CAUSE_ZB)
+#define MSA_CSR_CAUSE_VB 16
+#define MSA_CSR_CAUSE_VF (_ULCAST_(0x1) << MSA_CSR_CAUSE_VB)
+#define MSA_CSR_CAUSE_EB 17
+#define MSA_CSR_CAUSE_EF (_ULCAST_(0x1) << MSA_CSR_CAUSE_EB)
+#define MSA_CSR_NXB 18
+#define MSA_CSR_NXF (_ULCAST_(0x1) << MSA_CSR_NXB)
+#define MSA_CSR_FSB 24
+#define MSA_CSR_FSF (_ULCAST_(0x1) << MSA_CSR_FSB)
+
+#endif /* _ASM_MSA_H */
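The MSA_CSR_* definitions above describe the MSACSR layout: rounding mode in bits 1:0, cause bits at bit 12, FS (flush subnormals) at bit 24. A sketch decoding an invented sample value:

/*
 * Decoding a sample MSACSR value with the bit layout defined above.
 * The 0x01000001 sample is invented: FS set, rounding mode "to zero".
 */
#include <stdio.h>
#include <stdint.h>

#define MSA_CSR_RMB	0
#define MSA_CSR_RMF	(0x3U << MSA_CSR_RMB)
#define MSA_CSR_CAUSEB	12
#define MSA_CSR_CAUSEF	(0x3fU << MSA_CSR_CAUSEB)
#define MSA_CSR_FSB	24
#define MSA_CSR_FSF	(0x1U << MSA_CSR_FSB)

static const char *rm_names[] = {
	"nearest", "to zero", "to +inf", "to -inf",
};

int main(void)
{
	uint32_t csr = 0x01000001;	/* hypothetical MSACSR contents */

	printf("rounding: %s\n", rm_names[(csr & MSA_CSR_RMF) >> MSA_CSR_RMB]);
	printf("cause bits: %#x\n",
	       (unsigned)((csr & MSA_CSR_CAUSEF) >> MSA_CSR_CAUSEB));
	printf("flush subnormals: %s\n", (csr & MSA_CSR_FSF) ? "on" : "off");
	return 0;
}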
diff --git a/arch/mips/include/asm/mutex.h b/arch/mips/include/asm/mutex.h
deleted file mode 100644
index 458c1f7..0000000
--- a/arch/mips/include/asm/mutex.h
+++ /dev/null
@@ -1,9 +0,0 @@
-/*
- * Pull in the generic implementation for the mutex fastpath.
- *
- * TODO: implement optimized primitives instead, or leave the generic
- * implementation in place, or pick the atomic_xchg() based generic
- * implementation. (see asm-generic/mutex-xchg.h for details)
- */
-
-#include <asm-generic/mutex-dec.h>
diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
index f59552f..3befe06 100644
--- a/arch/mips/include/asm/page.h
+++ b/arch/mips/include/asm/page.h
@@ -33,6 +33,9 @@
#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
#define PAGE_MASK (~((1 << PAGE_SHIFT) - 1))
+/* this is used for the calculation of real page sizes and must stay the same
+   for all page-size configurations */
+#define BASIC_PAGE_SHIFT 12
+
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
#define HPAGE_SHIFT (PAGE_SHIFT + PAGE_SHIFT - 3)
#define HPAGE_SIZE (_AC(1,UL) << HPAGE_SHIFT)
@@ -70,13 +73,16 @@
struct page;
+#include <asm/cpu-features.h>
+
static inline void clear_user_page(void *addr, unsigned long vaddr,
struct page *page)
{
extern void (*flush_data_cache_page)(unsigned long addr);
clear_page(addr);
- if (pages_do_alias((unsigned long) addr, vaddr & PAGE_MASK))
+ if (cpu_has_vtag_dcache || (cpu_has_dc_aliases &&
+ pages_do_alias((unsigned long) addr, vaddr & PAGE_MASK)))
flush_data_cache_page((unsigned long)addr);
}
@@ -165,7 +171,9 @@
* https://patchwork.linux-mips.org/patch/1541/
*/
-#define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0))
+#ifndef __pa_symbol
+#define __pa_symbol(x) __pa(RELOC_HIDE((unsigned long)(x), 0))
+#endif
#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
@@ -202,13 +210,11 @@
#define virt_addr_valid(kaddr) \
__virt_addr_valid((const volatile void *) (kaddr))
-#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
+#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-#define UNCAC_ADDR(addr) ((addr) - PAGE_OFFSET + UNCAC_BASE + \
- PHYS_OFFSET)
-#define CAC_ADDR(addr) ((addr) - UNCAC_BASE + PAGE_OFFSET - \
- PHYS_OFFSET)
+#define UNCAC_ADDR(addr) ((addr) - PAGE_OFFSET + UNCAC_BASE)
+#define CAC_ADDR(addr) ((addr) - UNCAC_BASE + PAGE_OFFSET)
#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>
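UNCAC_ADDR()/CAC_ADDR() above no longer fold PHYS_OFFSET into the conversion, leaving a pure base swap between the cached and uncached windows. A model with the classic KSEG0/KSEG1 bases, which are assumptions here:

/*
 * Model of the simplified UNCAC_ADDR()/CAC_ADDR() pair above. The base
 * values are the classic KSEG0/KSEG1 ones, used purely for illustration.
 */
#include <stdio.h>

#define PAGE_OFFSET	0x80000000UL	/* assumed cached base (KSEG0) */
#define UNCAC_BASE	0xa0000000UL	/* assumed uncached base (KSEG1) */

#define UNCAC_ADDR(addr) ((addr) - PAGE_OFFSET + UNCAC_BASE)
#define CAC_ADDR(addr)   ((addr) - UNCAC_BASE + PAGE_OFFSET)

int main(void)
{
	unsigned long cached = 0x80200000UL;
	unsigned long uncached = UNCAC_ADDR(cached);

	printf("cached %#lx <-> uncached %#lx (round trip %#lx)\n",
	       cached, uncached, CAC_ADDR(uncached));
	return 0;
}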
diff --git a/arch/mips/include/asm/parport.h b/arch/mips/include/asm/parport.h
deleted file mode 100644
index cf252af..0000000
--- a/arch/mips/include/asm/parport.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/parport.h>
diff --git a/arch/mips/include/asm/percpu.h b/arch/mips/include/asm/percpu.h
deleted file mode 100644
index 844e763..0000000
--- a/arch/mips/include/asm/percpu.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_PERCPU_H
-#define __ASM_PERCPU_H
-
-#include <asm-generic/percpu.h>
-
-#endif /* __ASM_PERCPU_H */
diff --git a/arch/mips/include/asm/pgtable-32.h b/arch/mips/include/asm/pgtable-32.h
index b4204c1..50d184e 100644
--- a/arch/mips/include/asm/pgtable-32.h
+++ b/arch/mips/include/asm/pgtable-32.h
@@ -51,11 +51,13 @@
#define PKMAP_BASE (0xfe000000UL)
+#ifndef VMALLOC_END
#ifdef CONFIG_HIGHMEM
# define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE)
#else
# define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE)
#endif
+#endif
#ifdef CONFIG_64BIT_PHYS_ADDR
#define pte_ERROR(e) \
@@ -140,76 +142,61 @@
((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
#define pte_unmap(pte) ((void)(pte))
-#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
-
-/* Swap entries must have VALID bit cleared. */
-#define __swp_type(x) (((x).val >> 10) & 0x1f)
-#define __swp_offset(x) ((x).val >> 15)
-#define __swp_entry(type,offset) \
- ((swp_entry_t) { ((type) << 10) | ((offset) << 15) })
+#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
/*
- * Bits 0, 4, 8, and 9 are taken, split up 28 bits of offset into this range:
+ * Two-word PTE case:
+ * Bits 0 and 1 (V+G) of pte_high are taken; use the rest for swap entries
+ * and the page offset...
+ * Bits F and P are in pte_low.
+ *
+ * Note: swp_entry_t is one word today.
*/
-#define PTE_FILE_MAX_BITS 28
-
-#define pte_to_pgoff(_pte) ((((_pte).pte >> 1 ) & 0x07) | \
- (((_pte).pte >> 2 ) & 0x38) | \
- (((_pte).pte >> 10) << 6 ))
-
-#define pgoff_to_pte(off) ((pte_t) { (((off) & 0x07) << 1 ) | \
- (((off) & 0x38) << 2 ) | \
- (((off) >> 6 ) << 10) | \
- _PAGE_FILE })
-
-#else
-
-/* Swap entries must have VALID and GLOBAL bits cleared. */
-#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
-#define __swp_type(x) (((x).val >> 2) & 0x1f)
-#define __swp_offset(x) ((x).val >> 7)
-#define __swp_entry(type,offset) \
- ((swp_entry_t) { ((type) << 2) | ((offset) << 7) })
-#else
-#define __swp_type(x) (((x).val >> 8) & 0x1f)
-#define __swp_offset(x) ((x).val >> 13)
-#define __swp_entry(type,offset) \
- ((swp_entry_t) { ((type) << 8) | ((offset) << 13) })
-#endif /* defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32) */
-
-#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
-/*
- * Bits 0 and 1 of pte_high are taken, use the rest for the page offset...
- */
-#define PTE_FILE_MAX_BITS 30
-
-#define pte_to_pgoff(_pte) ((_pte).pte_high >> 2)
-#define pgoff_to_pte(off) ((pte_t) { _PAGE_FILE, (off) << 2 })
-
-#else
-/*
- * Bits 0, 4, 6, and 7 are taken, split up 28 bits of offset into this range:
- */
-#define PTE_FILE_MAX_BITS 28
-
-#define pte_to_pgoff(_pte) ((((_pte).pte >> 1) & 0x7) | \
- (((_pte).pte >> 2) & 0x8) | \
- (((_pte).pte >> 8) << 4))
-
-#define pgoff_to_pte(off) ((pte_t) { (((off) & 0x7) << 1) | \
- (((off) & 0x8) << 2) | \
- (((off) >> 4) << 8) | \
- _PAGE_FILE })
-#endif
-
-#endif
-
-#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
+#define __swp_type(x) \
+ (((x).val >> __SWP_PTE_SKIP_BITS_NUM) & __SWP_TYPE_MASK)
+#define __swp_offset(x) \
+ ((x).val >> (__SWP_PTE_SKIP_BITS_NUM + __SWP_TYPE_BITS_NUM))
+#define __swp_entry(type, offset) \
+ ((swp_entry_t) { ((type) << __SWP_PTE_SKIP_BITS_NUM) | \
+ ((offset) << (__SWP_TYPE_BITS_NUM + __SWP_PTE_SKIP_BITS_NUM)) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_high })
#define __swp_entry_to_pte(x) ((pte_t) { 0, (x).val })
-#else
+
+#define PTE_FILE_MAX_BITS (32 - __SWP_PTE_SKIP_BITS_NUM)
+
+#define pte_to_pgoff(_pte) ((_pte).pte_high >> __SWP_PTE_SKIP_BITS_NUM)
+#define pgoff_to_pte(off) \
+ ((pte_t) { _PAGE_FILE, (off) << __SWP_PTE_SKIP_BITS_NUM })
+
+#else /* CONFIG_MIPS32 && !CONFIG_64BIT_PHYS_ADDR */
+
+/* Swap entries must have V,G,P and F bits cleared. */
+#define __swp_type(x) (((x).val >> _PAGE_DIRTY_SHIFT) & __SWP_TYPE_MASK)
+#define __swp_offset(x) \
+ ((x).val >> (_PAGE_DIRTY_SHIFT + __SWP_TYPE_BITS_NUM))
+#define __swp_entry(type, offset) \
+ ((swp_entry_t) { ((type) << _PAGE_DIRTY_SHIFT) | \
+ ((offset) << (_PAGE_DIRTY_SHIFT + __SWP_TYPE_BITS_NUM)) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
-#endif
+/*
+ * Bits V+G and F+P are taken; split up 28 bits of offset into this range:
+ */
+#define PTE_FILE_MAX_BITS (32 - __FILE_PTE_TOTAL_BITS_NUM)
+
+#define pte_to_pgoff(_pte) \
+ ((((_pte).pte >> __FILE_PTE_TOTAL_BITS_NUM) & \
+ ~(__FILE_PTE_LOW_MASK)) | \
+ (((_pte).pte >> __FILE_PTE_LOW_BITS_NUM) & \
+ (__FILE_PTE_LOW_MASK)))
+
+#define pgoff_to_pte(off) \
+ ((pte_t) { (((off) & __FILE_PTE_LOW_MASK) << \
+ (__FILE_PTE_LOW_BITS_NUM)) | \
+ (((off) & ~(__FILE_PTE_LOW_MASK)) << \
+ (__FILE_PTE_TOTAL_BITS_NUM)) | \
+ _PAGE_FILE })
+
+#endif /* CONFIG_64BIT_PHYS_ADDR && CONFIG_CPU_MIPS32 */
#endif /* _ASM_PGTABLE_32_H */
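As a sanity check, the split-field pgoff_to_pte()/pte_to_pgoff() pair above can
be modelled in userspace. The bit positions assumed below (_PAGE_FILE at bit 1,
_PAGE_GLOBAL at bit 6, hence a 4-bit low field) are illustrative only, not any
particular kernel configuration:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define _PAGE_FILE_SHIFT 1                  /* assumed: F at bit 1 */
    #define _PAGE_GLOBAL     (1u << 6)          /* assumed: G at bit 6 */
    #define LOW_BITS_NUM     2                  /* __FILE_PTE_LOW_BITS_NUM */
    #define TOTAL_BITS_NUM   4                  /* __FILE_PTE_TOTAL_BITS_NUM */
    #define LOW_MASK ((_PAGE_GLOBAL - 1) >> (_PAGE_FILE_SHIFT + 1))

    static uint32_t pgoff_to_pte(uint32_t off)  /* mirrors the macro above */
    {
            return ((off & LOW_MASK) << LOW_BITS_NUM) |
                   ((off & ~LOW_MASK) << TOTAL_BITS_NUM) |
                   (1u << _PAGE_FILE_SHIFT);
    }

    static uint32_t pte_to_pgoff(uint32_t pte)  /* mirrors the macro above */
    {
            return ((pte >> TOTAL_BITS_NUM) & ~LOW_MASK) |
                   ((pte >> LOW_BITS_NUM) & LOW_MASK);
    }

    int main(void)
    {
            for (uint32_t off = 0; off < (1u << 20); off++)
                    assert(pte_to_pgoff(pgoff_to_pte(off)) == off);
            puts("file-PTE round trip ok");
            return 0;
    }

With these assumptions the low 4 offset bits land just above F/P and the rest
above V/G, so P, V and G stay clear, F marks the entry as a file PTE, and no
offset bits are lost in the round trip.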
diff --git a/arch/mips/include/asm/pgtable-64.h b/arch/mips/include/asm/pgtable-64.h
index e1c49a9..0f24e55 100644
--- a/arch/mips/include/asm/pgtable-64.h
+++ b/arch/mips/include/asm/pgtable-64.h
@@ -17,7 +17,7 @@
#include <asm/cachectl.h>
#include <asm/fixmap.h>
-#ifdef CONFIG_PAGE_SIZE_64KB
+#if defined(CONFIG_PAGE_SIZE_64KB) && !defined(CONFIG_48VMBITS)
#include <asm-generic/pgtable-nopmd.h>
#else
#include <asm-generic/pgtable-nopud.h>
@@ -59,6 +59,10 @@
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))
+/* Hardware Page Walker definitions - one bit distinguishes user from kernel PGD */
+#define MIPS_BASE_SHIFT 63UL
+#define MIPS_BASE_SIZE 1UL
+
/*
* For 4kB page size we use a 3 level page tree and an 8kB pud, which
* permits us mapping 40 bits of virtual address space.
@@ -90,7 +94,11 @@
#define PTE_ORDER 0
#endif
#ifdef CONFIG_PAGE_SIZE_16KB
-#define PGD_ORDER 0
+#ifdef CONFIG_48VMBITS
+#define PGD_ORDER 1
+#else
+#define PGD_ORDER 0
+#endif
#define PUD_ORDER aieeee_attempt_to_allocate_pud
#define PMD_ORDER 0
#define PTE_ORDER 0
@@ -104,7 +112,11 @@
#ifdef CONFIG_PAGE_SIZE_64KB
#define PGD_ORDER 0
#define PUD_ORDER aieeee_attempt_to_allocate_pud
+#ifdef CONFIG_48VMBITS
+#define PMD_ORDER 0
+#else
#define PMD_ORDER aieeee_attempt_to_allocate_pmd
+#endif
#define PTE_ORDER 0
#endif
@@ -114,11 +126,7 @@
#endif
#define PTRS_PER_PTE ((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))
-#if PGDIR_SIZE >= TASK_SIZE64
-#define USER_PTRS_PER_PGD (1)
-#else
-#define USER_PTRS_PER_PGD (TASK_SIZE64 / PGDIR_SIZE)
-#endif
+#define USER_PTRS_PER_PGD ((TASK_SIZE64 / PGDIR_SIZE) ? (TASK_SIZE64 / PGDIR_SIZE) : 1)
#define FIRST_USER_ADDRESS 0UL
/*
@@ -283,21 +291,30 @@
* low 32 bits zero.
*/
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
-{ pte_t pte; pte_val(pte) = (type << 32) | (offset << 40); return pte; }
+{
+ pte_t pte;
-#define __swp_type(x) (((x).val >> 32) & 0xff)
-#define __swp_offset(x) ((x).val >> 40)
+ pte_val(pte) = (type << __SWP_PTE_SKIP_BITS_NUM) |
+ (offset << (__SWP_PTE_SKIP_BITS_NUM + __SWP_TYPE_BITS_NUM));
+ return pte;
+}
+
+#define __swp_type(x) \
+ (((x).val >> __SWP_PTE_SKIP_BITS_NUM) & __SWP_TYPE_MASK)
+#define __swp_offset(x) \
+ ((x).val >> (__SWP_PTE_SKIP_BITS_NUM + __SWP_TYPE_BITS_NUM))
#define __swp_entry(type, offset) ((swp_entry_t) { pte_val(mk_swap_pte((type), (offset))) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
/*
- * Bits 0, 4, 6, and 7 are taken. Let's leave bits 1, 2, 3, and 5 alone to
- * make things easier, and only use the upper 56 bits for the page offset...
+ * Take out all bits from V down to bit 0. Strictly only V, G, F and P need to
+ * be taken out, but today the PTE layout is complicated by HUGE page support etc.
*/
-#define PTE_FILE_MAX_BITS 56
+#define PTE_FILE_MAX_BITS (64 - _PAGE_DIRTY_SHIFT)
-#define pte_to_pgoff(_pte) ((_pte).pte >> 8)
-#define pgoff_to_pte(off) ((pte_t) { ((off) << 8) | _PAGE_FILE })
+#define pte_to_pgoff(_pte) ((_pte).pte >> _PAGE_DIRTY_SHIFT)
+#define pgoff_to_pte(off) \
+ ((pte_t) { ((off) << _PAGE_DIRTY_SHIFT) | _PAGE_FILE })
#endif /* _ASM_PGTABLE_64_H */
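A quick userspace sketch of the 64-bit swap layout above, assuming
__SWP_PTE_SKIP_BITS_NUM == 32 and __SWP_TYPE_BITS_NUM == 5 (the values given
later in pgtable-bits.h), confirming the low word stays zero:

    #include <assert.h>
    #include <stdint.h>

    #define SKIP_BITS 32    /* __SWP_PTE_SKIP_BITS_NUM: keep low 32 bits zero */
    #define TYPE_BITS 5     /* __SWP_TYPE_BITS_NUM */
    #define TYPE_MASK ((1ull << TYPE_BITS) - 1)

    static uint64_t mk_swap_pte(uint64_t type, uint64_t offset)
    {
            return (type << SKIP_BITS) | (offset << (SKIP_BITS + TYPE_BITS));
    }

    int main(void)
    {
            uint64_t e = mk_swap_pte(3, 0x12345);

            assert(((e >> SKIP_BITS) & TYPE_MASK) == 3);        /* __swp_type() */
            assert((e >> (SKIP_BITS + TYPE_BITS)) == 0x12345);  /* __swp_offset() */
            assert((e & 0xffffffffull) == 0);   /* low word zero: V/G/F/P clear */
            return 0;
    }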
diff --git a/arch/mips/include/asm/pgtable-bits.h b/arch/mips/include/asm/pgtable-bits.h
index 32aea48..bdb1711 100644
--- a/arch/mips/include/asm/pgtable-bits.h
+++ b/arch/mips/include/asm/pgtable-bits.h
@@ -55,16 +55,15 @@
*/
#define _PAGE_PRESENT_SHIFT 6
#define _PAGE_PRESENT (1 << _PAGE_PRESENT_SHIFT)
-#define _PAGE_READ_SHIFT 7
-#define _PAGE_READ (1 << _PAGE_READ_SHIFT)
-#define _PAGE_WRITE_SHIFT 8
-#define _PAGE_WRITE (1 << _PAGE_WRITE_SHIFT)
-#define _PAGE_ACCESSED_SHIFT 9
-#define _PAGE_ACCESSED (1 << _PAGE_ACCESSED_SHIFT)
-#define _PAGE_MODIFIED_SHIFT 10
+#define _PAGE_MODIFIED_SHIFT 7
#define _PAGE_MODIFIED (1 << _PAGE_MODIFIED_SHIFT)
-
-#define _PAGE_FILE (1 << 10)
+#define _PAGE_FILE (1 << _PAGE_MODIFIED_SHIFT)
+#define _PAGE_READ_SHIFT 8
+#define _PAGE_READ (1 << _PAGE_READ_SHIFT)
+#define _PAGE_WRITE_SHIFT 9
+#define _PAGE_WRITE (1 << _PAGE_WRITE_SHIFT)
+#define _PAGE_ACCESSED_SHIFT 10
+#define _PAGE_ACCESSED (1 << _PAGE_ACCESSED_SHIFT)
#elif defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
@@ -75,16 +74,16 @@
*/
#define _PAGE_PRESENT_SHIFT 0
#define _PAGE_PRESENT (1 << _PAGE_PRESENT_SHIFT)
-#define _PAGE_READ_SHIFT 1
-#define _PAGE_READ (1 << _PAGE_READ_SHIFT)
-#define _PAGE_WRITE_SHIFT 2
-#define _PAGE_WRITE (1 << _PAGE_WRITE_SHIFT)
-#define _PAGE_ACCESSED_SHIFT 3
-#define _PAGE_ACCESSED (1 << _PAGE_ACCESSED_SHIFT)
-#define _PAGE_MODIFIED_SHIFT 4
+#define _PAGE_MODIFIED_SHIFT 1
#define _PAGE_MODIFIED (1 << _PAGE_MODIFIED_SHIFT)
-#define _PAGE_FILE_SHIFT 4
+#define _PAGE_FILE_SHIFT 1
#define _PAGE_FILE (1 << _PAGE_FILE_SHIFT)
+#define _PAGE_READ_SHIFT 2
+#define _PAGE_READ (1 << _PAGE_READ_SHIFT)
+#define _PAGE_WRITE_SHIFT 3
+#define _PAGE_WRITE (1 << _PAGE_WRITE_SHIFT)
+#define _PAGE_ACCESSED_SHIFT 4
+#define _PAGE_ACCESSED (1 << _PAGE_ACCESSED_SHIFT)
/*
* And these are the hardware TLB bits
@@ -102,6 +101,8 @@
#define _CACHE_MASK (1 << _CACHE_UNCACHED_SHIFT)
#else /* 'Normal' r4K case */
+
+#if !defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_CPU_MIPSR6)
/*
* When using the RI/XI bit support, we have 13 bits of flags below
* the physical address. The RI/XI bits are placed such that a SRL 5
@@ -118,22 +119,24 @@
*/
#define _PAGE_PRESENT_SHIFT (0)
#define _PAGE_PRESENT (1 << _PAGE_PRESENT_SHIFT)
-#define _PAGE_READ_SHIFT (cpu_has_rixi ? _PAGE_PRESENT_SHIFT : _PAGE_PRESENT_SHIFT + 1)
+#define _PAGE_MODIFIED_SHIFT (_PAGE_PRESENT_SHIFT + 1)
+#define _PAGE_MODIFIED (1 << _PAGE_MODIFIED_SHIFT)
+#define _PAGE_FILE_SHIFT (_PAGE_MODIFIED_SHIFT)
+#define _PAGE_FILE (_PAGE_MODIFIED)
+#define _PAGE_READ_SHIFT \
+ (cpu_has_rixi ? _PAGE_MODIFIED_SHIFT : _PAGE_MODIFIED_SHIFT + 1)
#define _PAGE_READ ({BUG_ON(cpu_has_rixi); 1 << _PAGE_READ_SHIFT; })
#define _PAGE_WRITE_SHIFT (_PAGE_READ_SHIFT + 1)
#define _PAGE_WRITE (1 << _PAGE_WRITE_SHIFT)
#define _PAGE_ACCESSED_SHIFT (_PAGE_WRITE_SHIFT + 1)
#define _PAGE_ACCESSED (1 << _PAGE_ACCESSED_SHIFT)
-#define _PAGE_MODIFIED_SHIFT (_PAGE_ACCESSED_SHIFT + 1)
-#define _PAGE_MODIFIED (1 << _PAGE_MODIFIED_SHIFT)
-#define _PAGE_FILE (_PAGE_MODIFIED)
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
/* huge tlb page */
-#define _PAGE_HUGE_SHIFT (_PAGE_MODIFIED_SHIFT + 1)
+#define _PAGE_HUGE_SHIFT (_PAGE_ACCESSED_SHIFT + 1)
#define _PAGE_HUGE (1 << _PAGE_HUGE_SHIFT)
#else
-#define _PAGE_HUGE_SHIFT (_PAGE_MODIFIED_SHIFT)
+#define _PAGE_HUGE_SHIFT (_PAGE_ACCESSED_SHIFT)
#define _PAGE_HUGE ({BUG(); 1; }) /* Dummy value */
#endif
@@ -154,7 +157,152 @@
#define _PAGE_NO_READ_SHIFT (cpu_has_rixi ? _PAGE_NO_EXEC_SHIFT + 1 : _PAGE_NO_EXEC_SHIFT)
#define _PAGE_NO_READ ({BUG_ON(!cpu_has_rixi); 1 << _PAGE_NO_READ_SHIFT; })
-#define _PAGE_GLOBAL_SHIFT (_PAGE_NO_READ_SHIFT + 1)
+#else /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
+
+/* Static bit allocation for MIPS R2, in two variants -
+   with or without HUGE TLB support in the 64BIT kernel.
+   RIXI is supported in both. */
+
+#ifdef CONFIG_64BIT
+
+/*
+ * Low bits are: CCC D V G RI XI [S H] A W R M(=F) P
+ * TLB refill will do a ROTR 7/9 (in case of cpu_has_rixi),
+ * or SRL/DSRL 7/9 to strip low bits.
+ * PFN size in high bits is 49 or 51 bits --> 512TB or 4*512TB for 4KB pages
+ */
+
+#define _PAGE_PRESENT_SHIFT (0)
+#define _PAGE_PRESENT (1 << _PAGE_PRESENT_SHIFT)
+/* implemented in software */
+#define _PAGE_MODIFIED_SHIFT (_PAGE_PRESENT_SHIFT + 1)
+#define _PAGE_MODIFIED (1 << _PAGE_MODIFIED_SHIFT)
+/* set:pagecache unset:swap */
+#define _PAGE_FILE (_PAGE_MODIFIED)
+/* implemented in software, should be unused if cpu_has_rixi. */
+#define _PAGE_READ_SHIFT (_PAGE_MODIFIED_SHIFT + 1)
+#define _PAGE_READ (1 << _PAGE_READ_SHIFT)
+/* implemented in software */
+#define _PAGE_WRITE_SHIFT (_PAGE_READ_SHIFT + 1)
+#define _PAGE_WRITE (1 << _PAGE_WRITE_SHIFT)
+/* implemented in software */
+#define _PAGE_ACCESSED_SHIFT (_PAGE_WRITE_SHIFT + 1)
+#define _PAGE_ACCESSED (1 << _PAGE_ACCESSED_SHIFT)
+
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
+/* huge tlb page */
+#define _PAGE_HUGE_SHIFT (_PAGE_ACCESSED_SHIFT + 1)
+#define _PAGE_HUGE (1 << _PAGE_HUGE_SHIFT)
+#define _PAGE_SPLITTING_SHIFT (_PAGE_HUGE_SHIFT + 1)
+#define _PAGE_SPLITTING (1 << _PAGE_SPLITTING_SHIFT)
+#else
+#define _PAGE_HUGE_SHIFT (_PAGE_ACCESSED_SHIFT)
+#define _PAGE_HUGE ({BUG(); 1; }) /* Dummy value */
+#define _PAGE_SPLITTING_SHIFT (_PAGE_HUGE_SHIFT)
+#define _PAGE_SPLITTING ({BUG(); 1; }) /* Dummy value */
+#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */
+
+/* Page cannot be executed */
+#define _PAGE_NO_EXEC_SHIFT (_PAGE_SPLITTING_SHIFT + 1)
+#define _PAGE_NO_EXEC (1 << _PAGE_NO_EXEC_SHIFT)
+
+/* Page cannot be read */
+#define _PAGE_NO_READ_SHIFT (_PAGE_NO_EXEC_SHIFT + 1)
+#define _PAGE_NO_READ (1 << _PAGE_NO_READ_SHIFT)
+
+#else /* !CONFIG_64BIT */
+
+#ifndef CONFIG_MIPS_HUGE_TLB_SUPPORT
+
+/*
+ * No HUGE page support
+ * Low bits are: CCC D V G RI(=R) XI A W M(=F) P
+ * TLB refill will do a ROTR 6 (in case of cpu_has_rixi),
+ * or SRL 6 to strip low bits.
+ * All 20 PFN bits are preserved in high bits (4GB in 4KB pages)
+ */
+
+#define _PAGE_PRESENT_SHIFT (0)
+#define _PAGE_PRESENT (1 << _PAGE_PRESENT_SHIFT)
+/* implemented in software */
+#define _PAGE_MODIFIED_SHIFT (_PAGE_PRESENT_SHIFT + 1)
+#define _PAGE_MODIFIED (1 << _PAGE_MODIFIED_SHIFT)
+/* set:pagecache unset:swap */
+#define _PAGE_FILE_SHIFT (_PAGE_MODIFIED_SHIFT)
+#define _PAGE_FILE (_PAGE_MODIFIED)
+/* implemented in software */
+#define _PAGE_WRITE_SHIFT (_PAGE_MODIFIED_SHIFT + 1)
+#define _PAGE_WRITE (1 << _PAGE_WRITE_SHIFT)
+/* implemented in software */
+#define _PAGE_ACCESSED_SHIFT (_PAGE_WRITE_SHIFT + 1)
+#define _PAGE_ACCESSED (1 << _PAGE_ACCESSED_SHIFT)
+
+/* huge tlb page dummies */
+#define _PAGE_HUGE_SHIFT (_PAGE_ACCESSED_SHIFT)
+#define _PAGE_HUGE ({BUG(); 1; }) /* Dummy value */
+#define _PAGE_SPLITTING_SHIFT (_PAGE_HUGE_SHIFT)
+#define _PAGE_SPLITTING ({BUG(); 1; }) /* Dummy value */
+
+/* Page cannot be executed */
+#define _PAGE_NO_EXEC_SHIFT (_PAGE_SPLITTING_SHIFT + 1)
+#define _PAGE_NO_EXEC (1 << _PAGE_NO_EXEC_SHIFT)
+
+/* Page cannot be read */
+#define _PAGE_NO_READ_SHIFT (_PAGE_NO_EXEC_SHIFT + 1)
+#define _PAGE_NO_READ (1 << _PAGE_NO_READ_SHIFT)
+
+/* implemented in software, should be unused if cpu_has_rixi. */
+#define _PAGE_READ_SHIFT (_PAGE_NO_READ_SHIFT)
+#define _PAGE_READ (1 << _PAGE_READ_SHIFT)
+
+#else /* CONFIG_MIPS_HUGE_TLB_SUPPORT */
+
+/*
+ * Low bits are: CCC D V G S H A W R M(=F) P
+ * No RIXI is enforced
+ * TLB refill will do a SRL 7,
+ * only 19 PFN bits are preserved in high bits (2GB in 4KB pages)
+ */
+
+#define _PAGE_PRESENT_SHIFT (0)
+#define _PAGE_PRESENT (1 << _PAGE_PRESENT_SHIFT)
+/* implemented in software */
+#define _PAGE_MODIFIED_SHIFT (_PAGE_PRESENT_SHIFT + 1)
+#define _PAGE_MODIFIED (1 << _PAGE_MODIFIED_SHIFT)
+/* set:pagecache unset:swap */
+#define _PAGE_FILE_SHIFT (_PAGE_MODIFIED_SHIFT)
+#define _PAGE_FILE (_PAGE_MODIFIED)
+/* implemented in software */
+#define _PAGE_READ_SHIFT (_PAGE_MODIFIED_SHIFT + 1)
+#define _PAGE_READ (1 << _PAGE_READ_SHIFT)
+/* implemented in software */
+#define _PAGE_WRITE_SHIFT (_PAGE_READ_SHIFT + 1)
+#define _PAGE_WRITE (1 << _PAGE_WRITE_SHIFT)
+/* implemented in software */
+#define _PAGE_ACCESSED_SHIFT (_PAGE_WRITE_SHIFT + 1)
+#define _PAGE_ACCESSED (1 << _PAGE_ACCESSED_SHIFT)
+
+/* huge tlb page... but no HUGE page support in MIPS32 yet */
+#define _PAGE_HUGE_SHIFT (_PAGE_ACCESSED_SHIFT + 1)
+#define _PAGE_HUGE (1 << _PAGE_HUGE_SHIFT)
+#define _PAGE_SPLITTING_SHIFT (_PAGE_HUGE_SHIFT + 1)
+#define _PAGE_SPLITTING (1 << _PAGE_SPLITTING_SHIFT)
+
+/* Page cannot be executed */
+#define _PAGE_NO_EXEC_SHIFT (_PAGE_SPLITTING_SHIFT)
+#define _PAGE_NO_EXEC ({BUG(); 1; }) /* Dummy value */
+/* Page cannot be read */
+#define _PAGE_NO_READ_SHIFT (_PAGE_NO_EXEC_SHIFT)
+#define _PAGE_NO_READ ({BUG(); 1; }) /* Dummy value */
+
+#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */
+
+#endif /* CONFIG_64BIT */
+
+#endif /* !CONFIG_CPU_MIPSR2 && !CONFIG_CPU_MIPSR6 */
+
+
+#define _PAGE_GLOBAL_SHIFT (_PAGE_NO_READ_SHIFT + 1)
#define _PAGE_GLOBAL (1 << _PAGE_GLOBAL_SHIFT)
#define _PAGE_VALID_SHIFT (_PAGE_GLOBAL_SHIFT + 1)
@@ -190,6 +338,51 @@
#define _PAGE_GLOBAL_SHIFT ilog2(_PAGE_GLOBAL)
#endif
+/*
+ * Swap and file entry format definitions in the PTE.
+ * These constant definitions live here because they are tied to the bit
+ * positions above; the real macros are still in pgtable-32/64.h.
+ *
+ * There are 3 kinds of format - 64BIT, generic 32BIT, and 32BIT with 64BIT PA
+ */
+#define __SWP_TYPE_BITS_NUM 5
+#define __SWP_TYPE_MASK ((1 << __SWP_TYPE_BITS_NUM) - 1)
+
+#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
+
+/*
+ * Two-word PTE case:
+ * Bits 0 and 1 (V+G) of pte_high are taken; use the rest for swap entries
+ * and the page offset...
+ * Bits F and P are in pte_low.
+ *
+ * Note: both swp_entry_t and the file entry are one word today (pte_high)
+ */
+#define __SWP_PTE_SKIP_BITS_NUM 2
+
+#elif defined(CONFIG_64BIT)
+/*
+ * Swap entry is located in high 32 bits of PTE
+ *
+ * File entry starts right from D bit
+ */
+#define __SWP_PTE_SKIP_BITS_NUM 32
+
+#else /* CONFIG_32BIT && !CONFIG_64BIT_PHYS_ADDR */
+/*
+ * Swap entry is encoded starting right from D bit
+ *
+ * File entry is encoded in all bits besides V, G, F and P, which are grouped
+ * in two fields with a variable gap, so additional location info is defined here
+ */
+/* rightmost taken out field - F and P */
+#define __FILE_PTE_LOW_BITS_NUM 2
+/* total number of taken out bits - V,G,F,P */
+#define __FILE_PTE_TOTAL_BITS_NUM 4
+/* mask for intermediate field which is used for encoding */
+#define __FILE_PTE_LOW_MASK ((_PAGE_GLOBAL - 1) >> (_PAGE_FILE_SHIFT + 1))
+
+#endif /* defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32) */
#ifndef __ASSEMBLY__
/*
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index 8b8f6b3..2dc5a09 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -10,6 +10,7 @@
#include <linux/mm_types.h>
#include <linux/mmzone.h>
+#include <linux/smp.h>
#ifdef CONFIG_32BIT
#include <asm/pgtable-32.h>
#endif
@@ -95,6 +96,27 @@
#define pmd_page_vaddr(pmd) pmd_val(pmd)
+#define htw_stop() \
+do { \
+ if (cpu_has_htw) { \
+ raw_current_cpu_data.htw_level++; \
+ write_c0_pwctl(HTW_PWCTL_BASE & \
+ ~(1 << MIPS_PWCTL_PWEN_SHIFT)); \
+ back_to_back_c0_hazard(); \
+ } \
+} while (0)
+
+#define htw_start() \
+do { \
+ if (cpu_has_htw) { \
+ if (!--raw_current_cpu_data.htw_level) { \
+ write_c0_pwctl(HTW_PWCTL_BASE | \
+ (1 << MIPS_PWCTL_PWEN_SHIFT)); \
+ back_to_back_c0_hazard(); \
+ } \
+ } \
+} while (0)
+
#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
#define pte_none(pte) (!(((pte).pte_low | (pte).pte_high) & ~_PAGE_GLOBAL))
@@ -123,12 +145,17 @@
static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
pte_t null = __pte(0);
+ unsigned long flags;
+ local_irq_save(flags);
+ htw_stop();
/* Preserve global status for the pair */
if (ptep_buddy(ptep)->pte_low & _PAGE_GLOBAL)
null.pte_low = null.pte_high = _PAGE_GLOBAL;
set_pte_at(mm, addr, ptep, null);
+ htw_start();
+ local_irq_restore(flags);
}
#else
@@ -159,6 +186,10 @@
static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ htw_stop();
#if !defined(CONFIG_CPU_R3000) && !defined(CONFIG_CPU_TX39XX)
/* Preserve global status for the pair */
if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL)
@@ -166,6 +197,8 @@
else
#endif
set_pte_at(mm, addr, ptep, __pte(0));
+ htw_start();
+ local_irq_restore(flags);
}
#endif
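The htw_level counter makes htw_stop()/htw_start() safe to nest: only the
outermost htw_start() re-enables the hardware page walker. A stripped-down
model of just the refcounting (the real macros also write c0_pwctl and need
the back-to-back hazard barrier):

    #include <assert.h>

    static int htw_level;           /* stands in for raw_current_cpu_data.htw_level */
    static int htw_enabled = 1;     /* stands in for the PWEn bit in c0_pwctl */

    static void htw_stop(void)  { htw_level++; htw_enabled = 0; }
    static void htw_start(void) { if (!--htw_level) htw_enabled = 1; }

    int main(void)
    {
            htw_stop();             /* outer section touching page tables */
            htw_stop();             /* nested section */
            htw_start();            /* inner start: walker must stay off */
            assert(!htw_enabled);
            htw_start();            /* outermost start: walker back on */
            assert(htw_enabled);
            return 0;
    }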
diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
index 1470b7b..2ce2b2f 100644
--- a/arch/mips/include/asm/processor.h
+++ b/arch/mips/include/asm/processor.h
@@ -40,7 +40,7 @@
* A special page (the vdso) is mapped into all processes at the very
* top of the virtual memory space.
*/
-#define SPECIAL_PAGES_SIZE PAGE_SIZE
+#define SPECIAL_PAGES_SIZE (PAGE_SIZE * 2)
#ifdef CONFIG_32BIT
#ifdef CONFIG_KVM_GUEST
@@ -71,7 +71,11 @@
* 8192EB ...
*/
#define TASK_SIZE32 0x7fff8000UL
-#define TASK_SIZE64 0x10000000000UL
+#ifdef CONFIG_48VMBITS
+#define TASK_SIZE64 (0x1UL << ((cpu_data[0].vmbits > 48) ? 48 : cpu_data[0].vmbits))
+#else
+#define TASK_SIZE64 (0x10000000000UL)
+#endif
#define TASK_SIZE (test_thread_flag(TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE64)
#ifdef __KERNEL__
@@ -97,7 +101,39 @@
#define NUM_FPU_REGS 32
-typedef __u64 fpureg_t;
+#ifdef CONFIG_CPU_HAS_MSA
+# define FPU_REG_WIDTH 128
+#else
+# define FPU_REG_WIDTH 64
+#endif
+
+union fpureg {
+ __u8 val8[FPU_REG_WIDTH / 8];
+ __u16 val16[FPU_REG_WIDTH / 16];
+ __u32 val32[FPU_REG_WIDTH / 32];
+ __u64 val64[FPU_REG_WIDTH / 64];
+};
+
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+# define FPR_IDX(width, idx) (idx)
+#else
+# define FPR_IDX(width, idx) ((idx) ^ ((64 / (width)) - 1))
+#endif
+
+#define BUILD_FPR_ACCESS(width) \
+static inline u##width get_fpr##width(union fpureg *fpr, unsigned idx) \
+{ \
+ return fpr->val##width[FPR_IDX(width, idx)]; \
+} \
+ \
+static inline void set_fpr##width(union fpureg *fpr, unsigned idx, \
+ u##width val) \
+{ \
+ fpr->val##width[FPR_IDX(width, idx)] = val; \
+}
+
+BUILD_FPR_ACCESS(32)
+BUILD_FPR_ACCESS(64)
/*
* It would be nice to add some more fields for emulator statistics, but there
@@ -107,8 +143,9 @@
*/
struct mips_fpu_struct {
- fpureg_t fpr[NUM_FPU_REGS];
+ union fpureg fpr[NUM_FPU_REGS];
unsigned int fcr31;
+ unsigned int msacsr;
};
#define NUM_DSP_REGS 6
@@ -195,7 +232,13 @@
unsigned long seg;
} mm_segment_t;
-#define ARCH_MIN_TASKALIGN 8
+#ifdef CONFIG_CPU_HAS_MSA
+# define ARCH_MIN_TASKALIGN 16
+# define FPU_ALIGN __aligned(16)
+#else
+# define ARCH_MIN_TASKALIGN 8
+# define FPU_ALIGN
+#endif
struct mips_abi;
@@ -212,7 +255,7 @@
unsigned long cp0_status;
/* Saved fpu/fpu emulator stuff. */
- struct mips_fpu_struct fpu;
+ struct mips_fpu_struct fpu FPU_ALIGN;
#ifdef CONFIG_MIPS_MT_FPAFF
/* Emulated instruction count */
unsigned long emulated_fp;
@@ -275,8 +318,9 @@
* Saved FPU/FPU emulator stuff \
*/ \
.fpu = { \
- .fpr = {0,}, \
+ .fpr = {{{0,},},}, \
.fcr31 = 0, \
+ .msacsr = 0, \
}, \
/* \
* FPU affinity state (null if not FPAFF) \
@@ -358,4 +402,16 @@
#endif
+/*
+ * Functions & macros implementing the PR_GET_FP_MODE & PR_SET_FP_MODE options
+ * to the prctl syscall.
+ */
+extern long mips_get_process_fp_mode(struct task_struct *task);
+extern long mips_set_process_fp_mode(struct task_struct *task,
+ unsigned long value);
+
+#define GET_FP_MODE(task) mips_get_process_fp_mode(task)
+#define SET_FP_MODE(task, value) mips_set_process_fp_mode(task, value)
+
#endif /* _ASM_PROCESSOR_H */
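The FPR_IDX() xor above keeps sub-doubleword FP register accesses
endian-neutral: on a big-endian kernel the 32-bit halves within each 64-bit
doubleword are reflected, while the doublewords themselves stay in order.
The arithmetic alone shows the effect:

    #include <stdio.h>

    /* big-endian form of FPR_IDX() */
    #define FPR_IDX_BE(width, idx) ((idx) ^ ((64 / (width)) - 1))

    int main(void)
    {
            /* indices into val32[] of one 128-bit register */
            for (int idx = 0; idx < 4; idx++)
                    printf("width 32: idx %d -> %d\n", idx, FPR_IDX_BE(32, idx));
            /* prints 0->1, 1->0, 2->3, 3->2: halves swap within each
               64-bit doubleword, so get_fpr32(fpr, 0) always names the
               architecturally low half */
            return 0;
    }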
diff --git a/arch/mips/include/asm/r4kcache.h b/arch/mips/include/asm/r4kcache.h
index a0b2650..ed66ea9 100644
--- a/arch/mips/include/asm/r4kcache.h
+++ b/arch/mips/include/asm/r4kcache.h
@@ -16,6 +16,7 @@
#include <asm/cacheops.h>
#include <asm/cpu-features.h>
#include <asm/mipsmtregs.h>
+#include <asm/uaccess.h>
/*
* This macro return a properly sign-extended address suitable as base address
@@ -28,17 +29,31 @@
* - We need a properly sign extended address for 64-bit code. To get away
* without ifdefs we let the compiler do it by a type cast.
*/
-#define INDEX_BASE CKSEG0
+#ifndef INDEX_BASE
+#define INDEX_BASE CKSEG0
+#endif
+#ifdef CONFIG_CPU_MIPSR6
#define cache_op(op,addr) \
__asm__ __volatile__( \
" .set push \n" \
" .set noreorder \n" \
+ " .set mips64r6\n\t \n" \
+ " cache %0, %1 \n" \
+ " .set pop \n" \
+ : \
+ : "i" (op), "R" (*(unsigned char *)(addr)))
+#else
+#define cache_op(op,addr) \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set noreorder \n" \
" .set mips3\n\t \n" \
" cache %0, %1 \n" \
" .set pop \n" \
: \
: "i" (op), "R" (*(unsigned char *)(addr)))
+#endif
#ifdef CONFIG_MIPS_MT
/*
@@ -46,8 +61,6 @@
* execution during I-cache flushes.
*/
-#define PROTECT_CACHE_FLUSHES 1
-
#ifdef PROTECT_CACHE_FLUSHES
extern int mt_protiflush;
@@ -190,10 +203,24 @@
cache_op(Hit_Writeback_Inv_SD, addr);
}
+#ifdef CONFIG_CPU_MIPSR6
#define protected_cache_op(op,addr) \
__asm__ __volatile__( \
" .set push \n" \
" .set noreorder \n" \
+ " .set mips64r6 \n" \
+ "1: cache %0, (%1) \n" \
+ "2: .set pop \n" \
+ " .section __ex_table,\"a\" \n" \
+ " "STR(PTR)" 1b, 2b \n" \
+ " .previous" \
+ : \
+ : "i" (op), "r" (addr))
+#else
+#define protected_cache_op(op,addr) \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set noreorder \n" \
" .set mips3 \n" \
"1: cache %0, (%1) \n" \
"2: .set pop \n" \
@@ -202,13 +229,33 @@
" .previous" \
: \
: "i" (op), "r" (addr))
+#endif
+
+#ifdef CONFIG_EVA
+#define protected_cachee_op(op,addr) \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set noreorder \n" \
+ " .set eva \n" \
+ "1: cachee %0, (%1) \n" \
+ "2: .set pop \n" \
+ " .section __ex_table,\"a\" \n" \
+ " "STR(PTR)" 1b, 2b \n" \
+ " .previous" \
+ : \
+ : "i" (op), "r" (addr))
+#endif
/*
* The next two are for badland addresses like signal trampolines.
*/
static inline void protected_flush_icache_line(unsigned long addr)
{
+#ifndef CONFIG_EVA
protected_cache_op(Hit_Invalidate_I, addr);
+#else
+ protected_cachee_op(Hit_Invalidate_I, addr);
+#endif
}
/*
@@ -219,7 +266,11 @@
*/
static inline void protected_writeback_dcache_line(unsigned long addr)
{
+#ifndef CONFIG_EVA
protected_cache_op(Hit_Writeback_Inv_D, addr);
+#else
+ protected_cachee_op(Hit_Writeback_Inv_D, addr);
+#endif
}
static inline void protected_writeback_scache_line(unsigned long addr)
@@ -235,6 +286,144 @@
cache_op(Page_Invalidate_T, addr);
}
+#ifdef CONFIG_CPU_MIPSR6
+
+#define cache16_unroll32(base,op) \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set noreorder \n" \
+ " .set mips64r6 \n" \
+ " .set noat \n" \
+ " cache %1, 0x000(%0); cache %1, 0x010(%0) \n" \
+ " cache %1, 0x020(%0); cache %1, 0x030(%0) \n" \
+ " cache %1, 0x040(%0); cache %1, 0x050(%0) \n" \
+ " cache %1, 0x060(%0); cache %1, 0x070(%0) \n" \
+ " cache %1, 0x080(%0); cache %1, 0x090(%0) \n" \
+ " cache %1, 0x0a0(%0); cache %1, 0x0b0(%0) \n" \
+ " cache %1, 0x0c0(%0); cache %1, 0x0d0(%0) \n" \
+ " cache %1, 0x0e0(%0); cache %1, 0x0f0(%0) \n" \
+ STR(PTR_ADDIU) " $1, %0, 0x100 \n" \
+ " cache %1, 0x000($1); cache %1, 0x010($1) \n" \
+ " cache %1, 0x020($1); cache %1, 0x030($1) \n" \
+ " cache %1, 0x040($1); cache %1, 0x050($1) \n" \
+ " cache %1, 0x060($1); cache %1, 0x070($1) \n" \
+ " cache %1, 0x080($1); cache %1, 0x090($1) \n" \
+ " cache %1, 0x0a0($1); cache %1, 0x0b0($1) \n" \
+ " cache %1, 0x0c0($1); cache %1, 0x0d0($1) \n" \
+ " cache %1, 0x0e0($1); cache %1, 0x0f0($1) \n" \
+ " .set pop \n" \
+ : \
+ : "r" (base), \
+ "i" (op));
+
+#define cache32_unroll32(base,op) \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set noreorder \n" \
+ " .set mips64r6 \n" \
+ " .set noat \n" \
+ " cache %1, 0x000(%0); cache %1, 0x020(%0) \n" \
+ " cache %1, 0x040(%0); cache %1, 0x060(%0) \n" \
+ " cache %1, 0x080(%0); cache %1, 0x0a0(%0) \n" \
+ " cache %1, 0x0c0(%0); cache %1, 0x0e0(%0) \n" \
+ STR(PTR_ADDIU) " $1, %0, 0x100 \n" \
+ " cache %1, 0x000($1); cache %1, 0x020($1) \n" \
+ " cache %1, 0x040($1); cache %1, 0x060($1) \n" \
+ " cache %1, 0x080($1); cache %1, 0x0a0($1) \n" \
+ " cache %1, 0x0c0($1); cache %1, 0x0e0($1) \n" \
+ STR(PTR_ADDIU) " $1, $1, 0x100 \n" \
+ " cache %1, 0x000($1); cache %1, 0x020($1) \n" \
+ " cache %1, 0x040($1); cache %1, 0x060($1) \n" \
+ " cache %1, 0x080($1); cache %1, 0x0a0($1) \n" \
+ " cache %1, 0x0c0($1); cache %1, 0x0e0($1) \n" \
+ STR(PTR_ADDIU) " $1, $1, 0x100 \n" \
+ " cache %1, 0x000($1); cache %1, 0x020($1) \n" \
+ " cache %1, 0x040($1); cache %1, 0x060($1) \n" \
+ " cache %1, 0x080($1); cache %1, 0x0a0($1) \n" \
+ " cache %1, 0x0c0($1); cache %1, 0x0e0($1) \n" \
+ " .set pop \n" \
+ : \
+ : "r" (base), \
+ "i" (op));
+
+#define cache64_unroll32(base,op) \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set noreorder \n" \
+ " .set mips64r6 \n" \
+ " .set noat \n" \
+ " cache %1, 0x000(%0); cache %1, 0x040(%0) \n" \
+ " cache %1, 0x080(%0); cache %1, 0x0c0(%0) \n" \
+ STR(PTR_ADDIU) " $1, %0, 0x100 \n" \
+ " cache %1, 0x000($1); cache %1, 0x040($1) \n" \
+ " cache %1, 0x080($1); cache %1, 0x0c0($1) \n" \
+ STR(PTR_ADDIU) " $1, $1, 0x100 \n" \
+ " cache %1, 0x000($1); cache %1, 0x040($1) \n" \
+ " cache %1, 0x080($1); cache %1, 0x0c0($1) \n" \
+ STR(PTR_ADDIU) " $1, $1, 0x100 \n" \
+ " cache %1, 0x000($1); cache %1, 0x040($1) \n" \
+ " cache %1, 0x080($1); cache %1, 0x0c0($1) \n" \
+ STR(PTR_ADDIU) " $1, $1, 0x100 \n" \
+ " cache %1, 0x000($1); cache %1, 0x040($1) \n" \
+ " cache %1, 0x080($1); cache %1, 0x0c0($1) \n" \
+ STR(PTR_ADDIU) " $1, $1, 0x100 \n" \
+ " cache %1, 0x000($1); cache %1, 0x040($1) \n" \
+ " cache %1, 0x080($1); cache %1, 0x0c0($1) \n" \
+ STR(PTR_ADDIU) " $1, $1, 0x100 \n" \
+ " cache %1, 0x000($1); cache %1, 0x040($1) \n" \
+ " cache %1, 0x080($1); cache %1, 0x0c0($1) \n" \
+ STR(PTR_ADDIU) " $1, $1, 0x100 \n" \
+ " cache %1, 0x000($1); cache %1, 0x040($1) \n" \
+ " cache %1, 0x080($1); cache %1, 0x0c0($1) \n" \
+ " .set pop \n" \
+ : \
+ : "r" (base), \
+ "i" (op));
+
+#define cache128_unroll32(base,op) \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set noreorder \n" \
+ " .set mips64r6 \n" \
+ " .set noat \n" \
+ " cache %1, 0x000(%0); cache %1, 0x080(%0) \n" \
+ STR(PTR_ADDIU) " $1, %0, 0x100 \n" \
+ " cache %1, 0x000($1); cache %1, 0x080($1) \n" \
+ STR(PTR_ADDIU) " $1, $1, 0x100 \n" \
+ " cache %1, 0x000($1); cache %1, 0x080($1) \n" \
+ STR(PTR_ADDIU) " $1, $1, 0x100 \n" \
+ " cache %1, 0x000($1); cache %1, 0x080($1) \n" \
+ STR(PTR_ADDIU) " $1, $1, 0x100 \n" \
+ " cache %1, 0x000($1); cache %1, 0x080($1) \n" \
+ STR(PTR_ADDIU) " $1, $1, 0x100 \n" \
+ " cache %1, 0x000($1); cache %1, 0x080($1) \n" \
+ STR(PTR_ADDIU) " $1, $1, 0x100 \n" \
+ " cache %1, 0x000($1); cache %1, 0x080($1) \n" \
+ STR(PTR_ADDIU) " $1, $1, 0x100 \n" \
+ " cache %1, 0x000($1); cache %1, 0x080($1) \n" \
+ STR(PTR_ADDIU) " $1, $1, 0x100 \n" \
+ " cache %1, 0x000($1); cache %1, 0x080($1) \n" \
+ STR(PTR_ADDIU) " $1, $1, 0x100 \n" \
+ " cache %1, 0x000($1); cache %1, 0x080($1) \n" \
+ STR(PTR_ADDIU) " $1, $1, 0x100 \n" \
+ " cache %1, 0x000($1); cache %1, 0x080($1) \n" \
+ STR(PTR_ADDIU) " $1, $1, 0x100 \n" \
+ " cache %1, 0x000($1); cache %1, 0x080($1) \n" \
+ STR(PTR_ADDIU) " $1, $1, 0x100 \n" \
+ " cache %1, 0x000($1); cache %1, 0x080($1) \n" \
+ STR(PTR_ADDIU) " $1, $1, 0x100 \n" \
+ " cache %1, 0x000($1); cache %1, 0x080($1) \n" \
+ STR(PTR_ADDIU) " $1, $1, 0x100 \n" \
+ " cache %1, 0x000($1); cache %1, 0x080($1) \n" \
+ STR(PTR_ADDIU) " $1, $1, 0x100 \n" \
+ " cache %1, 0x000($1); cache %1, 0x080($1) \n" \
+ " .set pop \n" \
+ : \
+ : "r" (base), \
+ "i" (op));
+
+#else /* !CONFIG_CPU_MIPSR6 */
+
#define cache16_unroll32(base,op) \
__asm__ __volatile__( \
" .set push \n" \
@@ -338,6 +527,113 @@
: \
: "r" (base), \
"i" (op));
+#endif /* CONFIG_CPU_MIPSR6 */
+
+#ifdef CONFIG_EVA
+#define cache16_unroll32_user(base,op) \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set noreorder \n" \
+ " .set eva \n" \
+ " cachee %1, 0x000(%0); cachee %1, 0x010(%0) \n" \
+ " cachee %1, 0x020(%0); cachee %1, 0x030(%0) \n" \
+ " cachee %1, 0x040(%0); cachee %1, 0x050(%0) \n" \
+ " cachee %1, 0x060(%0); cachee %1, 0x070(%0) \n" \
+ " cachee %1, 0x080(%0); cachee %1, 0x090(%0) \n" \
+ " cachee %1, 0x0a0(%0); cachee %1, 0x0b0(%0) \n" \
+ " cachee %1, 0x0c0(%0); cachee %1, 0x0d0(%0) \n" \
+ " cachee %1, 0x0e0(%0); cachee %1, 0x0f0(%0) \n" \
+ " cachee %1, 0x100(%0); cachee %1, 0x110(%0) \n" \
+ " cachee %1, 0x120(%0); cachee %1, 0x130(%0) \n" \
+ " cachee %1, 0x140(%0); cachee %1, 0x150(%0) \n" \
+ " cachee %1, 0x160(%0); cachee %1, 0x170(%0) \n" \
+ " cachee %1, 0x180(%0); cachee %1, 0x190(%0) \n" \
+ " cachee %1, 0x1a0(%0); cachee %1, 0x1b0(%0) \n" \
+ " cachee %1, 0x1c0(%0); cachee %1, 0x1d0(%0) \n" \
+ " cachee %1, 0x1e0(%0); cachee %1, 0x1f0(%0) \n" \
+ " .set pop \n" \
+ : \
+ : "r" (base), \
+ "i" (op));
+
+#define cache32_unroll32_user(base,op) \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set noreorder \n" \
+ " .set eva \n" \
+ " cachee %1, 0x000(%0); cachee %1, 0x020(%0) \n" \
+ " cachee %1, 0x040(%0); cachee %1, 0x060(%0) \n" \
+ " cachee %1, 0x080(%0); cachee %1, 0x0a0(%0) \n" \
+ " cachee %1, 0x0c0(%0); cachee %1, 0x0e0(%0) \n" \
+ " cachee %1, 0x100(%0); cachee %1, 0x120(%0) \n" \
+ " cachee %1, 0x140(%0); cachee %1, 0x160(%0) \n" \
+ " cachee %1, 0x180(%0); cachee %1, 0x1a0(%0) \n" \
+ " cachee %1, 0x1c0(%0); cachee %1, 0x1e0(%0) \n" \
+ " cachee %1, 0x200(%0); cachee %1, 0x220(%0) \n" \
+ " cachee %1, 0x240(%0); cachee %1, 0x260(%0) \n" \
+ " cachee %1, 0x280(%0); cachee %1, 0x2a0(%0) \n" \
+ " cachee %1, 0x2c0(%0); cachee %1, 0x2e0(%0) \n" \
+ " cachee %1, 0x300(%0); cachee %1, 0x320(%0) \n" \
+ " cachee %1, 0x340(%0); cachee %1, 0x360(%0) \n" \
+ " cachee %1, 0x380(%0); cachee %1, 0x3a0(%0) \n" \
+ " cachee %1, 0x3c0(%0); cachee %1, 0x3e0(%0) \n" \
+ " .set pop \n" \
+ : \
+ : "r" (base), \
+ "i" (op));
+
+#define cache64_unroll32_user(base,op) \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set noreorder \n" \
+ " .set eva \n" \
+ " cachee %1, 0x000(%0); cachee %1, 0x040(%0) \n" \
+ " cachee %1, 0x080(%0); cachee %1, 0x0c0(%0) \n" \
+ " cachee %1, 0x100(%0); cachee %1, 0x140(%0) \n" \
+ " cachee %1, 0x180(%0); cachee %1, 0x1c0(%0) \n" \
+ " cachee %1, 0x200(%0); cachee %1, 0x240(%0) \n" \
+ " cachee %1, 0x280(%0); cachee %1, 0x2c0(%0) \n" \
+ " cachee %1, 0x300(%0); cachee %1, 0x340(%0) \n" \
+ " cachee %1, 0x380(%0); cachee %1, 0x3c0(%0) \n" \
+ " cachee %1, 0x400(%0); cachee %1, 0x440(%0) \n" \
+ " cachee %1, 0x480(%0); cachee %1, 0x4c0(%0) \n" \
+ " cachee %1, 0x500(%0); cachee %1, 0x540(%0) \n" \
+ " cachee %1, 0x580(%0); cachee %1, 0x5c0(%0) \n" \
+ " cachee %1, 0x600(%0); cachee %1, 0x640(%0) \n" \
+ " cachee %1, 0x680(%0); cachee %1, 0x6c0(%0) \n" \
+ " cachee %1, 0x700(%0); cachee %1, 0x740(%0) \n" \
+ " cachee %1, 0x780(%0); cachee %1, 0x7c0(%0) \n" \
+ " .set pop \n" \
+ : \
+ : "r" (base), \
+ "i" (op));
+
+#define cache128_unroll32_user(base,op) \
+ __asm__ __volatile__( \
+ " .set push \n" \
+ " .set noreorder \n" \
+ " .set eva \n" \
+ " cachee %1, 0x000(%0); cachee %1, 0x080(%0) \n" \
+ " cachee %1, 0x100(%0); cachee %1, 0x180(%0) \n" \
+ " cachee %1, 0x200(%0); cachee %1, 0x280(%0) \n" \
+ " cachee %1, 0x300(%0); cachee %1, 0x380(%0) \n" \
+ " cachee %1, 0x400(%0); cachee %1, 0x480(%0) \n" \
+ " cachee %1, 0x500(%0); cachee %1, 0x580(%0) \n" \
+ " cachee %1, 0x600(%0); cachee %1, 0x680(%0) \n" \
+ " cachee %1, 0x700(%0); cachee %1, 0x780(%0) \n" \
+ " cachee %1, 0x800(%0); cachee %1, 0x880(%0) \n" \
+ " cachee %1, 0x900(%0); cachee %1, 0x980(%0) \n" \
+ " cachee %1, 0xa00(%0); cachee %1, 0xa80(%0) \n" \
+ " cachee %1, 0xb00(%0); cachee %1, 0xb80(%0) \n" \
+ " cachee %1, 0xc00(%0); cachee %1, 0xc80(%0) \n" \
+ " cachee %1, 0xd00(%0); cachee %1, 0xd80(%0) \n" \
+ " cachee %1, 0xe00(%0); cachee %1, 0xe80(%0) \n" \
+ " cachee %1, 0xf00(%0); cachee %1, 0xf80(%0) \n" \
+ " .set pop \n" \
+ : \
+ : "r" (base), \
+ "i" (op));
+#endif
/* build blast_xxx, blast_xxx_page, blast_xxx_page_indexed */
#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize) \
@@ -411,6 +707,33 @@
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64)
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128)
+#ifdef CONFIG_EVA
+
+#define __BUILD_BLAST_USER_CACHE(pfx, desc, indexop, hitop, lsize) \
+static inline void blast_##pfx##cache##lsize##_user_page(unsigned long page) \
+{ \
+ unsigned long start = page; \
+ unsigned long end = page + PAGE_SIZE; \
+ \
+ __##pfx##flush_prologue \
+ \
+ do { \
+ cache##lsize##_unroll32_user(start, hitop); \
+ start += lsize * 32; \
+ } while (start < end); \
+ \
+ __##pfx##flush_epilogue \
+}
+
+__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16)
+__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16)
+__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32)
+__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32)
+__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 64)
+__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64)
+
+#endif
+
/* build blast_xxx_range, protected_blast_xxx_range */
#define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot) \
static inline void prot##blast_##pfx##cache##_range(unsigned long start, \
@@ -423,7 +746,7 @@
__##pfx##flush_prologue \
\
while (1) { \
- prot##cache_op(hitop, addr); \
+ prot##cache_op(hitop, addr); \
if (addr == aend) \
break; \
addr += lsize; \
@@ -432,10 +755,49 @@
__##pfx##flush_epilogue \
}
+#ifndef CONFIG_EVA
+
__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_)
-__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_)
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_)
+
+#else
+
+#define __BUILD_PROT_BLAST_CACHE_RANGE(pfx, desc, hitop) \
+static inline void protected_blast_##pfx##cache##_range(unsigned long start, \
+ unsigned long end) \
+{ \
+ unsigned long lsize = cpu_##desc##_line_size(); \
+ unsigned long addr = start & ~(lsize - 1); \
+ unsigned long aend = (end - 1) & ~(lsize - 1); \
+ \
+ __##pfx##flush_prologue \
+ \
+ if (segment_eq(get_fs(), USER_DS)) \
+ while (1) { \
+ protected_cachee_op(hitop, addr); \
+ if (addr == aend) \
+ break; \
+ addr += lsize; \
+ } \
+ else \
+ while (1) { \
+ protected_cache_op(hitop, addr); \
+ if (addr == aend) \
+ break; \
+ addr += lsize; \
+ } \
+ \
+ __##pfx##flush_epilogue \
+}
+
+__BUILD_PROT_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D)
+__BUILD_PROT_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I)
+
+#endif
+
+__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_)
__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, )
+__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, )
__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, )
/* blast_inv_dcache_range */
__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, )
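The range-blast loops above (the EVA and non-EVA builds expand the same
shape) walk whole cache lines inclusively. A userspace model of that loop,
with the line size stubbed to 32 bytes as an assumption:

    #include <stdio.h>

    static unsigned long cpu_dcache_line_size(void) { return 32; }  /* stub */

    static void blast_range(unsigned long start, unsigned long end)
    {
            unsigned long lsize = cpu_dcache_line_size();
            unsigned long addr = start & ~(lsize - 1);
            unsigned long aend = (end - 1) & ~(lsize - 1);

            while (1) {
                    printf("cache op on line 0x%lx\n", addr);  /* cache_op() here */
                    if (addr == aend)
                            break;
                    addr += lsize;
            }
    }

    int main(void)
    {
            /* touches lines 0x1000, 0x1020, 0x1040 and 0x1060 */
            blast_range(0x1008, 0x1061);
            return 0;
    }

Note that aend is computed from end - 1, so a range ending exactly on a line
boundary does not drag in one extra line.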
diff --git a/arch/mips/include/asm/scatterlist.h b/arch/mips/include/asm/scatterlist.h
deleted file mode 100644
index 7ee0e64..0000000
--- a/arch/mips/include/asm/scatterlist.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_SCATTERLIST_H
-#define __ASM_SCATTERLIST_H
-
-#include <asm-generic/scatterlist.h>
-
-#endif /* __ASM_SCATTERLIST_H */
diff --git a/arch/mips/include/asm/sections.h b/arch/mips/include/asm/sections.h
deleted file mode 100644
index b7e3726..0000000
--- a/arch/mips/include/asm/sections.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_SECTIONS_H
-#define _ASM_SECTIONS_H
-
-#include <asm-generic/sections.h>
-
-#endif /* _ASM_SECTIONS_H */
diff --git a/arch/mips/include/asm/segment.h b/arch/mips/include/asm/segment.h
deleted file mode 100644
index 92ac001..0000000
--- a/arch/mips/include/asm/segment.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef _ASM_SEGMENT_H
-#define _ASM_SEGMENT_H
-
-/* Only here because we have some old header files that expect it.. */
-
-#endif /* _ASM_SEGMENT_H */
diff --git a/arch/mips/include/asm/serial.h b/arch/mips/include/asm/serial.h
deleted file mode 100644
index a0cb0caf..0000000
--- a/arch/mips/include/asm/serial.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/serial.h>
diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h
index eb60087..5a7283a 100644
--- a/arch/mips/include/asm/smp.h
+++ b/arch/mips/include/asm/smp.h
@@ -16,12 +16,14 @@
#include <linux/smp.h>
#include <linux/threads.h>
#include <linux/cpumask.h>
+#include <linux/cache.h>
#include <linux/atomic.h>
#include <asm/smp-ops.h>
+#include <asm/percpu.h>
extern int smp_num_siblings;
-extern cpumask_t cpu_sibling_map[];
+DECLARE_PER_CPU_SHARED_ALIGNED(cpumask_t, cpu_sibling_map);
#define raw_smp_processor_id() (current_thread_info()->cpu)
diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h
index 78d201f..121d463 100644
--- a/arch/mips/include/asm/spinlock.h
+++ b/arch/mips/include/asm/spinlock.h
@@ -108,7 +108,7 @@
" subu %[ticket], %[my_ticket], %[ticket] \n"
"2: \n"
" .subsection 2 \n"
- "4: andi %[ticket], %[ticket], 0x1fff \n"
+ "4: andi %[ticket], %[ticket], 0xffff \n"
" sll %[ticket], 5 \n"
" \n"
"6: bnez %[ticket], 6b \n"
@@ -238,6 +238,23 @@
: "m" (rw->lock)
: "memory");
} else {
+#ifdef CONFIG_CPU_MIPSR6
+ unsigned long tmp2 = (unsigned long)&(rw->lock);
+
+ do {
+ __asm__ __volatile__(
+ " .set push \n"
+ " .set noreorder # arch_read_lock \n"
+ "1: ll %0, 0(%1) \n"
+ " bltz %0, 1b \n"
+ " addu %0, 1 \n"
+ "2: sc %0, 0(%1) \n"
+ " .set pop \n"
+ : "=&r" (tmp)
+ : "r" (tmp2)
+ : "memory");
+ } while (unlikely(!tmp));
+#else
do {
__asm__ __volatile__(
"1: ll %1, %2 # arch_read_lock \n"
@@ -248,6 +265,7 @@
: "m" (rw->lock)
: "memory");
} while (unlikely(!tmp));
+#endif
}
smp_llsc_mb();
@@ -272,6 +290,23 @@
: "m" (rw->lock)
: "memory");
} else {
+#ifdef CONFIG_CPU_MIPSR6
+ unsigned long tmp2 = (unsigned long)&(rw->lock);
+
+ do {
+ __asm__ __volatile__(
+ " .set push \n"
+ " .set noat \n"
+ "1: ll %0, 0(%1) # arch_read_unlock \n"
+ " li $1, 1 \n"
+ " sub %0, %0, $1 \n"
+ " sc %0, 0(%1) \n"
+ " .set pop \n"
+ : "=&r" (tmp)
+ : "r" (tmp2)
+ : "memory");
+ } while (unlikely(!tmp));
+#else
do {
__asm__ __volatile__(
"1: ll %1, %2 # arch_read_unlock \n"
@@ -281,6 +316,7 @@
: "m" (rw->lock)
: "memory");
} while (unlikely(!tmp));
+#endif
}
}
@@ -302,6 +338,23 @@
: "m" (rw->lock)
: "memory");
} else {
+#ifdef CONFIG_CPU_MIPSR6
+ unsigned long tmp2 = (unsigned long)&(rw->lock);
+
+ do {
+ __asm__ __volatile__(
+ " .set push \n"
+ " .set noreorder # arch_write_lock \n"
+ "1: ll %0, 0(%1) \n"
+ " bnez %0, 1b \n"
+ " lui %0, 0x8000 \n"
+ "2: sc %0, 0(%1) \n"
+ " .set pop \n"
+ : "=&r" (tmp)
+ : "r" (tmp2)
+ : "memory");
+ } while (unlikely(!tmp));
+#else
do {
__asm__ __volatile__(
"1: ll %1, %2 # arch_write_lock \n"
@@ -312,6 +365,7 @@
: "m" (rw->lock)
: "memory");
} while (unlikely(!tmp));
+#endif
}
smp_llsc_mb();
@@ -352,6 +406,26 @@
: "m" (rw->lock)
: "memory");
} else {
+#ifdef CONFIG_CPU_MIPSR6
+ unsigned long tmp2 = (unsigned long)&(rw->lock);
+
+ __asm__ __volatile__(
+ " .set noreorder # arch_read_trylock \n"
+ " li %1, 0 \n"
+ "1: ll %0, 0(%2) \n"
+ " bltz %0, 2f \n"
+ " addu %0, 1 \n"
+ " sc %0, 0(%2) \n"
+ " beqz %0, 1b \n"
+ " nop \n"
+ " .set reorder \n"
+ __WEAK_LLSC_MB
+ " li %2, 1 \n"
+ "2: \n"
+ : "=&r" (tmp), "=&r" (ret)
+ : "r" (tmp2)
+ : "memory");
+#else
__asm__ __volatile__(
" .set noreorder # arch_read_trylock \n"
" li %2, 0 \n"
@@ -368,6 +442,7 @@
: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
: "m" (rw->lock)
: "memory");
+#endif
}
return ret;
@@ -396,6 +471,26 @@
: "m" (rw->lock)
: "memory");
} else {
+#ifdef CONFIG_CPU_MIPSR6
+ unsigned long tmp2 = (unsigned long)&(rw->lock);
+
+ do {
+ __asm__ __volatile__(
+ " .set push \n"
+ " .set reorder \n"
+ "1: ll %0, 0(%2) \n"
+ " li %1, 0 # arch_write_trylock \n"
+ " bnez %0, 2f \n"
+ " lui %0, 0x8000 \n"
+ " sc %0, 0(%2) \n"
+ " li %1, 1 \n"
+ "2: \n"
+ " .set pop \n"
+ : "=&r" (tmp), "=&r" (ret)
+ : "r" (tmp2)
+ : "memory");
+ } while (unlikely(!tmp));
+#else
do {
__asm__ __volatile__(
" ll %1, %3 # arch_write_trylock \n"
@@ -409,6 +504,7 @@
: "m" (rw->lock)
: "memory");
} while (unlikely(!tmp));
+#endif
smp_llsc_mb();
}
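A C-level model of the new MIPSR6 arch_read_lock() loop: spin while a writer
holds the sign bit (set via lui 0x8000 in arch_write_lock()), then
ll/sc-increment the reader count, retrying on sc failure. The compare-exchange
below stands in for ll/sc and is a sketch, not the kernel implementation:

    #include <stdatomic.h>

    static void read_lock_model(atomic_int *lock)
    {
            int old;

            do {
                    do {    /* "bltz %0, 1b": writer active, keep spinning */
                            old = atomic_load_explicit(lock, memory_order_relaxed);
                    } while (old < 0);
            } while (!atomic_compare_exchange_weak(lock, &old, old + 1)); /* "sc" */
            atomic_thread_fence(memory_order_acquire);      /* smp_llsc_mb() */
    }

    int main(void)
    {
            atomic_int lock = 0;

            read_lock_model(&lock);
            return lock != 1;       /* exactly one reader accounted */
    }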
diff --git a/arch/mips/include/asm/spram.h b/arch/mips/include/asm/spram.h
index 0b89006..58882ef7 100644
--- a/arch/mips/include/asm/spram.h
+++ b/arch/mips/include/asm/spram.h
@@ -1,7 +1,7 @@
#ifndef _MIPS_SPRAM_H
#define _MIPS_SPRAM_H
-#ifdef CONFIG_CPU_MIPSR2
+#if defined(CONFIG_CPU_MIPSR2)
extern __init void spram_config(void);
#else
static inline void spram_config(void) { };
diff --git a/arch/mips/include/asm/stackframe.h b/arch/mips/include/asm/stackframe.h
index a89d1b1..4cfedb8 100644
--- a/arch/mips/include/asm/stackframe.h
+++ b/arch/mips/include/asm/stackframe.h
@@ -49,7 +49,7 @@
LONG_S v1, PT_HI(sp)
mflhxu v1
LONG_S v1, PT_ACX(sp)
-#else
+#elif !defined(CONFIG_CPU_MIPSR6)
mfhi v1
#endif
#ifdef CONFIG_32BIT
@@ -59,7 +59,7 @@
LONG_S $10, PT_R10(sp)
LONG_S $11, PT_R11(sp)
LONG_S $12, PT_R12(sp)
-#ifndef CONFIG_CPU_HAS_SMARTMIPS
+#if !defined(CONFIG_CPU_HAS_SMARTMIPS) && !defined(CONFIG_CPU_MIPSR6)
LONG_S v1, PT_HI(sp)
mflo v1
#endif
@@ -67,7 +67,7 @@
LONG_S $14, PT_R14(sp)
LONG_S $15, PT_R15(sp)
LONG_S $24, PT_R24(sp)
-#ifndef CONFIG_CPU_HAS_SMARTMIPS
+#if !defined(CONFIG_CPU_HAS_SMARTMIPS) && !defined(CONFIG_CPU_MIPSR6)
LONG_S v1, PT_LO(sp)
#endif
.endm
@@ -94,7 +94,11 @@
#define CPU_ID_REG CP0_XCONTEXT
#define CPU_ID_MFC0 MFC0
#else
+#ifdef CONFIG_64BIT
+#define PTEBASE_SHIFT 24 /* CONTEXT using a pair of 64-bit ptrs */
+#else
#define PTEBASE_SHIFT 23 /* CONTEXT */
+#endif
#define CPU_ID_REG CP0_CONTEXT
#define CPU_ID_MFC0 MFC0
#endif
@@ -255,7 +259,7 @@
mtlhx $24
LONG_L $24, PT_LO(sp)
mtlhx $24
-#else
+#elif !defined(CONFIG_CPU_MIPSR6)
LONG_L $24, PT_LO(sp)
mtlo $24
LONG_L $24, PT_HI(sp)
@@ -448,8 +452,13 @@
.macro RESTORE_SP_AND_RET
LONG_L sp, PT_R29(sp)
+#ifdef CONFIG_CPU_MIPSR6
+ .set mips64r6
+ eretnc
+#else
.set mips3
eret
+#endif
.set mips0
.endm
diff --git a/arch/mips/include/asm/switch_to.h b/arch/mips/include/asm/switch_to.h
index fd16bcb..5df3b93 100644
--- a/arch/mips/include/asm/switch_to.h
+++ b/arch/mips/include/asm/switch_to.h
@@ -15,14 +15,30 @@
#include <asm/cpu-features.h>
#include <asm/watch.h>
#include <asm/dsp.h>
+#include <asm/mmu_context.h>
+#include <asm/msa.h>
struct task_struct;
-/*
- * switch_to(n) should switch tasks to task nr n, first
- * checking that n isn't the current task, in which case it does nothing.
+enum {
+ FP_SAVE_NONE = 0,
+ FP_SAVE_VECTOR = -1,
+ FP_SAVE_SCALAR = 1,
+};
+
+/**
+ * resume - resume execution of a task
+ * @prev: The task previously executed.
+ * @next: The task to begin executing.
+ * @next_ti: task_thread_info(next).
+ * @fp_save: Which, if any, FP context to save for prev.
+ *
+ * This function is used whilst scheduling to save the context of prev & load
+ * the context of next. Returns prev.
*/
-extern asmlinkage void *resume(void *last, void *next, void *next_ti, u32 __usedfpu);
+extern asmlinkage struct task_struct *resume(struct task_struct *prev,
+ struct task_struct *next, struct thread_info *next_ti,
+ s32 fp_save);
extern unsigned int ll_bit;
extern struct task_struct *ll_task;
@@ -58,29 +74,52 @@
#define __mips_mt_fpaff_switch_to(prev) do { (void) (prev); } while (0)
#endif
-#define __clear_software_ll_bit() \
+#ifdef CONFIG_CPU_MIPSR6
+#define __clear_ll_bit() \
+do { \
+ write_c0_lladdr(0); \
+} while (0)
+#else
+#define __clear_ll_bit() \
do { \
if (!__builtin_constant_p(cpu_has_llsc) || !cpu_has_llsc) \
ll_bit = 0; \
} while (0)
+#endif
#define switch_to(prev, next, last) \
do { \
- u32 __usedfpu; \
+ s32 __fpsave = FP_SAVE_NONE; \
__mips_mt_fpaff_switch_to(prev); \
if (cpu_has_dsp) \
__save_dsp(prev); \
- __clear_software_ll_bit(); \
- __usedfpu = test_and_clear_tsk_thread_flag(prev, TIF_USEDFPU); \
- (last) = resume(prev, next, task_thread_info(next), __usedfpu); \
+ __clear_ll_bit(); \
+ if (test_and_clear_tsk_thread_flag(prev, TIF_USEDFPU)) \
+ __fpsave = FP_SAVE_SCALAR; \
+ if (test_and_clear_tsk_thread_flag(prev, TIF_USEDMSA)) \
+ __fpsave = FP_SAVE_VECTOR; \
+ (last) = resume(prev, next, task_thread_info(next), __fpsave); \
} while (0)
+static inline void flush_vdso_page(void)
+{
+ int cpu = raw_smp_processor_id();
+
+ if (current->mm && cpu_context(cpu, current->mm) &&
+ (current->mm->context.vdso_page[cpu] != current_thread_info()->vdso_page) &&
+ (current->mm->context.vdso_asid[cpu] == cpu_asid(cpu, current->mm))) {
+ local_flush_tlb_page(current->mm->mmap, (unsigned long)current->mm->context.vdso);
+ current->mm->context.vdso_asid[cpu] = 0;
+ }
+}
+
#define finish_arch_switch(prev) \
do { \
if (cpu_has_dsp) \
__restore_dsp(current); \
if (cpu_has_userlocal) \
write_c0_userlocal(current_thread_info()->tp_value); \
+ flush_vdso_page(); \
__restore_watch(); \
} while (0)
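The fp_save argument ends up as FP_SAVE_VECTOR whenever the previous task used
MSA, even if TIF_USEDFPU was also set, because the MSA test runs last and the
vector context is a superset of the scalar FP context. The selection in
isolation:

    #include <assert.h>

    enum { FP_SAVE_NONE = 0, FP_SAVE_VECTOR = -1, FP_SAVE_SCALAR = 1 };

    static int pick_fp_save(int used_fpu, int used_msa)
    {
            int fpsave = FP_SAVE_NONE;

            if (used_fpu)
                    fpsave = FP_SAVE_SCALAR;
            if (used_msa)           /* tested last, so it wins */
                    fpsave = FP_SAVE_VECTOR;
            return fpsave;
    }

    int main(void)
    {
            assert(pick_fp_save(0, 0) == FP_SAVE_NONE);
            assert(pick_fp_save(1, 0) == FP_SAVE_SCALAR);
            assert(pick_fp_save(1, 1) == FP_SAVE_VECTOR);
            return 0;
    }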
diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
index 895320e..cc006b7 100644
--- a/arch/mips/include/asm/thread_info.h
+++ b/arch/mips/include/asm/thread_info.h
@@ -25,6 +25,7 @@
struct task_struct *task; /* main task structure */
struct exec_domain *exec_domain; /* execution domain */
unsigned long flags; /* low level flags */
+ unsigned long local_flags; /* low level flags, updated without locking */
unsigned long tp_value; /* thread pointer */
__u32 cpu; /* current CPU */
int preempt_count; /* 0 => preemptable, <0 => BUG */
@@ -34,6 +35,8 @@
* 0x7fffffff for user-thead
* 0xffffffff for kernel-thread
*/
+ unsigned long vdso_offset;
+ struct page *vdso_page;
struct restart_block restart_block;
struct pt_regs *regs;
};
@@ -46,9 +49,11 @@
.task = &tsk, \
.exec_domain = &default_exec_domain, \
.flags = _TIF_FIXADE, \
+ .local_flags = 0, \
.cpu = 0, \
.preempt_count = INIT_PREEMPT_COUNT, \
.addr_limit = KERNEL_DS, \
+ .vdso_page = NULL, \
.restart_block = { \
.fn = do_no_restart_syscall, \
}, \
@@ -101,6 +106,7 @@
* - pending work-to-be-done flags are in LSW
* - other flags in MSW
*/
+#define TIF_FPU_LOSE_REQUEST 0 /* IPI request to give up the FPU */
#define TIF_SIGPENDING 1 /* signal pending */
#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
#define TIF_SYSCALL_AUDIT 3 /* syscall auditing active */
@@ -115,8 +121,12 @@
#define TIF_32BIT_ADDR 23 /* 32-bit address space (o32/n32) */
#define TIF_FPUBOUND 24 /* thread bound to FPU-full CPU set */
#define TIF_LOAD_WATCH 25 /* If set, load watch registers */
+
+#define TIF_USEDMSA 29 /* MSA has been used this quantum */
+#define TIF_MSA_CTX_LIVE 30 /* MSA context must be preserved */
#define TIF_SYSCALL_TRACE 31 /* syscall trace active */
+#define _TIF_FPU_LOSE_REQUEST (1<<TIF_FPU_LOSE_REQUEST)
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
@@ -130,6 +140,13 @@
#define _TIF_32BIT_ADDR (1<<TIF_32BIT_ADDR)
#define _TIF_FPUBOUND (1<<TIF_FPUBOUND)
#define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH)
+#define _TIF_USEDMSA (1<<TIF_USEDMSA)
+#define _TIF_MSA_CTX_LIVE (1<<TIF_MSA_CTX_LIVE)
+#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
+
+#define _TIF_WORK_SYSCALL_ENTRY (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \
+ _TIF_SYSCALL_AUDIT | \
+ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
/* work to do in syscall_trace_leave() */
#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)
@@ -140,6 +157,33 @@
/* work to do on any return to u-space */
#define _TIF_ALLWORK_MASK (_TIF_WORK_MASK | _TIF_WORK_SYSCALL_EXIT)
+/* Local thread information flags, updated only by the thread itself - no locking needed */
+#define LTIF_FPU_FR 0x00000001 /* Status.FR bit state for this thread */
+#define LTIF_FPU_FRE 0x00000002 /* Mixed FPU mode emulation */
+
+#ifndef __ASSEMBLY__
+static inline void set_thread_local_flags(unsigned long x)
+{
+ current_thread_info()->local_flags |= x;
+}
+
+static inline void clear_thread_local_flags(unsigned long x)
+{
+ current_thread_info()->local_flags &= ~x;
+}
+
+static inline void change_thread_local_flags(unsigned long x, unsigned long f)
+{
+ current_thread_info()->local_flags =
+ (current_thread_info()->local_flags & ~x) | f;
+}
+
+static inline int test_thread_local_flags(unsigned long x)
+{
+ return ((current_thread_info()->local_flags & x) ? 1 : 0);
+}
+#endif
+
#endif /* __KERNEL__ */
#endif /* _ASM_THREAD_INFO_H */
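Because local_flags belongs to the current thread only, the helpers above can
use plain read-modify-write without atomics. A userspace model of the same
contract:

    #include <assert.h>

    #define LTIF_FPU_FR  0x00000001
    #define LTIF_FPU_FRE 0x00000002

    /* stands in for current_thread_info()->local_flags */
    static unsigned long local_flags;

    static void set_thread_local_flags(unsigned long x)   { local_flags |= x; }
    static void clear_thread_local_flags(unsigned long x) { local_flags &= ~x; }
    static int  test_thread_local_flags(unsigned long x)
    {
            return (local_flags & x) ? 1 : 0;
    }

    int main(void)
    {
            set_thread_local_flags(LTIF_FPU_FR | LTIF_FPU_FRE);
            clear_thread_local_flags(LTIF_FPU_FRE);
            assert(test_thread_local_flags(LTIF_FPU_FR));
            assert(!test_thread_local_flags(LTIF_FPU_FRE));
            return 0;
    }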
diff --git a/arch/mips/include/asm/time.h b/arch/mips/include/asm/time.h
index 2d7b9df..7229562 100644
--- a/arch/mips/include/asm/time.h
+++ b/arch/mips/include/asm/time.h
@@ -18,6 +18,7 @@
#include <linux/spinlock.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
+#include <asm/gic.h>
extern spinlock_t rtc_lock;
@@ -53,14 +54,16 @@
extern unsigned int __weak get_c0_compare_int(void);
extern int r4k_clockevent_init(void);
extern int smtc_clockevent_init(void);
-extern int gic_clockevent_init(void);
static inline int mips_clockevent_init(void)
{
#ifdef CONFIG_MIPS_MT_SMTC
return smtc_clockevent_init();
#elif defined(CONFIG_CEVT_GIC)
- return (gic_clockevent_init() | r4k_clockevent_init());
+ extern int gic_clockevent_init(void);
+
+ gic_clockevent_init();
+ return r4k_clockevent_init();
#elif defined(CONFIG_CEVT_R4K)
return r4k_clockevent_init();
#else
@@ -75,7 +78,7 @@
static inline int init_mips_clocksource(void)
{
-#if defined(CONFIG_CSRC_R4K) && !defined(CONFIG_CSRC_GIC)
+#ifdef CONFIG_CSRC_R4K
return init_r4k_clocksource();
#else
return 0;
diff --git a/arch/mips/include/asm/tlbmisc.h b/arch/mips/include/asm/tlbmisc.h
index 3a45228..2101421 100644
--- a/arch/mips/include/asm/tlbmisc.h
+++ b/arch/mips/include/asm/tlbmisc.h
@@ -6,5 +6,10 @@
*/
extern void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
unsigned long entryhi, unsigned long pagemask);
+void remove_wired_entry(void);
+int wired_push(unsigned long entryhi, unsigned long entrylo0,
+ unsigned long entrylo1, unsigned long pagemask);
+int wired_pop(void);
+int install_vdso_tlb(void);
#endif /* __ASM_TLBMISC_H */
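The wired_push()/wired_pop() names suggest LIFO management of wired TLB
entries. The sketch below is only a guess at that contract (fixed slot count,
pop in reverse push order) for illustration - the prototypes above are all
the header actually guarantees:

    #include <assert.h>

    struct wired { unsigned long hi, lo0, lo1, mask; };
    static struct wired stack[16];  /* assumed wired-entry budget */
    static int top;

    static int wired_push(unsigned long hi, unsigned long lo0,
                          unsigned long lo1, unsigned long mask)
    {
            if (top == 16)
                    return -1;      /* no free wired slot */
            stack[top++] = (struct wired){ hi, lo0, lo1, mask };
            return 0;
    }

    static int wired_pop(void)
    {
            if (!top)
                    return -1;      /* nothing wired */
            --top;
            return 0;
    }

    int main(void)
    {
            assert(wired_push(0x1000, 0, 0, 0) == 0);
            assert(wired_pop() == 0);
            assert(wired_pop() < 0);
            return 0;
    }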
diff --git a/arch/mips/include/asm/topology.h b/arch/mips/include/asm/topology.h
index 12609a1..8cd0efb 100644
--- a/arch/mips/include/asm/topology.h
+++ b/arch/mips/include/asm/topology.h
@@ -4,6 +4,7 @@
* for more details.
*
* Copyright (C) 2007 by Ralf Baechle
+ * Copyright (C) 2012 by Leonid Yegoshin
*/
#ifndef __ASM_TOPOLOGY_H
#define __ASM_TOPOLOGY_H
@@ -12,6 +13,10 @@
#ifdef CONFIG_SMP
#define smt_capable() (smp_num_siblings > 1)
+#define topology_thread_cpumask(cpu) (&per_cpu(cpu_sibling_map, cpu))
+#define topology_core_id(cpu) (cpu_data[cpu].core)
+#define topology_core_cpumask(cpu) ((void)(cpu), cpu_online_mask)
+#define topology_physical_package_id(cpu) ((void)cpu, 0)
#endif
#endif /* __ASM_TOPOLOGY_H */
diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
index f3fa375..48e3be2 100644
--- a/arch/mips/include/asm/uaccess.h
+++ b/arch/mips/include/asm/uaccess.h
@@ -223,47 +223,96 @@
* for 32 bit mode and old iron.
*/
#ifdef CONFIG_32BIT
+#define __GET_KERNEL_DW(val, ptr) __get_kernel_asm_ll32(val, ptr)
#define __GET_USER_DW(val, ptr) __get_user_asm_ll32(val, ptr)
#endif
#ifdef CONFIG_64BIT
-#define __GET_USER_DW(val, ptr) __get_user_asm(val, "ld", ptr)
+#define __GET_KERNEL_DW(val, ptr) __get_kernel_asm(val, "ld", ptr)
#endif
+extern void __get_kernel_unknown(void);
extern void __get_user_unknown(void);
-#define __get_user_common(val, size, ptr) \
+#define __get_kernel_common(val, size, ptr) \
do { \
- switch (size) { \
- case 1: __get_user_asm(val, "lb", ptr); break; \
- case 2: __get_user_asm(val, "lh", ptr); break; \
- case 4: __get_user_asm(val, "lw", ptr); break; \
- case 8: __GET_USER_DW(val, ptr); break; \
- default: __get_user_unknown(); break; \
+ __chk_user_ptr(ptr); \
+ __gu_err = 0; \
+ switch (size) { \
+ case 1: __get_kernel_asm(val, "lb", ptr); break; \
+ case 2: __get_kernel_asm(val, "lh", ptr); break; \
+ case 4: __get_kernel_asm(val, "lw", ptr); break; \
+ case 8: __GET_KERNEL_DW(val, ptr); break; \
+ default: __get_kernel_unknown(); break; \
} \
} while (0)
-#define __get_user_nocheck(x, ptr, size) \
+#ifdef CONFIG_EVA
+#define __get_user_common(val, size, ptr) \
+do { \
+ __gu_err = 0; \
+ switch (size) { \
+ case 1: __get_user_asm(val, "lbe", ptr); break; \
+ case 2: __get_user_asm(val, "lhe", ptr); break; \
+ case 4: __get_user_asm(val, "lwe", ptr); break; \
+ case 8: __GET_USER_DW(val, ptr); break; \
+ default: __get_user_unknown(); break; \
+ } \
+} while (0)
+#endif
+
+#ifndef CONFIG_EVA
+#define __get_user_nocheck(x, ptr, size) \
({ \
- int __gu_err; \
- \
- __chk_user_ptr(ptr); \
- __get_user_common((x), size, ptr); \
+ int __gu_err; \
+ __get_kernel_common((x), size, ptr); \
__gu_err; \
})
+#else
+#define __get_user_nocheck(x, ptr, size) \
+({ \
+ int __gu_err; \
+ const __typeof__(*(ptr)) __user * __gu_ptr = (ptr); \
+ \
+ if (segment_eq(get_fs(), KERNEL_DS)) \
+ __get_kernel_common((x), size, __gu_ptr); \
+ else { \
+ __chk_user_ptr(ptr); \
+ __get_user_common((x), size, __gu_ptr); \
+ } \
+ __gu_err; \
+})
+#endif
-#define __get_user_check(x, ptr, size) \
+#ifndef CONFIG_EVA
+#define __get_user_check(x, ptr, size) \
({ \
int __gu_err = -EFAULT; \
const __typeof__(*(ptr)) __user * __gu_ptr = (ptr); \
\
- might_fault(); \
- if (likely(access_ok(VERIFY_READ, __gu_ptr, size))) \
- __get_user_common((x), size, __gu_ptr); \
+ might_fault(); \
+ if (likely(access_ok(VERIFY_READ, __gu_ptr, size))) \
+ __get_kernel_common((x), size, __gu_ptr); \
\
__gu_err; \
})
+#else
+#define __get_user_check(x, ptr, size) \
+({ \
+ int __gu_err = -EFAULT; \
+ const __typeof__(*(ptr)) __user * __gu_ptr = (ptr); \
+ \
+ if (segment_eq(get_fs(), KERNEL_DS)) { \
+ __get_kernel_common((x), size, __gu_ptr); \
+ } else { \
+ might_fault(); \
+ if (likely(access_ok(VERIFY_READ, __gu_ptr, size))) \
+ __get_user_common((x), size, __gu_ptr); \
+ } \
+ __gu_err; \
+})
+#endif
-#define __get_user_asm(val, insn, addr) \
+#define __get_kernel_asm(val, insn, addr) \
{ \
long __gu_tmp; \
\
@@ -284,10 +333,34 @@
(val) = (__typeof__(*(addr))) __gu_tmp; \
}
+#ifdef CONFIG_EVA
+#define __get_user_asm(val, insn, addr) \
+{ \
+ long __gu_tmp; \
+ \
+ __asm__ __volatile__( \
+ " .set eva \n" \
+ "1: " insn " %1, 0(%3) \n" \
+ "2: \n" \
+ " .insn \n" \
+ " .section .fixup,\"ax\" \n" \
+ "3: li %0, %4 \n" \
+ " j 2b \n" \
+ " .previous \n" \
+ " .section __ex_table,\"a\" \n" \
+ " "__UA_ADDR "\t1b, 3b \n" \
+ " .previous \n" \
+ : "=r" (__gu_err), "=r" (__gu_tmp) \
+ : "0" (0), "r" (addr), "i" (-EFAULT)); \
+ \
+ (val) = (__typeof__(*(addr))) __gu_tmp; \
+}
+#endif
+
/*
* Get a long long 64 using 32 bit registers.
*/
-#define __get_user_asm_ll32(val, addr) \
+#define __get_kernel_asm_ll32(val, addr) \
{ \
union { \
unsigned long long l; \
@@ -314,18 +387,113 @@
\
(val) = __gu_tmp.t; \
}
+#ifdef CONFIG_EVA
+#define __get_user_asm_ll32(val, addr) \
+{ \
+ union { \
+ unsigned long long l; \
+ __typeof__(*(addr)) t; \
+ } __gu_tmp; \
+ \
+ __asm__ __volatile__( \
+ " .set eva \n" \
+ "1: lwe %1, (%3) \n" \
+ "2: lwe %D1, 4(%3) \n" \
+ "3: \n" \
+ " .insn \n" \
+ " .section .fixup,\"ax\" \n" \
+ "4: li %0, %4 \n" \
+ " move %1, $0 \n" \
+ " move %D1, $0 \n" \
+ " j 3b \n" \
+ " .previous \n" \
+ " .section __ex_table,\"a\" \n" \
+ " " __UA_ADDR " 1b, 4b \n" \
+ " " __UA_ADDR " 2b, 4b \n" \
+ " .previous \n" \
+ : "=r" (__gu_err), "=&r" (__gu_tmp.l) \
+ : "0" (0), "r" (addr), "i" (-EFAULT)); \
+ \
+ (val) = __gu_tmp.t; \
+}
+#endif
+
/*
* Yuck. We need two variants, one for 64bit operation and one
* for 32 bit mode and old iron.
*/
#ifdef CONFIG_32BIT
+#define __PUT_KERNEL_DW(ptr) __put_kernel_asm_ll32(ptr)
#define __PUT_USER_DW(ptr) __put_user_asm_ll32(ptr)
#endif
#ifdef CONFIG_64BIT
-#define __PUT_USER_DW(ptr) __put_user_asm("sd", ptr)
+#define __PUT_KERNEL_DW(ptr) __put_kernel_asm("sd", ptr)
#endif
+extern void __put_kernel_unknown(void);
+
+#ifdef CONFIG_EVA
+extern void __put_user_unknown(void);
+
+#define __put_kernel_common(size, ptr) \
+do { \
+ switch (size) { \
+ case 1: __put_kernel_asm("sb", ptr); break; \
+ case 2: __put_kernel_asm("sh", ptr); break; \
+ case 4: __put_kernel_asm("sw", ptr); break; \
+ case 8: __PUT_KERNEL_DW(ptr); break; \
+ default: __put_kernel_unknown(); break; \
+ } \
+} while (0)
+
+#define __put_user_common(size, ptr) \
+do { \
+ switch (size) { \
+ case 1: __put_user_asm("sbe", ptr); break; \
+ case 2: __put_user_asm("she", ptr); break; \
+ case 4: __put_user_asm("swe", ptr); break; \
+ case 8: __PUT_USER_DW(ptr); break; \
+ default: __put_user_unknown(); break; \
+ } \
+} while (0)
+
+#define __put_user_nocheck(x, ptr, size) \
+({ \
+ __typeof__(*(ptr)) __pu_val; \
+ int __pu_err = 0; \
+ const __typeof__(*(ptr)) __user * __pu_ptr = (ptr); \
+ \
+ if (segment_eq(get_fs(), KERNEL_DS)) { \
+ __chk_user_ptr(__pu_ptr); \
+ __pu_val = (x); \
+ __put_kernel_common(size, __pu_ptr); \
+ } else { \
+ __chk_user_ptr(__pu_ptr); \
+ __pu_val = (x); \
+ __put_user_common(size, __pu_ptr); \
+ } \
+ __pu_err; \
+})
+
+#define __put_user_check(x, ptr, size) \
+({ \
+ __typeof__(*(ptr)) __pu_val = (x); \
+ int __pu_err = -EFAULT; \
+ const __typeof__(*(ptr)) __user * __pu_ptr = (ptr); \
+ \
+ if (segment_eq(get_fs(), KERNEL_DS)) \
+ __put_kernel_common(size, __pu_ptr); \
+ else { \
+ might_fault(); \
+ if (likely(access_ok(VERIFY_WRITE, __pu_ptr, size))) \
+ __put_user_common(size, __pu_ptr); \
+ } \
+ __pu_err; \
+})
+
+#else
+
#define __put_user_nocheck(x, ptr, size) \
({ \
__typeof__(*(ptr)) __pu_val; \
@@ -334,11 +502,11 @@
__chk_user_ptr(ptr); \
__pu_val = (x); \
switch (size) { \
- case 1: __put_user_asm("sb", ptr); break; \
- case 2: __put_user_asm("sh", ptr); break; \
- case 4: __put_user_asm("sw", ptr); break; \
- case 8: __PUT_USER_DW(ptr); break; \
- default: __put_user_unknown(); break; \
+ case 1: __put_kernel_asm("sb", ptr); break; \
+ case 2: __put_kernel_asm("sh", ptr); break; \
+ case 4: __put_kernel_asm("sw", ptr); break; \
+ case 8: __PUT_KERNEL_DW(ptr); break; \
+ default: __put_kernel_unknown(); break; \
} \
__pu_err; \
})
@@ -352,17 +520,19 @@
might_fault(); \
if (likely(access_ok(VERIFY_WRITE, __pu_addr, size))) { \
switch (size) { \
- case 1: __put_user_asm("sb", __pu_addr); break; \
- case 2: __put_user_asm("sh", __pu_addr); break; \
- case 4: __put_user_asm("sw", __pu_addr); break; \
- case 8: __PUT_USER_DW(__pu_addr); break; \
- default: __put_user_unknown(); break; \
+ case 1: __put_kernel_asm("sb", __pu_addr); break; \
+ case 2: __put_kernel_asm("sh", __pu_addr); break; \
+ case 4: __put_kernel_asm("sw", __pu_addr); break; \
+ case 8: __PUT_KERNEL_DW(__pu_addr); break; \
+ default: __put_kernel_unknown(); break; \
} \
} \
__pu_err; \
})
+#endif /* CONFIG_EVA */
-#define __put_user_asm(insn, ptr) \
+#ifndef CONFIG_EVA
+#define __put_kernel_asm(insn, ptr) \
{ \
__asm__ __volatile__( \
"1: " insn " %z2, %3 # __put_user_asm\n" \
@@ -379,8 +549,47 @@
: "0" (0), "Jr" (__pu_val), "o" (__m(ptr)), \
"i" (-EFAULT)); \
}
+#else
+#define __put_kernel_asm(insn, ptr) \
+{ \
+ __asm__ __volatile__( \
+ "1: " insn " %2, %3 # __put_user_asm\n" \
+ "2: \n" \
+ " .insn \n" \
+ " .section .fixup,\"ax\" \n" \
+ "3: li %0, %4 \n" \
+ " j 2b \n" \
+ " .previous \n" \
+ " .section __ex_table,\"a\" \n" \
+ " " __UA_ADDR " 1b, 3b \n" \
+ " .previous \n" \
+ : "=r" (__pu_err) \
+ : "0" (0), "r" (__pu_val), "o" (__m(ptr)), \
+ "i" (-EFAULT)); \
+}
-#define __put_user_asm_ll32(ptr) \
+#define __put_user_asm(insn, ptr) \
+{ \
+ __asm__ __volatile__( \
+ " .set eva \n" \
+ "1: " insn " %2, 0(%3) # __put_user_asm\n" \
+ "2: \n" \
+ " .insn \n" \
+ " .section .fixup,\"ax\" \n" \
+ "3: li %0, %4 \n" \
+ " j 2b \n" \
+ " .previous \n" \
+ " .section __ex_table,\"a\" \n" \
+ " " __UA_ADDR " 1b, 3b \n" \
+ " .previous \n" \
+ : "=r" (__pu_err) \
+ : "0" (0), "r" (__pu_val), "r" (ptr), \
+ "i" (-EFAULT)); \
+}
+#endif
+
+
+#define __put_kernel_asm_ll32(ptr) \
{ \
__asm__ __volatile__( \
"1: sw %2, (%3) # __put_user_asm_ll32 \n" \
@@ -400,8 +609,30 @@
"i" (-EFAULT)); \
}
-extern void __put_user_unknown(void);
+#ifdef CONFIG_EVA
+#define __put_user_asm_ll32(ptr) \
+{ \
+ __asm__ __volatile__( \
+ " .set eva \n" \
+ "1: swe %2, (%3) # __put_user_asm_ll32 \n" \
+ "2: swe %D2, 4(%3) \n" \
+ "3: \n" \
+ " .insn \n" \
+ " .section .fixup,\"ax\" \n" \
+ "4: li %0, %4 \n" \
+ " j 3b \n" \
+ " .previous \n" \
+ " .section __ex_table,\"a\" \n" \
+ " " __UA_ADDR " 1b, 4b \n" \
+ " " __UA_ADDR " 2b, 4b \n" \
+ " .previous" \
+ : "=r" (__pu_err) \
+ : "0" (0), "r" (__pu_val), "r" (ptr), \
+ "i" (-EFAULT)); \
+}
+#endif
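The pattern above repeats throughout this EVA (Enhanced Virtual Addressing) rework: every accessor now comes in a kernel flavour that uses the ordinary load/store opcodes and a user flavour that uses the EVA opcodes (lbe/lhe/lwe, sbe/she/swe), with segment_eq(get_fs(), KERNEL_DS) choosing between them at run time. The following stand-alone C model of that dispatch is only a sketch; the helper names are hypothetical and the real macros expand to the inline assembly shown above:

    #include <stdio.h>

    /* Hypothetical stand-ins for get_fs()/KERNEL_DS, just to model
     * the run-time segment check the EVA macros perform.
     */
    enum seg { USER_DS, KERNEL_DS };
    static enum seg current_seg = USER_DS;

    static int put_kernel_word(int *p, int v) { *p = v; return 0; /* sw  */ }
    static int put_user_word(int *p, int v)   { *p = v; return 0; /* swe */ }

    /* Mirrors __put_user_nocheck(): pick the accessor by segment. */
    static int put_word(int *p, int v)
    {
            if (current_seg == KERNEL_DS)
                    return put_kernel_word(p, v);
            return put_user_word(p, v);
    }

    int main(void)
    {
            int x = 0;

            current_seg = KERNEL_DS;
            put_word(&x, 42);
            printf("x = %d\n", x);
            return 0;
    }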
+#ifndef CONFIG_EVA
/*
* put_user_unaligned: - Write a simple value into user space.
* @x: Value to copy to user space.
@@ -670,6 +901,8 @@
extern void __put_user_unaligned_unknown(void);
+#endif /* CONFIG_EVA */
+
/*
* We're generating jump to subroutines which will be outside the range of
* jump instructions
@@ -692,8 +925,12 @@
#endif
extern size_t __copy_user(void *__to, const void *__from, size_t __n);
+#ifdef CONFIG_EVA
+extern size_t __copy_fromuser(void *__to, const void *__from, size_t __n);
+extern size_t __copy_touser(void *__to, const void *__from, size_t __n);
+#endif
-#define __invoke_copy_to_user(to, from, n) \
+#define __invoke_copy_to_kernel(to, from, n) \
({ \
register void __user *__cu_to_r __asm__("$4"); \
register const void *__cu_from_r __asm__("$5"); \
@@ -703,7 +940,7 @@
__cu_from_r = (from); \
__cu_len_r = (n); \
__asm__ __volatile__( \
- __MODULE_JAL(__copy_user) \
+ __MODULE_JAL(__copy_user) \
: "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
: \
: "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \
@@ -711,6 +948,26 @@
__cu_len_r; \
})
+#ifdef CONFIG_EVA
+#define __invoke_copy_to_user(to, from, n) \
+({ \
+ register void __user *__cu_to_r __asm__("$4"); \
+ register const void *__cu_from_r __asm__("$5"); \
+ register long __cu_len_r __asm__("$6"); \
+ \
+ __cu_to_r = (to); \
+ __cu_from_r = (from); \
+ __cu_len_r = (n); \
+ __asm__ __volatile__( \
+ __MODULE_JAL(__copy_touser) \
+ : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
+ : \
+ : "$8", "$9", "$10", "$11", "$12", "$15", "$24", "$31", \
+ DADDI_SCRATCH, "memory"); \
+ __cu_len_r; \
+})
+#endif
+
/*
* __copy_to_user: - Copy a block of data into user space, with less checking.
* @to: Destination address, in user space.
@@ -725,6 +982,7 @@
* Returns number of bytes that could not be copied.
* On success, this will be zero.
*/
+#ifndef CONFIG_EVA
#define __copy_to_user(to, from, n) \
({ \
void __user *__cu_to; \
@@ -734,13 +992,58 @@
__cu_to = (to); \
__cu_from = (from); \
__cu_len = (n); \
- might_fault(); \
- __cu_len = __invoke_copy_to_user(__cu_to, __cu_from, __cu_len); \
+ might_fault(); \
+ __cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from, __cu_len); \
__cu_len; \
})
+#else
+#define __copy_to_user(to, from, n) \
+({ \
+ void __user *__cu_to; \
+ const void *__cu_from; \
+ long __cu_len; \
+ \
+ __cu_to = (to); \
+ __cu_from = (from); \
+ __cu_len = (n); \
+ if (segment_eq(get_fs(), KERNEL_DS)) \
+ __cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from, __cu_len); \
+ else { \
+ might_fault(); \
+ __cu_len = __invoke_copy_to_user(__cu_to, __cu_from, __cu_len); \
+ } \
+ __cu_len; \
+})
+#endif
-extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
+#ifndef CONFIG_EVA
+#define __copy_to_user_inatomic(to, from, n) \
+({ \
+ void __user *__cu_to; \
+ const void *__cu_from; \
+ long __cu_len; \
+ \
+ __cu_to = (to); \
+ __cu_from = (from); \
+ __cu_len = (n); \
+ __cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from, __cu_len); \
+ __cu_len; \
+})
+#define __copy_from_user_inatomic(to, from, n) \
+({ \
+ void *__cu_to; \
+ const void __user *__cu_from; \
+ long __cu_len; \
+ \
+ __cu_to = (to); \
+ __cu_from = (from); \
+ __cu_len = (n); \
+ __cu_len = __invoke_copy_from_kernel_inatomic(__cu_to, __cu_from, \
+ __cu_len); \
+ __cu_len; \
+})
+#else
#define __copy_to_user_inatomic(to, from, n) \
({ \
void __user *__cu_to; \
@@ -750,7 +1053,10 @@
__cu_to = (to); \
__cu_from = (from); \
__cu_len = (n); \
- __cu_len = __invoke_copy_to_user(__cu_to, __cu_from, __cu_len); \
+ if (segment_eq(get_fs(), KERNEL_DS)) \
+ __cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from, __cu_len); \
+ else \
+ __cu_len = __invoke_copy_to_user(__cu_to, __cu_from, __cu_len); \
__cu_len; \
})
@@ -763,10 +1069,15 @@
__cu_to = (to); \
__cu_from = (from); \
__cu_len = (n); \
- __cu_len = __invoke_copy_from_user_inatomic(__cu_to, __cu_from, \
- __cu_len); \
+ if (segment_eq(get_fs(), KERNEL_DS)) \
+ __cu_len = __invoke_copy_from_kernel_inatomic(__cu_to, __cu_from, \
+ __cu_len); \
+ else \
+ __cu_len = __invoke_copy_from_user_inatomic(__cu_to, __cu_from, \
+ __cu_len); \
__cu_len; \
})
+#endif
/*
* copy_to_user: - Copy a block of data into user space.
@@ -781,6 +1092,24 @@
* Returns number of bytes that could not be copied.
* On success, this will be zero.
*/
+#ifndef CONFIG_EVA
+#define copy_to_user(to, from, n) \
+({ \
+ void __user *__cu_to; \
+ const void *__cu_from; \
+ long __cu_len; \
+ \
+ __cu_to = (to); \
+ __cu_from = (from); \
+ __cu_len = (n); \
+ if (access_ok(VERIFY_WRITE, __cu_to, __cu_len)) { \
+ might_fault(); \
+ __cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from, \
+ __cu_len); \
+ } \
+ __cu_len; \
+})
+#else
#define copy_to_user(to, from, n) \
({ \
void __user *__cu_to; \
@@ -790,15 +1119,20 @@
__cu_to = (to); \
__cu_from = (from); \
__cu_len = (n); \
- if (access_ok(VERIFY_WRITE, __cu_to, __cu_len)) { \
- might_fault(); \
- __cu_len = __invoke_copy_to_user(__cu_to, __cu_from, \
- __cu_len); \
- } \
+ if (segment_eq(get_fs(), KERNEL_DS)) \
+ __cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from, \
+ __cu_len); \
+ else \
+ if (access_ok(VERIFY_WRITE, __cu_to, __cu_len)) { \
+ might_fault(); \
+ __cu_len = __invoke_copy_to_user(__cu_to, __cu_from, \
+ __cu_len); \
+ } \
__cu_len; \
})
+#endif
-#define __invoke_copy_from_user(to, from, n) \
+#define __invoke_copy_from_kernel(to, from, n) \
({ \
register void *__cu_to_r __asm__("$4"); \
register const void __user *__cu_from_r __asm__("$5"); \
@@ -809,7 +1143,7 @@
__cu_len_r = (n); \
__asm__ __volatile__( \
".set\tnoreorder\n\t" \
- __MODULE_JAL(__copy_user) \
+ __MODULE_JAL(__copy_user) \
".set\tnoat\n\t" \
__UA_ADDU "\t$1, %1, %2\n\t" \
".set\tat\n\t" \
@@ -821,6 +1155,59 @@
__cu_len_r; \
})
+#ifdef CONFIG_EVA
+#define __invoke_copy_from_user(to, from, n) \
+({ \
+ register void *__cu_to_r __asm__("$4"); \
+ register const void __user *__cu_from_r __asm__("$5"); \
+ register long __cu_len_r __asm__("$6"); \
+ \
+ __cu_to_r = (to); \
+ __cu_from_r = (from); \
+ __cu_len_r = (n); \
+ __asm__ __volatile__( \
+ ".set\tnoreorder\n\t" \
+ __MODULE_JAL(__copy_fromuser) \
+ ".set\tnoat\n\t" \
+ __UA_ADDU "\t$1, %1, %2\n\t" \
+ ".set\tat\n\t" \
+ ".set\treorder" \
+ : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
+ : \
+ : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \
+ DADDI_SCRATCH, "memory"); \
+ __cu_len_r; \
+})
+#endif
+
+extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
+
+#define __invoke_copy_from_kernel_inatomic(to, from, n) \
+({ \
+ register void *__cu_to_r __asm__("$4"); \
+ register const void __user *__cu_from_r __asm__("$5"); \
+ register long __cu_len_r __asm__("$6"); \
+ \
+ __cu_to_r = (to); \
+ __cu_from_r = (from); \
+ __cu_len_r = (n); \
+ __asm__ __volatile__( \
+ ".set\tnoreorder\n\t" \
+ __MODULE_JAL(__copy_user_inatomic) \
+ ".set\tnoat\n\t" \
+ __UA_ADDU "\t$1, %1, %2\n\t" \
+ ".set\tat\n\t" \
+ ".set\treorder" \
+ : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
+ : \
+ : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \
+ DADDI_SCRATCH, "memory"); \
+ __cu_len_r; \
+})
+
+#ifdef CONFIG_EVA
+extern size_t __copy_fromuser_inatomic(void *__to, const void *__from, size_t __n);
+
#define __invoke_copy_from_user_inatomic(to, from, n) \
({ \
register void *__cu_to_r __asm__("$4"); \
@@ -832,7 +1219,7 @@
__cu_len_r = (n); \
__asm__ __volatile__( \
".set\tnoreorder\n\t" \
- __MODULE_JAL(__copy_user_inatomic) \
+ __MODULE_JAL(__copy_fromuser_inatomic) \
".set\tnoat\n\t" \
__UA_ADDU "\t$1, %1, %2\n\t" \
".set\tat\n\t" \
@@ -844,6 +1231,32 @@
__cu_len_r; \
})
+extern size_t __copy_inuser(void *__to, const void *__from, size_t __n);
+
+#define __invoke_copy_in_user(to, from, n) \
+({ \
+ register void *__cu_to_r __asm__("$4"); \
+ register const void __user *__cu_from_r __asm__("$5"); \
+ register long __cu_len_r __asm__("$6"); \
+ \
+ __cu_to_r = (to); \
+ __cu_from_r = (from); \
+ __cu_len_r = (n); \
+ __asm__ __volatile__( \
+ ".set\tnoreorder\n\t" \
+ __MODULE_JAL(__copy_inuser) \
+ ".set\tnoat\n\t" \
+ __UA_ADDU "\t$1, %1, %2\n\t" \
+ ".set\tat\n\t" \
+ ".set\treorder" \
+ : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \
+ : \
+ : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \
+ DADDI_SCRATCH, "memory"); \
+ __cu_len_r; \
+})
+#endif
+
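All of the __invoke_copy_* wrappers share one calling convention: destination, source and length are pinned into $4, $5 and $6 (the MIPS argument registers) with GCC explicit register variables, the out-of-line copy routine is entered through __MODULE_JAL, and the clobber list names every register that routine may scribble on. A condensed sketch of the convention follows; it mirrors the macros above, only builds in a MIPS kernel tree where __copy_user exists, and is meant purely as illustration:

    /* Sketch of the __invoke_copy_* register-pinning convention.
     * Returns the number of bytes that could NOT be copied, which is
     * how the __copy_user family reports partial failure.
     */
    static inline long invoke_copy(void *to, const void *from, long n)
    {
            register void *to_r          __asm__("$4") = to;
            register const void *from_r  __asm__("$5") = from;
            register long len_r          __asm__("$6") = n;

            __asm__ __volatile__(
                    "jal\t__copy_user"              /* out-of-line copier */
                    : "+r" (to_r), "+r" (from_r), "+r" (len_r)
                    :
                    : "$8", "$9", "$10", "$11", "$12", "$14", "$15",
                      "$24", "$31", "memory");
            return len_r;
    }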
/*
* __copy_from_user: - Copy a block of data from user space, with less checking.
* @to: Destination address, in kernel space.
@@ -861,6 +1274,22 @@
* If some data could not be copied, this function will pad the copied
* data to the requested size using zero bytes.
*/
+#ifndef CONFIG_EVA
+#define __copy_from_user(to, from, n) \
+({ \
+ void *__cu_to; \
+ const void __user *__cu_from; \
+ long __cu_len; \
+ \
+ __cu_to = (to); \
+ __cu_from = (from); \
+ __cu_len = (n); \
+ might_fault(); \
+ __cu_len = __invoke_copy_from_kernel(__cu_to, __cu_from, \
+ __cu_len); \
+ __cu_len; \
+})
+#else
#define __copy_from_user(to, from, n) \
({ \
void *__cu_to; \
@@ -872,9 +1301,10 @@
__cu_len = (n); \
might_fault(); \
__cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \
- __cu_len); \
+ __cu_len); \
__cu_len; \
})
+#endif
/*
* copy_from_user: - Copy a block of data from user space.
@@ -892,7 +1322,25 @@
* If some data could not be copied, this function will pad the copied
* data to the requested size using zero bytes.
*/
-#define copy_from_user(to, from, n) \
+#ifndef CONFIG_EVA
+#define copy_from_user(to, from, n) \
+({ \
+ void *__cu_to; \
+ const void __user *__cu_from; \
+ long __cu_len; \
+ \
+ __cu_to = (to); \
+ __cu_from = (from); \
+ __cu_len = (n); \
+ if (access_ok(VERIFY_READ, __cu_from, __cu_len)) { \
+ might_fault(); \
+ __cu_len = __invoke_copy_from_kernel(__cu_to, __cu_from, \
+ __cu_len); \
+ } \
+ __cu_len; \
+})
+#else
+#define copy_from_user(to, from, n) \
({ \
void *__cu_to; \
const void __user *__cu_from; \
@@ -901,14 +1349,53 @@
__cu_to = (to); \
__cu_from = (from); \
__cu_len = (n); \
- if (access_ok(VERIFY_READ, __cu_from, __cu_len)) { \
- might_fault(); \
- __cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \
- __cu_len); \
- } \
+ if (segment_eq(get_fs(), KERNEL_DS)) \
+ __cu_len = __invoke_copy_from_kernel(__cu_to, __cu_from, \
+ __cu_len); \
+ else \
+ if (access_ok(VERIFY_READ, __cu_from, __cu_len)) { \
+ might_fault(); \
+ __cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \
+ __cu_len); \
+ } \
__cu_len; \
})
+#endif
+#ifndef CONFIG_EVA
+#define __copy_in_user(to, from, n) \
+({ \
+ void __user *__cu_to; \
+ const void __user *__cu_from; \
+ long __cu_len; \
+ \
+ __cu_to = (to); \
+ __cu_from = (from); \
+ __cu_len = (n); \
+ might_fault(); \
+ __cu_len = __invoke_copy_from_kernel(__cu_to, __cu_from, \
+ __cu_len); \
+ __cu_len; \
+})
+
+#define copy_in_user(to, from, n) \
+({ \
+ void __user *__cu_to; \
+ const void __user *__cu_from; \
+ long __cu_len; \
+ \
+ __cu_to = (to); \
+ __cu_from = (from); \
+ __cu_len = (n); \
+ if (likely(access_ok(VERIFY_READ, __cu_from, __cu_len) && \
+ access_ok(VERIFY_WRITE, __cu_to, __cu_len))) { \
+ might_fault(); \
+ __cu_len = __invoke_copy_from_kernel(__cu_to, __cu_from, \
+ __cu_len); \
+ } \
+ __cu_len; \
+})
+#else
#define __copy_in_user(to, from, n) \
({ \
void __user *__cu_to; \
@@ -919,8 +1406,8 @@
__cu_from = (from); \
__cu_len = (n); \
might_fault(); \
- __cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \
- __cu_len); \
+ __cu_len = __invoke_copy_in_user(__cu_to, __cu_from, \
+ __cu_len); \
__cu_len; \
})
@@ -936,11 +1423,12 @@
if (likely(access_ok(VERIFY_READ, __cu_from, __cu_len) && \
access_ok(VERIFY_WRITE, __cu_to, __cu_len))) { \
might_fault(); \
- __cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \
- __cu_len); \
+ __cu_len = __invoke_copy_in_user(__cu_to, __cu_from, \
+ __cu_len); \
} \
__cu_len; \
})
+#endif
/*
* __clear_user: - Zero a block of memory in user space, with less checking.
@@ -963,7 +1451,11 @@
"move\t$4, %1\n\t"
"move\t$5, $0\n\t"
"move\t$6, %2\n\t"
+#ifndef CONFIG_EVA
__MODULE_JAL(__bzero)
+#else
+ __MODULE_JAL(__bzero_user)
+#endif
"move\t%0, $6"
: "=r" (res)
: "r" (addr), "r" (size)
@@ -1012,7 +1504,11 @@
"move\t$4, %1\n\t"
"move\t$5, %2\n\t"
"move\t$6, %3\n\t"
+#ifndef CONFIG_EVA
+ __MODULE_JAL(__strncpy_from_kernel_nocheck_asm)
+#else
__MODULE_JAL(__strncpy_from_user_nocheck_asm)
+#endif
"move\t%0, $2"
: "=r" (res)
: "r" (__to), "r" (__from), "r" (__len)
@@ -1039,6 +1535,7 @@
* If @count is smaller than the length of the string, copies @count bytes
* and returns @count.
*/
+#ifndef CONFIG_EVA
static inline long
strncpy_from_user(char *__to, const char __user *__from, long __len)
{
@@ -1049,6 +1546,37 @@
"move\t$4, %1\n\t"
"move\t$5, %2\n\t"
"move\t$6, %3\n\t"
+ __MODULE_JAL(__strncpy_from_kernel_asm)
+ "move\t%0, $2"
+ : "=r" (res)
+ : "r" (__to), "r" (__from), "r" (__len)
+ : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
+
+ return res;
+}
+#else
+static inline long
+strncpy_from_user(char *__to, const char __user *__from, long __len)
+{
+ long res;
+
+ if (segment_eq(get_fs(), KERNEL_DS)) {
+ __asm__ __volatile__(
+ "move\t$4, %1\n\t"
+ "move\t$5, %2\n\t"
+ "move\t$6, %3\n\t"
+ __MODULE_JAL(__strncpy_from_kernel_asm)
+ "move\t%0, $2"
+ : "=r" (res)
+ : "r" (__to), "r" (__from), "r" (__len)
+ : "$2", "$3", "$4", "$5", "$6", __UA_t0, "$31", "memory");
+ return res;
+ }
+ might_fault();
+ __asm__ __volatile__(
+ "move\t$4, %1\n\t"
+ "move\t$5, %2\n\t"
+ "move\t$6, %3\n\t"
__MODULE_JAL(__strncpy_from_user_asm)
"move\t%0, $2"
: "=r" (res)
@@ -1057,6 +1585,7 @@
return res;
}
+#endif
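Whichever branch of the #ifdef is built, strncpy_from_user() keeps the usual semantics: the string length on success, __len if no NUL terminator was found within __len bytes, or a negative errno on a faulting pointer. A hedged kernel-context sketch of a typical caller (copy_name() is a hypothetical helper):

    /* Copy a NUL-terminated name from user space into kbuf. */
    static long copy_name(char *kbuf, const char __user *uname, long max)
    {
            long res = strncpy_from_user(kbuf, uname, max);

            if (res < 0)
                    return res;             /* fault: -EFAULT */
            if (res == max)
                    return -ENAMETOOLONG;   /* no NUL within max bytes */
            return res;                     /* length, excluding the NUL */
    }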
/* Returns: 0 if bad, string length+1 (memory size) of string if ok */
static inline long __strlen_user(const char __user *s)
@@ -1066,7 +1595,11 @@
might_fault();
__asm__ __volatile__(
"move\t$4, %1\n\t"
+#ifndef CONFIG_EVA
+ __MODULE_JAL(__strlen_kernel_nocheck_asm)
+#else
__MODULE_JAL(__strlen_user_nocheck_asm)
+#endif
"move\t%0, $2"
: "=r" (res)
: "r" (s)
@@ -1096,7 +1629,11 @@
might_fault();
__asm__ __volatile__(
"move\t$4, %1\n\t"
+#ifndef CONFIG_EVA
+ __MODULE_JAL(__strlen_kernel_asm)
+#else
__MODULE_JAL(__strlen_user_asm)
+#endif
"move\t%0, $2"
: "=r" (res)
: "r" (s)
@@ -1114,7 +1651,11 @@
__asm__ __volatile__(
"move\t$4, %1\n\t"
"move\t$5, %2\n\t"
+#ifndef CONFIG_EVA
+ __MODULE_JAL(__strnlen_kernel_nocheck_asm)
+#else
__MODULE_JAL(__strnlen_user_nocheck_asm)
+#endif
"move\t%0, $2"
: "=r" (res)
: "r" (s), "r" (n)
@@ -1137,6 +1678,7 @@
* If there is a limit on the length of a valid string, you may wish to
* consider using strnlen_user() instead.
*/
+#ifndef CONFIG_EVA
static inline long strnlen_user(const char __user *s, long n)
{
long res;
@@ -1145,6 +1687,34 @@
__asm__ __volatile__(
"move\t$4, %1\n\t"
"move\t$5, %2\n\t"
+ __MODULE_JAL(__strnlen_kernel_asm)
+ "move\t%0, $2"
+ : "=r" (res)
+ : "r" (s), "r" (n)
+ : "$2", "$4", "$5", __UA_t0, "$31");
+
+ return res;
+}
+#else
+static inline long strnlen_user(const char __user *s, long n)
+{
+ long res;
+
+ if (segment_eq(get_fs(), KERNEL_DS)) {
+ __asm__ __volatile__(
+ "move\t$4, %1\n\t"
+ "move\t$5, %2\n\t"
+ __MODULE_JAL(__strnlen_kernel_asm)
+ "move\t%0, $2"
+ : "=r" (res)
+ : "r" (s), "r" (n)
+ : "$2", "$4", "$5", __UA_t0, "$31");
+ return res;
+ }
+ might_fault();
+ __asm__ __volatile__(
+ "move\t$4, %1\n\t"
+ "move\t$5, %2\n\t"
__MODULE_JAL(__strnlen_user_asm)
"move\t%0, $2"
: "=r" (res)
@@ -1153,6 +1723,7 @@
return res;
}
+#endif
struct exception_table_entry
{
diff --git a/arch/mips/include/asm/ucontext.h b/arch/mips/include/asm/ucontext.h
deleted file mode 100644
index 9bc07b9..0000000
--- a/arch/mips/include/asm/ucontext.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/ucontext.h>
diff --git a/arch/mips/include/asm/vdso.h b/arch/mips/include/asm/vdso.h
index cca56aa..77056fc 100644
--- a/arch/mips/include/asm/vdso.h
+++ b/arch/mips/include/asm/vdso.h
@@ -11,6 +11,9 @@
#include <linux/types.h>
+void mips_thread_vdso(struct thread_info *ti);
+void arch_release_thread_info(struct thread_info *info);
+void vdso_epc_adjust(struct pt_regs *xcp);
#ifdef CONFIG_32BIT
struct mips_vdso {
diff --git a/arch/mips/include/asm/vga.h b/arch/mips/include/asm/vga.h
index f4cff7e..f82c837 100644
--- a/arch/mips/include/asm/vga.h
+++ b/arch/mips/include/asm/vga.h
@@ -6,6 +6,7 @@
#ifndef _ASM_VGA_H
#define _ASM_VGA_H
+#include <asm/addrspace.h>
#include <asm/byteorder.h>
/*
@@ -13,7 +14,7 @@
* access the videoram directly without any black magic.
*/
-#define VGA_MAP_MEM(x, s) (0xb0000000L + (unsigned long)(x))
+#define VGA_MAP_MEM(x, s) CKSEG1ADDR(0x10000000L + (unsigned long)(x))
#define vga_readb(x) (*(x))
#define vga_writeb(x, y) (*(y) = (x))
diff --git a/arch/mips/include/asm/xor.h b/arch/mips/include/asm/xor.h
deleted file mode 100644
index c82eb12..0000000
--- a/arch/mips/include/asm/xor.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/xor.h>
diff --git a/arch/mips/include/uapi/asm/Kbuild b/arch/mips/include/uapi/asm/Kbuild
index 350cccc..be7196e 100644
--- a/arch/mips/include/uapi/asm/Kbuild
+++ b/arch/mips/include/uapi/asm/Kbuild
@@ -1,7 +1,9 @@
# UAPI Header export list
include include/uapi/asm-generic/Kbuild.asm
-header-y += auxvec.h
+generic-y += auxvec.h
+generic-y += ipcbuf.h
+
header-y += bitsperlong.h
header-y += break.h
header-y += byteorder.h
@@ -11,7 +13,6 @@
header-y += inst.h
header-y += ioctl.h
header-y += ioctls.h
-header-y += ipcbuf.h
header-y += kvm_para.h
header-y += mman.h
header-y += msgbuf.h
diff --git a/arch/mips/include/uapi/asm/auxvec.h b/arch/mips/include/uapi/asm/auxvec.h
deleted file mode 100644
index 7cf7f2d..0000000
--- a/arch/mips/include/uapi/asm/auxvec.h
+++ /dev/null
@@ -1,4 +0,0 @@
-#ifndef _ASM_AUXVEC_H
-#define _ASM_AUXVEC_H
-
-#endif /* _ASM_AUXVEC_H */
diff --git a/arch/mips/include/uapi/asm/hwcap.h b/arch/mips/include/uapi/asm/hwcap.h
new file mode 100644
index 0000000..c7484a7
--- /dev/null
+++ b/arch/mips/include/uapi/asm/hwcap.h
@@ -0,0 +1,8 @@
+#ifndef _UAPI_ASM_HWCAP_H
+#define _UAPI_ASM_HWCAP_H
+
+/* HWCAP flags */
+#define HWCAP_MIPS_R6 (1 << 0)
+#define HWCAP_MIPS_MSA (1 << 1)
+
+#endif /* _UAPI_ASM_HWCAP_H */
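These bits are exported to user space in the ELF auxiliary vector, so a program can test them with getauxval(3) rather than parsing /proc/cpuinfo. A minimal example, assuming a glibc that provides <sys/auxv.h>:

    #include <stdio.h>
    #include <sys/auxv.h>

    #define HWCAP_MIPS_R6  (1 << 0)
    #define HWCAP_MIPS_MSA (1 << 1)

    int main(void)
    {
            unsigned long hw = getauxval(AT_HWCAP);

            printf("R6:  %s\n", (hw & HWCAP_MIPS_R6)  ? "yes" : "no");
            printf("MSA: %s\n", (hw & HWCAP_MIPS_MSA) ? "yes" : "no");
            return 0;
    }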
diff --git a/arch/mips/include/uapi/asm/inst.h b/arch/mips/include/uapi/asm/inst.h
index 0f4aec2..ab281d3 100644
--- a/arch/mips/include/uapi/asm/inst.h
+++ b/arch/mips/include/uapi/asm/inst.h
@@ -18,21 +18,37 @@
enum major_op {
spec_op, bcond_op, j_op, jal_op,
beq_op, bne_op, blez_op, bgtz_op,
+#ifndef CONFIG_CPU_MIPSR6
addi_op, addiu_op, slti_op, sltiu_op,
+#else
+ cbcond0_op, addiu_op, slti_op, sltiu_op,
+#endif
andi_op, ori_op, xori_op, lui_op,
cop0_op, cop1_op, cop2_op, cop1x_op,
beql_op, bnel_op, blezl_op, bgtzl_op,
+#ifndef CONFIG_CPU_MIPSR6
daddi_op, daddiu_op, ldl_op, ldr_op,
+#else
+ cbcond1_op, daddiu_op, ldl_op, ldr_op,
+#endif
spec2_op, jalx_op, mdmx_op, spec3_op,
lb_op, lh_op, lwl_op, lw_op,
lbu_op, lhu_op, lwr_op, lwu_op,
sb_op, sh_op, swl_op, sw_op,
sdl_op, sdr_op, swr_op, cache_op,
+#ifndef CONFIG_CPU_MIPSR6
ll_op, lwc1_op, lwc2_op, pref_op,
lld_op, ldc1_op, ldc2_op, ld_op,
sc_op, swc1_op, swc2_op, major_3b_op,
scd_op, sdc1_op, sdc2_op, sd_op
+#else
+ ll_op, lwc1_op, bc_op, pref_op,
+ lld_op, ldc1_op, jump_op, ld_op,
+ sc_op, swc1_op, balc_op, pcrel_op,
+ scd_op, sdc1_op, jump2_op, sd_op
+#endif
};
+#define msa_op mdmx_op
/*
* func field of spec opcode.
@@ -74,8 +90,19 @@
ext_op, dextm_op, dextu_op, dext_op,
ins_op, dinsm_op, dinsu_op, dins_op,
lx_op = 0x0a,
- bshfl_op = 0x20,
- dbshfl_op = 0x24,
+ lwle_op = 0x19,
+ lwre_op = 0x1a, cachee_op = 0x1b,
+ sbe_op = 0x1c, she_op = 0x1d,
+ sce_op = 0x1e, swe_op = 0x1f,
+ bshfl_op = 0x20, swle_op = 0x21,
+ swre_op = 0x22, prefe_op = 0x23,
+ dbshfl_op = 0x24, cache6_op = 0x25,
+ sc6_op = 0x26, scd6_op = 0x27,
+ lbue_op = 0x28, lhue_op = 0x29,
+ lbe_op = 0x2c, lhe_op = 0x2d,
+ lle_op = 0x2e, lwe_op = 0x2f,
+ pref6_op = 0x35, ll6_op = 0x36,
+ lld6_op = 0x37,
rdhwr_op = 0x3b
};
@@ -98,14 +125,16 @@
*/
enum cop_op {
mfc_op = 0x00, dmfc_op = 0x01,
- cfc_op = 0x02, mtc_op = 0x04,
- dmtc_op = 0x05, ctc_op = 0x06,
- bc_op = 0x08, cop_op = 0x10,
+ cfc_op = 0x02, mfhc_op = 0x03,
+ mtc_op = 0x04, dmtc_op = 0x05,
+ ctc_op = 0x06, mthc_op = 0x07,
+ rs_bc_op = 0x08, bc1eqz_op = 0x09,
+ bc1nez_op = 0x0d, cop_op = 0x10,
copm_op = 0x18
};
/*
- * rt field of cop.bc_op opcodes
+ * rt field of cop.rs_bc_op opcodes
*/
enum bcop_op {
bcf_op, bct_op, bcfl_op, bctl_op
@@ -149,9 +178,16 @@
fceill_op = 0x0a, ffloorl_op = 0x0b,
fround_op = 0x0c, ftrunc_op = 0x0d,
fceil_op = 0x0e, ffloor_op = 0x0f,
+ fsel_op = 0x10,
fmovc_op = 0x11, fmovz_op = 0x12,
- fmovn_op = 0x13, frecip_op = 0x15,
- frsqrt_op = 0x16, fcvts_op = 0x20,
+ fmovn_op = 0x13, fseleqz_op = 0x14,
+ frecip_op = 0x15, frsqrt_op = 0x16,
+ fselnez_op = 0x17,
+ fmaddf_op = 0x18, fmsubf_op = 0x19,
+ frint_op = 0x1a, fclass_op = 0x1b,
+ fmin_op = 0x1c, fmina_op = 0x1d,
+ fmax_op = 0x1e, fmaxa_op = 0x1f,
+ fcvts_op = 0x20,
fcvtd_op = 0x21, fcvte_op = 0x22,
fcvtw_op = 0x24, fcvtl_op = 0x25,
fcmp_op = 0x30
@@ -162,8 +198,8 @@
*/
enum cop1x_func {
lwxc1_op = 0x00, ldxc1_op = 0x01,
- pfetch_op = 0x07, swxc1_op = 0x08,
- sdxc1_op = 0x09, madd_s_op = 0x20,
+ swxc1_op = 0x08, sdxc1_op = 0x09,
+ pfetch_op = 0x0f, madd_s_op = 0x20,
madd_d_op = 0x21, madd_e_op = 0x22,
msub_s_op = 0x28, msub_d_op = 0x29,
msub_e_op = 0x2a, nmadd_s_op = 0x30,
@@ -194,6 +230,25 @@
};
/*
+ * func field for MSA MI10 format
+ */
+enum msa_mi10_func {
+ msa_ld_op = 8,
+ msa_st_op = 9,
+};
+
+enum relpc_op {
+ addiupc_op = 0,
+ lwpc_op = 1,
+ lwupc_op = 2,
+};
+
+enum relpc_func {
+ auipc_func = 6,
+ aluipc_func = 7,
+};
+
+/*
* (microMIPS) Major opcodes.
*/
enum mm_major_op {
@@ -538,6 +593,15 @@
;))))))
};
+struct spec3_format { /* SPEC3 */
+ BITFIELD_FIELD(unsigned int opcode : 6,
+ BITFIELD_FIELD(unsigned int rs : 5,
+ BITFIELD_FIELD(unsigned int rt : 5,
+ BITFIELD_FIELD(signed int simmediate : 9,
+ BITFIELD_FIELD(unsigned int ls_func : 7,
+ ;)))))
+};
+
struct f_format { /* FPU register format */
BITFIELD_FIELD(unsigned int opcode : 6,
BITFIELD_FIELD(unsigned int : 1,
@@ -588,6 +652,42 @@
;)))))))
};
+struct msa_mi10_format { /* MSA */
+ BITFIELD_FIELD(unsigned int opcode : 6,
+ BITFIELD_FIELD(signed int s10 : 10,
+ BITFIELD_FIELD(unsigned int rs : 5,
+ BITFIELD_FIELD(unsigned int wd : 5,
+ BITFIELD_FIELD(unsigned int func : 4,
+ BITFIELD_FIELD(unsigned int df : 2,
+ ;))))))
+};
+
+struct rel_format { /* PC-relative */
+ BITFIELD_FIELD(unsigned int opcode : 6,
+ BITFIELD_FIELD(unsigned int rs : 5,
+ BITFIELD_FIELD(unsigned int op : 2,
+ BITFIELD_FIELD(signed int simmediate : 19,
+ ;))))
+};
+
+struct rl16_format { /* PC-relative, 16bit offset */
+ BITFIELD_FIELD(unsigned int opcode : 6,
+ BITFIELD_FIELD(unsigned int rs : 5,
+ BITFIELD_FIELD(unsigned int op : 2,
+ BITFIELD_FIELD(unsigned int func : 3,
+ BITFIELD_FIELD(signed int simmediate : 16,
+ ;)))))
+};
+
+struct rl18_format { /* PC-relative, 18bit offset */
+ BITFIELD_FIELD(unsigned int opcode : 6,
+ BITFIELD_FIELD(unsigned int rs : 5,
+ BITFIELD_FIELD(unsigned int op : 2,
+ BITFIELD_FIELD(unsigned int unused : 1,
+ BITFIELD_FIELD(signed int simmediate : 18,
+ ;)))))
+};
+
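Each of these structs is overlaid on the raw 32-bit instruction word through union mips_instruction (extended below), so decoders read named fields instead of open-coded shifts and masks. A stand-alone sketch of the idea for the new PC-relative format; the explicit bit-field layout shown is the little-endian ordering (the kernel's BITFIELD_FIELD macro handles both endiannesses), and the test word is arbitrary:

    #include <stdint.h>
    #include <stdio.h>

    /* Little-endian equivalent of struct rel_format above. */
    struct rel_format {
            int32_t  simmediate : 19;
            uint32_t op         : 2;
            uint32_t rs         : 5;
            uint32_t opcode     : 6;
    };

    union insn {
            uint32_t word;
            struct rel_format rel;
    };

    int main(void)
    {
            union insn i = { .word = 0xec420004 };  /* arbitrary word */

            printf("opcode=%u rs=%u op=%u simm=%d\n",
                   i.rel.opcode, i.rel.rs, i.rel.op, i.rel.simmediate);
            return 0;
    }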
/*
* microMIPS instruction formats (32-bit length)
*
@@ -854,8 +954,13 @@
struct c_format c_format;
struct r_format r_format;
struct p_format p_format;
+ struct spec3_format spec3_format;
struct f_format f_format;
struct ma_format ma_format;
+ struct msa_mi10_format msa_mi10_format;
+ struct rel_format rel_format;
+ struct rl16_format rl16_format;
+ struct rl18_format rl18_format;
struct b_format b_format;
struct ps_format ps_format;
struct v_format v_format;
diff --git a/arch/mips/include/uapi/asm/ipcbuf.h b/arch/mips/include/uapi/asm/ipcbuf.h
deleted file mode 100644
index 84c7e51..0000000
--- a/arch/mips/include/uapi/asm/ipcbuf.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/ipcbuf.h>
diff --git a/arch/mips/include/uapi/asm/sigcontext.h b/arch/mips/include/uapi/asm/sigcontext.h
index 6c9906f..85db0cb 100644
--- a/arch/mips/include/uapi/asm/sigcontext.h
+++ b/arch/mips/include/uapi/asm/sigcontext.h
@@ -12,6 +12,18 @@
#include <linux/types.h>
#include <asm/sgidefs.h>
+/* scalar FP context was used */
+#define USED_FP (1 << 0)
+
+/* the value of Status.FR when context was saved */
+#define USED_FR1 (1 << 1)
+
+/* FR=1, but with odd singles in bits 63:32 of preceding even double */
+#define USED_HYBRID_FPRS (1 << 2)
+
+/* extended context was used, see struct extcontext for details */
+#define USED_EXTCONTEXT (1 << 3)
+
#if _MIPS_SIM == _MIPS_SIM_ABI32
/*
diff --git a/arch/mips/include/uapi/asm/swab.h b/arch/mips/include/uapi/asm/swab.h
index 97c2f81..7c05e93 100644
--- a/arch/mips/include/uapi/asm/swab.h
+++ b/arch/mips/include/uapi/asm/swab.h
@@ -13,7 +13,7 @@
#define __SWAB_64_THRU_32__
-#ifdef CONFIG_CPU_MIPSR2
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
static inline __attribute_const__ __u16 __arch_swab16(__u16 x)
{
@@ -39,8 +39,8 @@
#define __arch_swab32 __arch_swab32
/*
- * Having already checked for CONFIG_CPU_MIPSR2, enable the
- * optimized version for 64-bit kernel on r2 CPUs.
+ * Having already checked for CONFIG_CPU_MIPSR2/R6, enable the
+ * optimized version for 64-bit kernel on R2 & R6 CPUs.
*/
#ifdef CONFIG_64BIT
static inline __attribute_const__ __u64 __arch_swab64(__u64 x)
@@ -55,5 +55,5 @@
}
#define __arch_swab64 __arch_swab64
#endif /* CONFIG_64BIT */
-#endif /* CONFIG_CPU_MIPSR2 */
+#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
#endif /* _ASM_SWAB_H */
diff --git a/arch/mips/include/uapi/asm/sysmips.h b/arch/mips/include/uapi/asm/sysmips.h
index ae637e9..fa9d937 100644
--- a/arch/mips/include/uapi/asm/sysmips.h
+++ b/arch/mips/include/uapi/asm/sysmips.h
@@ -21,5 +21,19 @@
#define MIPS_FIXADE 7 /* control address error fixing */
#define MIPS_RDNVRAM 10 /* read NVRAM */
#define MIPS_ATOMIC_SET 2001 /* atomically set variable */
+#define MIPS_FPU_PRCTL 2002 /* FPU mode/emulation fine control */
+
+#ifndef PR_SET_FP_MODE
+#define PR_SET_FP_MODE 43
+#endif
+#ifndef PR_GET_FP_MODE
+#define PR_GET_FP_MODE 44
+#endif
+#ifndef PR_FP_MODE_FR
+#define PR_FP_MODE_FR (1 << 0)
+#endif
+#ifndef PR_FP_MODE_FRE
+#define PR_FP_MODE_FRE (1 << 1)
+#endif
#endif /* _ASM_SYSMIPS_H */
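The PR_* values are normally supplied by <linux/prctl.h>; the #ifndef guards here only paper over older UAPI headers during the backport. From user space the FP mode is reached through prctl(2), as in this sketch (it assumes the running kernel actually implements the operation; otherwise prctl() fails with EINVAL):

    #include <stdio.h>
    #include <sys/prctl.h>

    #ifndef PR_GET_FP_MODE
    #define PR_GET_FP_MODE 44
    #endif
    #ifndef PR_FP_MODE_FR
    #define PR_FP_MODE_FR (1 << 0)
    #endif

    int main(void)
    {
            int mode = prctl(PR_GET_FP_MODE, 0, 0, 0, 0);

            if (mode < 0) {
                    perror("PR_GET_FP_MODE");
                    return 1;
            }
            printf("Status.FR = %d\n", !!(mode & PR_FP_MODE_FR));
            return 0;
    }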
diff --git a/arch/mips/include/uapi/asm/ucontext.h b/arch/mips/include/uapi/asm/ucontext.h
new file mode 100644
index 0000000..2320144
--- /dev/null
+++ b/arch/mips/include/uapi/asm/ucontext.h
@@ -0,0 +1,65 @@
+#ifndef __MIPS_UAPI_ASM_UCONTEXT_H
+#define __MIPS_UAPI_ASM_UCONTEXT_H
+
+/**
+ * struct extcontext - extended context header structure
+ * @magic: magic value identifying the type of extended context
+ * @size: the size in bytes of the enclosing structure
+ *
+ * Extended context structures provide context which does not fit within struct
+ * sigcontext. They are placed sequentially in memory at the end of struct
+ * ucontext and struct sigframe, with each extended context structure beginning
+ * with a header defined by this struct. The type of context represented is
+ * indicated by the magic field. Userland may check each extended context
+ * structure against magic values that it recognises. The size field allows any
+ * unrecognised context to be skipped, allowing for future expansion. The end
+ * of the extended context data is indicated by the magic value
+ * END_EXTCONTEXT_MAGIC.
+ */
+struct extcontext {
+ unsigned int magic;
+ unsigned int size;
+};
+
+/**
+ * struct msa_extcontext - MSA extended context structure
+ * @ext: the extended context header, with magic == MSA_EXTCONTEXT_MAGIC
+ * @wr: the most significant 64 bits of each MSA vector register
+ * @csr: the value of the MSA control & status register
+ *
+ * If MSA context is live for a task at the time a signal is delivered to it,
+ * this structure will hold the MSA context of the task as it was prior to the
+ * signal delivery.
+ */
+struct msa_extcontext {
+ struct extcontext ext;
+#define MSA_EXTCONTEXT_MAGIC 0x784d5341 /* xMSA */
+
+ unsigned long long wr[32];
+ unsigned int csr;
+};
+
+#define END_EXTCONTEXT_MAGIC 0x78454e44 /* xEND */
+
+/**
+ * struct ucontext - user context structure
+ * @uc_flags:
+ * @uc_link:
+ * @uc_stack:
+ * @uc_mcontext: holds basic processor state
+ * @uc_sigmask:
+ * @uc_extcontext: holds extended processor state
+ */
+struct ucontext {
+ /* Historic fields matching asm-generic */
+ unsigned long uc_flags;
+ struct ucontext *uc_link;
+ stack_t uc_stack;
+ struct sigcontext uc_mcontext;
+ sigset_t uc_sigmask;
+
+ /* Extended context structures may follow ucontext */
+ unsigned long long uc_extcontext[0];
+};
+
+#endif /* __MIPS_UAPI_ASM_UCONTEXT_H */
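The header layout implies a simple walking protocol: start at uc_extcontext, read each struct extcontext, dispatch on magic, and advance by size until END_EXTCONTEXT_MAGIC. A stand-alone sketch (find_msa() is a hypothetical helper; in a real SA_SIGINFO handler 'p' would be &uc->uc_extcontext):

    #include <stdio.h>

    struct extcontext { unsigned int magic, size; };
    #define MSA_EXTCONTEXT_MAGIC 0x784d5341 /* xMSA */
    #define END_EXTCONTEXT_MAGIC 0x78454e44 /* xEND */

    /* Walk the records until the terminator, looking for MSA state. */
    static const struct extcontext *find_msa(const void *p)
    {
            const struct extcontext *ext = p;

            while (ext->magic != END_EXTCONTEXT_MAGIC) {
                    if (ext->magic == MSA_EXTCONTEXT_MAGIC)
                            return ext;
                    ext = (const void *)((const char *)ext + ext->size);
            }
            return NULL;
    }

    int main(void)
    {
            /* Synthetic stream: one 16-byte MSA record, then the end. */
            unsigned int buf[] = {
                    MSA_EXTCONTEXT_MAGIC, 16, 0, 0,
                    END_EXTCONTEXT_MAGIC, 0,
            };

            printf("MSA record at offset %ld\n",
                   (long)((const char *)find_msa(buf) - (const char *)buf));
            return 0;
    }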
diff --git a/arch/mips/include/uapi/asm/unistd.h b/arch/mips/include/uapi/asm/unistd.h
index 1dee279..af4d5c0 100644
--- a/arch/mips/include/uapi/asm/unistd.h
+++ b/arch/mips/include/uapi/asm/unistd.h
@@ -369,16 +369,22 @@
#define __NR_process_vm_writev (__NR_Linux + 346)
#define __NR_kcmp (__NR_Linux + 347)
#define __NR_finit_module (__NR_Linux + 348)
+/* Backporting seccomp, skip a few ...
+ * #define __NR_sched_setattr (__NR_Linux + 349)
+ * #define __NR_sched_getattr (__NR_Linux + 350)
+ * #define __NR_renameat2 (__NR_Linux + 351)
+ */
+#define __NR_seccomp (__NR_Linux + 352)
/*
* Offset of the last Linux o32 flavoured syscall
*/
-#define __NR_Linux_syscalls 348
+#define __NR_Linux_syscalls 352
#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
#define __NR_O32_Linux 4000
-#define __NR_O32_Linux_syscalls 348
+#define __NR_O32_Linux_syscalls 352
#if _MIPS_SIM == _MIPS_SIM_ABI64
@@ -695,16 +701,22 @@
#define __NR_kcmp (__NR_Linux + 306)
#define __NR_finit_module (__NR_Linux + 307)
#define __NR_getdents64 (__NR_Linux + 308)
+/* Backporting seccomp, skip a few ...
+ * #define __NR_sched_setattr (__NR_Linux + 309)
+ * #define __NR_sched_getattr (__NR_Linux + 310)
+ * #define __NR_renameat2 (__NR_Linux + 311)
+ */
+#define __NR_seccomp (__NR_Linux + 312)
/*
* Offset of the last Linux 64-bit flavoured syscall
*/
-#define __NR_Linux_syscalls 308
+#define __NR_Linux_syscalls 312
#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
#define __NR_64_Linux 5000
-#define __NR_64_Linux_syscalls 308
+#define __NR_64_Linux_syscalls 312
#if _MIPS_SIM == _MIPS_SIM_NABI32
@@ -1025,15 +1037,21 @@
#define __NR_process_vm_writev (__NR_Linux + 310)
#define __NR_kcmp (__NR_Linux + 311)
#define __NR_finit_module (__NR_Linux + 312)
+/* Backporting seccomp, skip a few ...
+ * #define __NR_sched_setattr (__NR_Linux + 313)
+ * #define __NR_sched_getattr (__NR_Linux + 314)
+ * #define __NR_renameat2 (__NR_Linux + 315)
+ */
+#define __NR_seccomp (__NR_Linux + 316)
/*
* Offset of the last N32 flavoured syscall
*/
-#define __NR_Linux_syscalls 312
+#define __NR_Linux_syscalls 316
#endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
#define __NR_N32_Linux 6000
-#define __NR_N32_Linux_syscalls 312
+#define __NR_N32_Linux_syscalls 316
#endif /* _UAPI_ASM_UNISTD_H */
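The absolute syscall number is always the per-ABI base plus the offset, so the backported seccomp lands at 4000 + 352 = 4352 for o32, 5000 + 312 = 5312 for n64, and 6000 + 316 = 6316 for N32. A hedged user-space probe invoking it directly via syscall(2) (assumes this backported kernel; SECCOMP_SET_MODE_STRICT is the upstream operation value 0):

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/syscall.h>

    #ifndef __NR_seccomp
    #define __NR_seccomp 4352       /* o32: __NR_Linux (4000) + 352 */
    #endif
    #define SECCOMP_SET_MODE_STRICT 0

    int main(void)
    {
            /* After this, only read/write/_exit/sigreturn are allowed. */
            if (syscall(__NR_seccomp, SECCOMP_SET_MODE_STRICT, 0, NULL) < 0) {
                    perror("seccomp");
                    return 1;
            }
            _exit(0);
    }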
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 423d871..646e933 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -4,9 +4,10 @@
extra-y := head.o vmlinux.lds
-obj-y += cpu-probe.o branch.o entry.o genex.o idle.o irq.o process.o \
- prom.o ptrace.o reset.o setup.o signal.o syscall.o \
- time.o topology.o traps.o unaligned.o watch.o vdso.o
+obj-y += cpu-probe.o branch.o elf.o entry.o genex.o idle.o irq.o \
+ process.o prom.o ptrace.o reset.o setup.o signal.o \
+ syscall.o time.o topology.o traps.o unaligned.o watch.o \
+ vdso.o
ifdef CONFIG_FUNCTION_TRACER
CFLAGS_REMOVE_ftrace.o = -pg
@@ -51,7 +52,7 @@
obj-$(CONFIG_MIPS_MT_FPAFF) += mips-mt-fpaff.o
obj-$(CONFIG_MIPS_MT_SMTC) += smtc.o smtc-asm.o smtc-proc.o
obj-$(CONFIG_MIPS_MT_SMP) += smp-mt.o
-obj-$(CONFIG_MIPS_CMP) += smp-cmp.o
+obj-$(CONFIG_MIPS_CMP) += smp-cmp.o cpc.o
obj-$(CONFIG_CPU_MIPSR2) += spram.o
obj-$(CONFIG_MIPS_VPE_LOADER) += vpe.o
@@ -86,6 +87,7 @@
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
obj-$(CONFIG_SPINLOCK_TEST) += spinlock_test.o
obj-$(CONFIG_MIPS_MACHINE) += mips_machine.o
+obj-$(CONFIG_MIPS_INCOMPATIBLE_ARCH_EMULATION) += mips-r2-emul.o
CFLAGS_cpu-bugs64.o = $(shell if $(CC) $(KBUILD_CFLAGS) -Wa,-mdaddi -c -o /dev/null -x c /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi)
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index 0845091..c61c344 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -165,7 +165,74 @@
OFFSET(THREAD_FPR30, task_struct, thread.fpu.fpr[30]);
OFFSET(THREAD_FPR31, task_struct, thread.fpu.fpr[31]);
+ /* the least significant 64 bits of each FP register */
+ OFFSET(THREAD_FPR0_LS64, task_struct,
+ thread.fpu.fpr[0].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR1_LS64, task_struct,
+ thread.fpu.fpr[1].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR2_LS64, task_struct,
+ thread.fpu.fpr[2].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR3_LS64, task_struct,
+ thread.fpu.fpr[3].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR4_LS64, task_struct,
+ thread.fpu.fpr[4].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR5_LS64, task_struct,
+ thread.fpu.fpr[5].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR6_LS64, task_struct,
+ thread.fpu.fpr[6].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR7_LS64, task_struct,
+ thread.fpu.fpr[7].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR8_LS64, task_struct,
+ thread.fpu.fpr[8].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR9_LS64, task_struct,
+ thread.fpu.fpr[9].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR10_LS64, task_struct,
+ thread.fpu.fpr[10].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR11_LS64, task_struct,
+ thread.fpu.fpr[11].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR12_LS64, task_struct,
+ thread.fpu.fpr[12].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR13_LS64, task_struct,
+ thread.fpu.fpr[13].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR14_LS64, task_struct,
+ thread.fpu.fpr[14].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR15_LS64, task_struct,
+ thread.fpu.fpr[15].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR16_LS64, task_struct,
+ thread.fpu.fpr[16].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR17_LS64, task_struct,
+ thread.fpu.fpr[17].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR18_LS64, task_struct,
+ thread.fpu.fpr[18].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR19_LS64, task_struct,
+ thread.fpu.fpr[19].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR20_LS64, task_struct,
+ thread.fpu.fpr[20].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR21_LS64, task_struct,
+ thread.fpu.fpr[21].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR22_LS64, task_struct,
+ thread.fpu.fpr[22].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR23_LS64, task_struct,
+ thread.fpu.fpr[23].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR24_LS64, task_struct,
+ thread.fpu.fpr[24].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR25_LS64, task_struct,
+ thread.fpu.fpr[25].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR26_LS64, task_struct,
+ thread.fpu.fpr[26].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR27_LS64, task_struct,
+ thread.fpu.fpr[27].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR28_LS64, task_struct,
+ thread.fpu.fpr[28].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR29_LS64, task_struct,
+ thread.fpu.fpr[29].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR30_LS64, task_struct,
+ thread.fpu.fpr[30].val64[FPR_IDX(64, 0)]);
+ OFFSET(THREAD_FPR31_LS64, task_struct,
+ thread.fpu.fpr[31].val64[FPR_IDX(64, 0)]);
+
OFFSET(THREAD_FCR31, task_struct, thread.fpu.fcr31);
+ OFFSET(THREAD_MSA_CSR, task_struct, thread.fpu.msacsr);
BLANK();
}
diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c
index 46c2ad0..e9628ea 100644
--- a/arch/mips/kernel/branch.c
+++ b/arch/mips/kernel/branch.c
@@ -204,12 +204,28 @@
* returns 0 or BRANCH_LIKELY_TAKEN as appropriate after
* evaluating the branch.
*/
+/* Note on R6 compact branches:
+ * Compact branches do not raise exceptions (besides BC1EQZ/BC1NEZ)
+ * and do not execute the instruction in the forbidden slot when the
+ * branch is taken. This means their return EPC can safely be set to
+ * EPC + 8: the only way to get a BD precise exception is to execute
+ * the instruction in the forbidden slot when the branch is not taken.
+ *
+ * Unconditional compact jumps/branches are handled as well for
+ * completeness (they do not raise BD precise exceptions).
+ */
int __compute_return_epc_for_insn(struct pt_regs *regs,
union mips_instruction insn)
{
- unsigned int bit, fcr31, dspcontrol;
+ unsigned int bit;
long epc = regs->cp0_epc;
int ret = 0;
+ unsigned int fcr31;
+#ifdef CONFIG_CPU_MIPSR6
+ int reg;
+#else
+ unsigned dspcontrol;
+#endif
switch (insn.i_format.opcode) {
/*
@@ -219,8 +235,15 @@
switch (insn.r_format.func) {
case jalr_op:
regs->regs[insn.r_format.rd] = epc + 8;
- /* Fall through */
+ regs->cp0_epc = regs->regs[insn.r_format.rs];
+ break;
case jr_op:
+#ifdef CONFIG_CPU_MIPSR6
+ if (!mipsr2_emulation) {
+ ret = -SIGILL;
+ break;
+ }
+#endif
regs->cp0_epc = regs->regs[insn.r_format.rs];
break;
}
@@ -233,8 +256,14 @@
*/
case bcond_op:
switch (insn.i_format.rt) {
- case bltz_op:
case bltzl_op:
+#ifdef CONFIG_CPU_MIPSR6
+ if (!mipsr2_emulation) {
+ ret = -SIGILL;
+ break;
+ }
+#endif
+ case bltz_op:
if ((long)regs->regs[insn.i_format.rs] < 0) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
if (insn.i_format.rt == bltzl_op)
@@ -244,8 +273,14 @@
regs->cp0_epc = epc;
break;
- case bgez_op:
case bgezl_op:
+#ifdef CONFIG_CPU_MIPSR6
+ if (!mipsr2_emulation) {
+ ret = -SIGILL;
+ break;
+ }
+#endif
+ case bgez_op:
if ((long)regs->regs[insn.i_format.rs] >= 0) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
if (insn.i_format.rt == bgezl_op)
@@ -255,8 +290,21 @@
regs->cp0_epc = epc;
break;
- case bltzal_op:
case bltzall_op:
+#ifdef CONFIG_CPU_MIPSR6
+ if (!mipsr2_emulation) {
+ ret = -SIGILL;
+ break;
+ }
+#endif
+ case bltzal_op:
+#ifdef CONFIG_CPU_MIPSR6
+ /* MIPSR6: nal == bltzal $0 */
+ if (insn.i_format.rs && !mipsr2_emulation) {
+ ret = -SIGILL;
+ break;
+ }
+#endif
regs->regs[31] = epc + 8;
if ((long)regs->regs[insn.i_format.rs] < 0) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
@@ -267,8 +315,21 @@
regs->cp0_epc = epc;
break;
- case bgezal_op:
case bgezall_op:
+#ifdef CONFIG_CPU_MIPSR6
+ if (!mipsr2_emulation) {
+ ret = -SIGILL;
+ break;
+ }
+#endif
+ case bgezal_op:
+#ifdef CONFIG_CPU_MIPSR6
+ /* MIPSR6: bal == bgezal $0 */
+ if (insn.i_format.rs && !mipsr2_emulation) {
+ ret = -SIGILL;
+ break;
+ }
+#endif
regs->regs[31] = epc + 8;
if ((long)regs->regs[insn.i_format.rs] >= 0) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
@@ -279,6 +340,7 @@
regs->cp0_epc = epc;
break;
+#ifndef CONFIG_CPU_MIPSR6
case bposge32_op:
if (!cpu_has_dsp)
goto sigill;
@@ -291,6 +353,7 @@
epc += 8;
regs->cp0_epc = epc;
break;
+#endif
}
break;
@@ -312,24 +375,36 @@
/*
* These are conditional and in i_format.
*/
- case beq_op:
case beql_op:
+#ifdef CONFIG_CPU_MIPSR6
+ if (!mipsr2_emulation) {
+ ret = -SIGILL;
+ break;
+ }
+#endif
+ case beq_op:
if (regs->regs[insn.i_format.rs] ==
regs->regs[insn.i_format.rt]) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
- if (insn.i_format.rt == beql_op)
+ if (insn.i_format.opcode == beql_op)
ret = BRANCH_LIKELY_TAKEN;
} else
epc += 8;
regs->cp0_epc = epc;
break;
- case bne_op:
case bnel_op:
+#ifdef CONFIG_CPU_MIPSR6
+ if (!mipsr2_emulation) {
+ ret = -SIGILL;
+ break;
+ }
+#endif
+ case bne_op:
if (regs->regs[insn.i_format.rs] !=
regs->regs[insn.i_format.rt]) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
- if (insn.i_format.rt == bnel_op)
+ if (insn.i_format.opcode == bnel_op)
ret = BRANCH_LIKELY_TAKEN;
} else
epc += 8;
@@ -338,10 +413,30 @@
case blez_op: /* not really i_format */
case blezl_op:
+#ifdef CONFIG_CPU_MIPSR6
+ /*
+ * Compact branches: (blez:) blezalc, bgezalc, bgeuc
+ * Compact branches: (blezl:) blezc, bgezc, bgec
+ */
+ if (insn.i_format.rt) {
+ if (insn.i_format.opcode == blez_op)
+ if ((insn.i_format.rs == insn.i_format.rt) ||
+ !insn.i_format.rs) /* blezalc, bgezalc */
+ regs->regs[31] = epc + 4;
+ epc += 8;
+ regs->cp0_epc = epc;
+ break;
+ }
+
+ if ((insn.i_format.opcode != blez_op) && !mipsr2_emulation) {
+ ret = -SIGILL;
+ break;
+ }
+#endif
/* rt field assumed to be zero */
if ((long)regs->regs[insn.i_format.rs] <= 0) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
- if (insn.i_format.rt == bnel_op)
+ if (insn.i_format.opcode == blezl_op)
ret = BRANCH_LIKELY_TAKEN;
} else
epc += 8;
@@ -350,23 +445,102 @@
case bgtz_op:
case bgtzl_op:
+#ifdef CONFIG_CPU_MIPSR6
+ /*
+ * Compact branches: (bgtz:) bltzalc, bgtzalc, bltuc
+ * Compact branches: (bgtzl:) bltc, bltzc, bgtzc
+ */
+ if (insn.i_format.rt) {
+ if (insn.i_format.opcode == bgtz_op)
+ if ((insn.i_format.rs == insn.i_format.rt) ||
+ !insn.i_format.rs) /* bltzalc, bgtzalc */
+ regs->regs[31] = epc + 4;
+ epc += 8;
+ regs->cp0_epc = epc;
+ break;
+ }
+
+ if ((insn.i_format.opcode != bgtz_op) && !mipsr2_emulation) {
+ ret = -SIGILL;
+ break;
+ }
+#endif
/* rt field assumed to be zero */
if ((long)regs->regs[insn.i_format.rs] > 0) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
- if (insn.i_format.rt == bnel_op)
+ if (insn.i_format.opcode == bgtzl_op)
ret = BRANCH_LIKELY_TAKEN;
} else
epc += 8;
regs->cp0_epc = epc;
break;
+#ifdef CONFIG_CPU_MIPSR6
+ case cbcond0_op:
+ /*
+ * Compact branches: bovc, beqc, beqzalc
+ */
+
+ /* fall through */
+ case cbcond1_op:
+ /*
+ * Compact branches: bnvc, bnec, bnezalc
+ */
+ if (insn.i_format.rt && !insn.i_format.rs) /* beqzalc/bnezalc */
+ regs->regs[31] = epc + 4;
+ epc += 8;
+ regs->cp0_epc = epc;
+
+ break;
+#endif
+
/*
* And now the FPA/cp1 branch instructions.
*/
case cop1_op:
+#ifdef CONFIG_CPU_MIPSR6
+ if ((insn.i_format.rs == bc1eqz_op) ||
+ (insn.i_format.rs == bc1nez_op)) {
+
+ if (!used_math()) { /* First time FPU user. */
+ ret = init_fpu();
+ if (ret && raw_cpu_has_fpu && !mipsr2_emulation) {
+ ret = -ret;
+ break;
+ }
+ ret = 0;
+ }
+ lose_fpu(1); /* Save FPU state for the emulator. */
+ reg = insn.i_format.rt;
+ bit = 0;
+ switch (insn.i_format.rs) {
+ case bc1eqz_op:
+ if (!(get_fpr64(&current->thread.fpu.fpr[reg], 0) & (__u64)0x1))
+ bit = 1;
+ break;
+ case bc1nez_op:
+ if (get_fpr64(&current->thread.fpu.fpr[reg], 0) & (__u64)0x1)
+ bit = 1;
+ break;
+ }
+ own_fpu(1); /* Restore FPU state. */
+ if (bit)
+ epc = epc + 4 + (insn.i_format.simmediate << 2);
+ else
+ epc += 8;
+ regs->cp0_epc = epc;
+
+ break;
+ }
+
+ if (!mipsr2_emulation) {
+ ret = -SIGILL;
+ break;
+ }
+#endif
preempt_disable();
if (is_fpu_owner())
- asm volatile("cfc1\t%0,$31" : "=r" (fcr31));
+ fcr31 = fpu_get_fcr31();
else
fcr31 = current->thread.fpu.fcr31;
preempt_enable();
@@ -398,6 +572,38 @@
break;
}
break;
+
+#ifdef CONFIG_CPU_MIPSR6
+ case bc_op:
+ epc += 8;
+ regs->cp0_epc = epc;
+ break;
+
+ case jump_op:
+ if (insn.i_format.rs) /* beqzc */
+ epc = epc + 8;
+ else /* jic, no offset shift */
+ epc = regs->regs[insn.i_format.rt] + insn.i_format.simmediate;
+ regs->cp0_epc = epc;
+ break;
+
+ case balc_op:
+ regs->regs[31] = epc + 4;
+ epc = epc + 4 + (insn.i_format.simmediate << 2);
+ regs->cp0_epc = epc;
+ break;
+
+ case jump2_op:
+ if (insn.i_format.rs) /* bnezc */
+ epc = epc + 8;
+ else { /* jialc, no offset shift */
+ regs->regs[31] = epc + 4;
+ epc = regs->regs[insn.i_format.rt] + insn.i_format.simmediate;
+ }
+ regs->cp0_epc = epc;
+ break;
+#endif
+
#ifdef CONFIG_CPU_CAVIUM_OCTEON
case lwc2_op: /* This is bbit0 on Octeon */
if ((regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt))
@@ -433,12 +639,18 @@
#endif
}
+ if (ret < 0) {
+ force_sig(-ret, current);
+ return -EFAULT;
+ }
return ret;
+#ifndef CONFIG_CPU_MIPSR6
sigill:
printk("%s: DSP branch but not DSP ASE - sending SIGBUS.\n", current->comm);
force_sig(SIGBUS, current);
return -EFAULT;
+#endif
}
EXPORT_SYMBOL_GPL(__compute_return_epc_for_insn);
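The EPC arithmetic used all through __compute_return_epc_for_insn() is worth making explicit: a taken branch lands at EPC + 4 + (sign-extended 16-bit immediate << 2), while a branch not taken falls through past the delay slot to EPC + 8 (R6 compact branches, which only trap here in the not-taken case, likewise get EPC + 8). A small stand-alone model (branch_epc() is a hypothetical helper):

    #include <stdio.h>

    /* Model of the branch EPC computation in branch.c. */
    static long branch_epc(long epc, short simm16, int taken)
    {
            return taken ? epc + 4 + ((long)simm16 << 2) : epc + 8;
    }

    int main(void)
    {
            long epc = 0x400100;

            printf("taken:     %#lx\n", branch_epc(epc, -4, 1)); /* 0x4000f4 */
            printf("not taken: %#lx\n", branch_epc(epc, -4, 0)); /* 0x400108 */
            return 0;
    }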
diff --git a/arch/mips/kernel/cevt-gic.c b/arch/mips/kernel/cevt-gic.c
index 730eaf9..9a3c3cd 100644
--- a/arch/mips/kernel/cevt-gic.c
+++ b/arch/mips/kernel/cevt-gic.c
@@ -65,7 +65,7 @@
struct clock_event_device *cd;
unsigned int irq;
- if (!cpu_has_counter || !gic_frequency)
+ if (!gic_frequency)
return -ENXIO;
irq = MIPS_GIC_IRQ_BASE;
@@ -81,7 +81,7 @@
cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
cd->min_delta_ns = clockevent_delta2ns(0x300, cd);
- cd->rating = 300;
+ cd->rating = 350;
cd->irq = irq;
cd->cpumask = cpumask_of(cpu);
cd->set_next_event = gic_next_event;
diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c
index 02033ea..52a3a43 100644
--- a/arch/mips/kernel/cevt-r4k.c
+++ b/arch/mips/kernel/cevt-r4k.c
@@ -16,6 +16,7 @@
#include <asm/time.h>
#include <asm/cevt-r4k.h>
#include <asm/gic.h>
+#include <asm/irq_cpu.h>
/*
* The SMTC Kernel for the 34K, 1004K, et. al. replaces several
@@ -50,7 +51,7 @@
#ifndef CONFIG_MIPS_MT_SMTC
irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
{
- const int r2 = cpu_has_mips_r2;
+ const int r2 = cpu_has_mips_r2 | cpu_has_mips_r6;
struct clock_event_device *cd;
int cpu = smp_processor_id();
@@ -210,9 +211,6 @@
cd->set_mode = mips_set_clock_mode;
cd->event_handler = mips_event_handler;
-#ifdef CONFIG_CEVT_GIC
- if (!gic_present)
-#endif
clockevents_register_device(cd);
if (cp0_timer_irq_installed)
@@ -222,6 +220,12 @@
setup_irq(irq, &c0_compare_irqaction);
+#ifdef CONFIG_IRQ_CPU
+ mips_smp_c0_status_mask |= (0x100 << cp0_compare_irq);
+ if (cp0_perfcount_irq >= 0)
+ mips_smp_c0_status_mask |= (0x100 << cp0_perfcount_irq);
+#endif
+
return 0;
}
diff --git a/arch/mips/kernel/cpc.c b/arch/mips/kernel/cpc.c
new file mode 100644
index 0000000..8643a7b
--- /dev/null
+++ b/arch/mips/kernel/cpc.c
@@ -0,0 +1,268 @@
+/*
+ * This program is free software; you can distribute it and/or modify it
+ * under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * Copyright (C) 2013 Imagination Technologies Ltd
+ * Leonid Yegoshin (Leonid.Yegoshin@imgtec.com)
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/cpumask.h>
+#include <linux/interrupt.h>
+#include <linux/compiler.h>
+
+#include <linux/cpu.h>
+
+#include <linux/atomic.h>
+#include <asm/cacheflush.h>
+#include <asm/cpu.h>
+#include <asm/processor.h>
+#include <asm/hardirq.h>
+#include <asm/mmu_context.h>
+#include <asm/smp.h>
+#include <asm/time.h>
+#include <asm/mipsregs.h>
+#include <asm/mipsmtregs.h>
+#include <asm/mips_mt.h>
+#include <asm/amon.h>
+#include <asm/gic.h>
+#include <asm/gcmpregs.h>
+#include <asm/cpcregs.h>
+#include <asm/bootinfo.h>
+#include <asm/irq_cpu.h>
+
+unsigned long _cpc_base;
+int cpc_present = -1;
+
+static DEFINE_PER_CPU_ALIGNED(spinlock_t, mips_cpc_lock);
+
+
+int __init cpc_probe(unsigned long defaddr, unsigned long defsize)
+{
+ if (cpc_present >= 0)
+ return cpc_present;
+
+ if (gcmp_present <= 0) {
+ cpc_present = 0;
+ return 0;
+ }
+
+ if ((GCMPGCB(CPCST) & GCMP_GCB_CPCST_EN_MSK) == 0) {
+ cpc_present = 0;
+ return 0;
+ }
+
+ _cpc_base = GCMPGCBaddr(CPCBA);
+ if (_cpc_base & GCMP_GCB_CPCBA_EN_MSK)
+ goto success;
+
+ if (!defaddr) {
+ cpc_present = 0;
+ return 0;
+ }
+
+ /* Try to setup a platform value */
+ GCMPGCBaddrWrite(CPCBA, (defaddr | GCMP_GCB_CPCBA_EN_MSK));
+ _cpc_base = GCMPGCBaddr(CPCBA);
+ if ((_cpc_base & GCMP_GCB_CPCBA_EN_MSK) == 0) {
+ cpc_present = 0;
+ return 0;
+ }
+success:
+ pr_info("CPC available\n");
+ _cpc_base = (unsigned long) ioremap_nocache(_cpc_base & ~GCMP_GCB_CPCBA_EN_MSK, defsize);
+ cpc_present = 1;
+ return 1;
+}
+
+#ifdef CONFIG_SYSFS
+
+static ssize_t show_cpc_global(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int n = 0;
+
+ n = snprintf(buf, PAGE_SIZE,
+ "CPC Global CSR Access Privilege Register\t%08x\n"
+ "CPC Global Sequence Delay Counter\t\t%08x\n"
+ "CPC Global Rail Delay Counter Register\t\t%08x\n"
+ "CPC Global Reset Width Counter Register\t\t%08x\n"
+ "CPC Global Revision Register\t\t\t%08x\n"
+ ,
+ CPCGCB(CSRAPR),
+ CPCGCB(SEQDELAY),
+ CPCGCB(RAILDELAY),
+ CPCGCB(RESETWIDTH),
+ CPCGCB(REVID)
+ );
+ if (gcmp3_present) {
+ n += snprintf(buf+n, PAGE_SIZE-n,
+ "CPC3 Global Clock Control \t%08x\n"
+ "CPC3 Global CM Power Up\t\t%08x\n"
+ "CPC3 Global Reset Occurred\t\t%08x\n"
+ ,
+ CPCGCB(CLCTL),
+ CPCGCB(PWRUP),
+ CPCGCB(RESETST)
+ );
+ }
+
+ return n;
+}
+
+static char *cpc_cmd[] = { "0", "ClockOff", "PwrDown", "PwrUp", "Reset",
+ "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15" };
+
+static char *cpc_status[] = { "PwrDwn", "VddOK", "UpDelay", "UClkOff", "Reset",
+ "ResetDly", "nonCoherent", "Coherent", "Isolate", "ClrBus",
+ "DClkOff", "11", "12", "13", "14", "15" };
+
+static ssize_t show_cpc_local(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long irq_flags;
+ int n = 0;
+
+ if (gcmp3_present) {
+ unsigned int cmd, status, clkctl;
+ unsigned int lpack, vcrun, vcsuspend;
+ unsigned int ramsleep;
+
+ local_irq_save(irq_flags);
+
+ GCMPCLCB(OTHER) = dev->id;
+
+ cmd = CPCOCB(CMD);
+ status = CPCOCB(STATUS);
+ clkctl = CPCOCB(CCCTL);
+ lpack = CPCOCB(LPACK);
+ vcrun = CPCOCB(VCRUN);
+ vcsuspend = CPCOCB(VCSPND);
+ ramsleep = CPCOCB(RAMSLEEP);
+
+ local_irq_restore(irq_flags);
+
+ n += snprintf(buf+n, PAGE_SIZE-n,
+ "CPC Local Command Register\t\t\t%08x: CMD=%s\n"
+ "CPC Local Status and Configuration register\t%08x: Status=%s, LastCMD=%s\n"
+ "CPC3 Local Clock Change Register\t\t%08x\n"
+ "CPC3 Local Lpack Register\t\t%x\n"
+ "CPC3 Local VC Run Register\t\t%x\n"
+ "CPC3 Local VC Suspend Register\t\t%x\n"
+ "CPC3 Local RAM Sleep Register\t\t%08x\n"
+ ,
+ cmd, cpc_cmd[(cmd & CPCL_CMD_MASK) >> CPCL_CMD_SH],
+ status, cpc_status[(status & CPCL_STATUS_MASK) >> CPCL_STATUS_SH],
+ cpc_cmd[(status & CPCL_CMD_MASK) >> CPCL_CMD_SH],
+ clkctl, lpack, vcrun, vcsuspend, ramsleep
+ );
+ } else {
+ unsigned int cmd, status, other;
+ int corenum;
+
+ preempt_disable();
+ corenum = current_cpu_data.core;
+ spin_lock_irqsave(&per_cpu(mips_cpc_lock, corenum), irq_flags);
+
+ CPCLCB(OTHER) = (dev->id)<<16;
+
+ cmd = CPCOCB(CMD);
+ status = CPCOCB(STATUS);
+ other = CPCOCB(OTHER);
+
+ spin_unlock_irqrestore(&per_cpu(mips_cpc_lock, corenum), irq_flags);
+ preempt_enable();
+
+ n += snprintf(buf+n, PAGE_SIZE-n,
+ "CPC Local Command Register\t\t\t%08x: CMD=%s\n"
+ "CPC Local Status and Configuration register\t%08x: Status=%s, LastCMD=%s\n"
+ "CPC Core Other Addressing Register\t\t%08x\n"
+ ,
+ cmd, cpc_cmd[(cmd & CPCL_CMD_MASK) >> CPCL_CMD_SH],
+ status, cpc_status[(status & CPCL_STATUS_MASK) >> CPCL_STATUS_SH],
+ cpc_cmd[(status & CPCL_CMD_MASK) >> CPCL_CMD_SH],
+ other
+ );
+ }
+
+ return n;
+}
+
+static DEVICE_ATTR(cpc_global, 0444, show_cpc_global, NULL);
+static DEVICE_ATTR(cpc_local, 0444, show_cpc_local, NULL);
+
+static struct bus_type cpc_subsys = {
+ .name = "cpc",
+ .dev_name = "cpc",
+};
+
+
+
+static __cpuinit int cpc_add_core(int cpu)
+{
+ struct device *dev;
+ int err;
+ char name[16];
+
+ dev = kzalloc(sizeof *dev, GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ dev->id = cpu;
+ dev->bus = &cpc_subsys;
+ snprintf(name, sizeof name, "core%d", cpu);
+ dev->init_name = name;
+
+ err = device_register(dev);
+ if (err)
+ return err;
+
+ err = device_create_file(dev, &dev_attr_cpc_local);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int __init init_cpc_sysfs(void)
+{
+ int rc;
+ int cpuN;
+ int cpu;
+
+ if (cpc_present <= 0)
+ return 0;
+
+ rc = subsys_system_register(&cpc_subsys, NULL);
+ if (rc)
+ return rc;
+
+ rc = device_create_file(cpc_subsys.dev_root, &dev_attr_cpc_global);
+ if (rc)
+ return rc;
+
+ cpuN = ((GCMPGCB(GC) & GCMP_GCB_GC_NUMCORES_MSK) >> GCMP_GCB_GC_NUMCORES_SHF) + 1;
+ for (cpu = 0; cpu < cpuN; cpu++) {
+ rc = cpc_add_core(cpu);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
+device_initcall_sync(init_cpc_sysfs);
+
+#endif /* CONFIG_SYSFS */
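Once init_cpc_sysfs() has run, the attributes registered above should be readable from user space; the exact location follows from the subsystem name, so it is expected under /sys/devices/system/cpc/ (a path inferred here, not stated in the patch; verify on the target). A minimal reader:

    #include <stdio.h>

    int main(void)
    {
            char line[256];
            FILE *f = fopen("/sys/devices/system/cpc/cpc_global", "r");

            if (!f) {
                    perror("cpc_global");
                    return 1;
            }
            while (fgets(line, sizeof(line), f))
                    fputs(line, stdout);
            fclose(f);
            return 0;
    }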
diff --git a/arch/mips/kernel/cpu-bugs64.c b/arch/mips/kernel/cpu-bugs64.c
index de3c25f..942a65c 100644
--- a/arch/mips/kernel/cpu-bugs64.c
+++ b/arch/mips/kernel/cpu-bugs64.c
@@ -18,6 +18,8 @@
#include <asm/mipsregs.h>
#include <asm/setup.h>
+#ifndef CONFIG_CPU_MIPSR6
+
static char bug64hit[] __initdata =
"reliable operation impossible!\n%s";
static char nowar[] __initdata =
@@ -317,3 +319,14 @@
{
check_daddi();
}
+
+#else /* CONFIG_CPU_MIPSR6 */
+
+static volatile int daddi_ov __cpuinitdata;
+int daddiu_bug = 0;
+
+void __init check_bugs64_early(void) {}
+
+void __init check_bugs64(void) {}
+
+#endif /* CONFIG_CPU_MIPSR6 */
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index c6568bf..63a1291 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -22,11 +22,19 @@
#include <asm/cpu.h>
#include <asm/fpu.h>
#include <asm/mipsregs.h>
+#include <asm/msa.h>
#include <asm/watch.h>
#include <asm/elf.h>
#include <asm/spram.h>
#include <asm/uaccess.h>
+/* Hardware capabilities */
+#ifdef CONFIG_CPU_MIPSR6
+unsigned int elf_hwcap __read_mostly = HWCAP_MIPS_R6;
+#else
+unsigned int elf_hwcap __read_mostly;
+#endif
+
static int __cpuinitdata mips_fpu_disabled;
static int __init fpu_disable(char *s)
@@ -51,6 +59,20 @@
__setup("nodsp", dsp_disable);
+static int mips_htw_disabled;
+
+static int __init htw_disable(char *s)
+{
+ mips_htw_disabled = 1;
+ if (cpu_has_htw) {
+ write_c0_pwctl(HTW_PWCTL_BASE);
+ cpu_data[0].options2 &= ~MIPS_CPU_HTW;
+ }
+ return 1;
+}
+
+__setup("nohtw", htw_disable);
+
static inline void check_errata(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
@@ -75,6 +97,8 @@
check_errata();
}
+#include <asm/pgtable.h>
+#include <asm/bootinfo.h>
/*
* Probe whether cpu has config register by trying to play with
* alternate cache bit and see whether it matters.
@@ -118,6 +142,22 @@
}
/*
+ * Set and Get the FPU CSR31.
+ */
+static inline unsigned long cpu_test_fpu_csr31(unsigned long fcr31)
+{
+ unsigned long tmp;
+
+ tmp = read_c0_status();
+ __enable_fpu();
+ write_32bit_cp1_register(CP1_STATUS,fcr31);
+ enable_fpu_hazard();
+ fcr31 = read_32bit_cp1_register(CP1_STATUS);
+ write_c0_status(tmp);
+ return fcr31;
+}
+
+/*
* Check the CPU has an FPU the official way.
*/
static inline int __cpu_has_fpu(void)
@@ -125,6 +165,21 @@
return ((cpu_get_fpu_id() & 0xff00) != FPIR_IMP_NONE);
}
+static inline unsigned long cpu_get_msa_id(void)
+{
+ unsigned long status, msa_id;
+
+ status = read_c0_status();
+ set_c0_status(ST0_CU1|ST0_FR);
+ enable_fpu_hazard();
+ clear_c0_config5(MIPS_CONF5_FRE);
+ enable_msa();
+ msa_id = read_msa_ir();
+ disable_msa();
+ write_c0_status(status);
+ return msa_id;
+}
+
static inline void cpu_probe_vmbits(struct cpuinfo_mips *c)
{
#ifdef __NEED_VMBITS_PROBE
@@ -134,6 +189,17 @@
#endif
}
+static inline void cpu_probe_pabits(struct cpuinfo_mips *c)
+{
+ unsigned int enlow;
+
+ write_c0_entrylo0((-1UL)>>2); /* skip RIXI bits */
+ back_to_back_c0_hazard();
+ enlow = read_c0_entrylo0();
+ if ((_MIPS_SZLONG - fls(enlow)) < ilog2(_PAGE_GLOBAL))
+ c->options2 |= MIPS_CPU_HIMEM;
+}
+
static void __cpuinit set_isa(struct cpuinfo_mips *c, unsigned int isa)
{
switch (isa) {
@@ -162,6 +228,54 @@
}
}
+static void report_kernel(void)
+{
+ printk("Kernel:");
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ printk(" EB");
+#endif
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+ printk(" EL");
+#endif
+#ifdef CONFIG_64BIT
+ printk(" 64bit, 64bit address");
+#ifdef CONFIG_48VMBITS
+ printk(", 48 segbits");
+#else
+ printk(", 40 segbits");
+#endif
+#elif defined(CONFIG_CPU_MIPS64)
+ printk(" 64bit, 32bit address");
+#else
+ printk(" 32bit");
+#endif
+#ifdef CONFIG_CPU_MIPSR6
+ printk(", R6");
+#endif
+#ifdef CONFIG_CPU_HAS_MSA
+ printk(", MSA");
+#endif
+#ifdef CONFIG_MIPS_HARDWARE_TRACE
+ printk(", HW trace");
+#endif
+#ifdef CONFIG_NO_HZ_IDLE
+ printk(", dynamic ticks");
+#else
+ printk(", periodic ticks");
+#endif
+ printk(" %dHz",CONFIG_HZ);
+#ifdef CONFIG_HIGH_RES_TIMERS
+ printk(", HiRes timers");
+#endif
+#ifdef CONFIG_DMA_COHERENT
+ printk(", coherent DMA");
+#endif
+#ifdef CONFIG_DMA_NONCOHERENT
+ printk(", non-coherent DMA");
+#endif
+ printk(", pagesize=%dKiB\n",(1 << (PAGE_SHIFT - 10)));
+}
+
static char unknown_isa[] __cpuinitdata = KERN_ERR \
"Unsupported ISA type, c0.config0: %d.";
@@ -174,6 +288,8 @@
if (((config0 & MIPS_CONF_MT) >> 7) == 1)
c->options |= MIPS_CPU_TLB;
+ if (((config0 & MIPS_CONF_MT) >> 7) == 4)
+ c->options |= MIPS_CPU_TLB;
isa = (config0 & MIPS_CONF_AT) >> 13;
switch (isa) {
case 0:
@@ -184,6 +300,9 @@
case 1:
set_isa(c, MIPS_CPU_ISA_M32R2);
break;
+ case 2:
+ c->isa_level = MIPS_CPU_ISA_M32R6;
+ break;
default:
goto unknown;
}
@@ -196,8 +315,9 @@
case 1:
set_isa(c, MIPS_CPU_ISA_M64R2);
break;
+ case 2:
+ c->isa_level = MIPS_CPU_ISA_M64R6;
+ break;
 default:
 goto unknown;
}
break;
default:
@@ -228,8 +348,11 @@
c->options |= MIPS_CPU_FPU;
c->options |= MIPS_CPU_32FPR;
}
- if (cpu_has_tlb)
+ if (cpu_has_tlb) {
c->tlbsize = ((config1 & MIPS_CONF1_TLBS) >> 25) + 1;
+ c->tlbsizevtlb = c->tlbsize;
+ c->tlbsizeftlbsets = 0;
+ }
return config1 & MIPS_CONF_M;
}
@@ -254,10 +377,14 @@
if (config3 & MIPS_CONF3_SM) {
c->ases |= MIPS_ASE_SMARTMIPS;
+#if defined(CONFIG_64BIT) || !defined(CONFIG_MIPS_HUGE_TLB_SUPPORT)
c->options |= MIPS_CPU_RIXI;
+#endif
}
+#if defined(CONFIG_64BIT) || !defined(CONFIG_MIPS_HUGE_TLB_SUPPORT)
if (config3 & MIPS_CONF3_RXI)
c->options |= MIPS_CPU_RIXI;
+#endif
if (config3 & MIPS_CONF3_DSP)
c->ases |= MIPS_ASE_DSP;
if (config3 & MIPS_CONF3_DSP2P)
@@ -277,28 +404,174 @@
#endif
if (config3 & MIPS_CONF3_VZ)
c->ases |= MIPS_ASE_VZ;
+ if (config3 & MIPS_CONF3_SC)
+ c->options |= MIPS_CPU_SEGMENTS;
+ /* Only tested on 32-bit cores */
+ if (config3 & MIPS_CONF3_PW && !config_enabled(CONFIG_MIPS_PGD_C0_CONTEXT))
+ c->options2 |= MIPS_CPU_HTW;
+ if (config3 & MIPS_CONF3_MSA)
+ c->ases |= MIPS_ASE_MSA;
return config3 & MIPS_CONF_M;
}
-static inline unsigned int decode_config4(struct cpuinfo_mips *c)
+static unsigned int cpu_capability = 0;
+
+static inline unsigned int decode_config4(struct cpuinfo_mips *c, int pass,
+ int conf6available)
{
unsigned int config4;
+ unsigned int newcf4;
+ unsigned int config6;
config4 = read_c0_config4();
- if ((config4 & MIPS_CONF4_MMUEXTDEF) == MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT
- && cpu_has_tlb)
- c->tlbsize += (config4 & MIPS_CONF4_MMUSIZEEXT) * 0x40;
+ if (pass && cpu_has_tlb) {
+ if (config4 & MIPS_CONF4_IE) {
+ if (config4 & MIPS_CONF4_TLBINV) {
+ c->options |= MIPS_CPU_TLBINV;
+ printk("TLBINV/F supported, config4=0x%0x\n",config4);
+ if (config4 & MIPS_CONF4_TLBINV_FULL)
+ c->options |= MIPS_CPU_TLBINV_FULL;
+ }
+ }
+#ifdef CONFIG_CPU_MIPSR6
+ c->tlbsizevtlb = ((c->tlbsizevtlb - 1) |
+ (((config4 & MIPS_CONF4_VTLBSIZEEXT) >>
+ MIPS_CONF4_VTLBSIZEEXT_SHIFT) <<
+ MIPS_CONF1_TLBS_SIZE)) + 1;
+ c->tlbsize = c->tlbsizevtlb;
+
+ newcf4 = (config4 & ~MIPS_CONF4_FTLBPAGESIZE) |
+ ((((fls(PAGE_SIZE >> BASIC_PAGE_SHIFT)-1)/2)+1) <<
+ MIPS_CONF4_FTLBPAGESIZE_SHIFT);
+ write_c0_config4(newcf4);
+ back_to_back_c0_hazard();
+ config4 = read_c0_config4();
+ if (config4 != newcf4) {
+ printk(KERN_ERR "PAGE_SIZE 0x%0lx is not supported by FTLB (config4=0x%0x)\n",
+ PAGE_SIZE, config4);
+ if (conf6available && (cpu_capability & MIPS_FTLB_CAPABLE)) {
+ printk("Switching FTLB OFF\n");
+ config6 = read_c0_config6();
+ write_c0_config6(config6 & ~MIPS_CONF6_FTLBEN);
+ }
+ printk("Total TLB(VTLB) inuse: %d\n",c->tlbsizevtlb);
+ } else {
+ c->tlbsizeftlbsets = 1 <<
+ ((config4 & MIPS_CONF4_FTLBSETS) >>
+ MIPS_CONF4_FTLBSETS_SHIFT);
+ c->tlbsizeftlbways = ((config4 & MIPS_CONF4_FTLBWAYS) >>
+ MIPS_CONF4_FTLBWAYS_SHIFT) + 2;
+ c->tlbsize += (c->tlbsizeftlbways *
+ c->tlbsizeftlbsets);
+ printk("V/FTLB found: VTLB=%d, FTLB sets=%d, ways=%d total TLB=%d\n",
+ c->tlbsizevtlb, c->tlbsizeftlbsets, c->tlbsizeftlbways, c->tlbsize);
+ }
+#else
+ switch (config4 & MIPS_CONF4_MMUEXTDEF) {
+ case MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT:
+ c->tlbsize =
+ ((((config4 & MIPS_CONF4_MMUSIZEEXT) >>
+ MIPS_CONF4_MMUSIZEEXT_SHIFT) <<
+ MIPS_CONF1_TLBS_SIZE) |
+ (c->tlbsize - 1)) + 1;
+ c->tlbsizevtlb = c->tlbsize;
+ printk("MMUSizeExt found, total TLB=%d\n",c->tlbsize);
+ break;
+ case MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT:
+ c->tlbsizevtlb = ((c->tlbsizevtlb - 1) |
+ (((config4 & MIPS_CONF4_VTLBSIZEEXT) >>
+ MIPS_CONF4_VTLBSIZEEXT_SHIFT) <<
+ MIPS_CONF1_TLBS_SIZE)) + 1;
+ c->tlbsize = c->tlbsizevtlb;
+ /* fall through */
+ case MIPS_CONF4_MMUEXTDEF_FTLBSIZEEXT:
+ newcf4 = (config4 & ~MIPS_CONF4_FTLBPAGESIZE) |
+ ((((fls(PAGE_SIZE >> BASIC_PAGE_SHIFT)-1)/2)+1) <<
+ MIPS_CONF4_FTLBPAGESIZE_SHIFT);
+ write_c0_config4(newcf4);
+ back_to_back_c0_hazard();
+ config4 = read_c0_config4();
+ if (config4 != newcf4) {
+ printk(KERN_ERR "PAGE_SIZE 0x%0lx is not supported by FTLB (config4=0x%0x)\n",
+ PAGE_SIZE, config4);
+ if (conf6available && (cpu_capability & MIPS_FTLB_CAPABLE)) {
+ printk("Switching FTLB OFF\n");
+ config6 = read_c0_config6();
+ write_c0_config6(config6 & ~MIPS_CONF6_FTLBEN);
+ }
+ printk("Total TLB(VTLB) inuse: %d\n",c->tlbsizevtlb);
+ break;
+ }
+ c->tlbsizeftlbsets = 1 <<
+ ((config4 & MIPS_CONF4_FTLBSETS) >>
+ MIPS_CONF4_FTLBSETS_SHIFT);
+ c->tlbsizeftlbways = ((config4 & MIPS_CONF4_FTLBWAYS) >>
+ MIPS_CONF4_FTLBWAYS_SHIFT) + 2;
+ c->tlbsize += (c->tlbsizeftlbways *
+ c->tlbsizeftlbsets);
+ printk("V/FTLB found: VTLB=%d, FTLB sets=%d, ways=%d total TLB=%d\n",
+ c->tlbsizevtlb, c->tlbsizeftlbsets, c->tlbsizeftlbways, c->tlbsize);
+ break;
+ }
+#endif
+ }
c->kscratch_mask = (config4 >> 16) & 0xff;
return config4 & MIPS_CONF_M;
}
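The FTLB page-size handling in decode_config4() is a write-and-read-back
capability probe: the wanted FTLBPageSize field is written to Config4 and, if
the value read back differs, the hardware has refused it and the FTLB is left
out of the TLB accounting. A user-space sketch of the same idiom against a
simulated register (mask and shift are illustrative, not the real Config4
encoding):

#include <stdio.h>
#include <stdint.h>

#define FIELD_MASK  0x1f00	/* illustrative, not the real Config4 layout */
#define FIELD_SHIFT 8

static uint32_t hw_reg;		/* stands in for the CP0 register */

/* simulated hardware: only field values 0..3 are implemented */
static void reg_write(uint32_t v)
{
	if (((v & FIELD_MASK) >> FIELD_SHIFT) > 3)
		v &= ~FIELD_MASK;	/* unsupported value reads back as 0 */
	hw_reg = v;
}

static int probe_field(uint32_t wanted)
{
	uint32_t newval = (hw_reg & ~FIELD_MASK) | (wanted << FIELD_SHIFT);

	reg_write(newval);		/* kernel: write_c0_config4() + hazard barrier */
	return hw_reg == newval;	/* kernel: read_c0_config4() and compare */
}

int main(void)
{
	printf("field=2 supported: %d\n", probe_field(2));	/* prints 1 */
	printf("field=7 supported: %d\n", probe_field(7));	/* prints 0 */
	return 0;
}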
-static void __cpuinit decode_configs(struct cpuinfo_mips *c)
+static inline unsigned int decode_config5(struct cpuinfo_mips *c)
{
- int ok;
+ unsigned int config5;
+
+ config5 = read_c0_config5();
+ config5 &= ~(MIPS_CONF5_UFR | MIPS_CONF5_UFE);
+ write_c0_config5(config5);
+
+ if (config5 & MIPS_CONF5_EVA)
+ c->options |= MIPS_CPU_EVA;
+ if (config5 & MIPS_CONF5_MRP)
+ c->options2 |= MIPS_CPU_MAAR;
+ if (config5 & MIPS_CONF5_L2C)
+ c->options2 |= MIPS_CPU_L2C;
+ if (config5 & MIPS_CONF5_VC)
+ c->options2 |= MIPS_CPU_VC;
+
+ return config5 & MIPS_CONF_M;
+}
+
+static inline unsigned int decode_config6_ftlb(struct cpuinfo_mips *c)
+{
+ unsigned int config6;
+
+ if (cpu_capability & MIPS_FTLB_CAPABLE) {
+
+ /*
+ * Can't rely on mips_ftlb_disabled since kernel command line
+ * hasn't been processed yet. Need to peek at the raw command
+ * line for "noftlb".
+ */
+ if (strstr(arcs_cmdline, "noftlb") == NULL) {
+ config6 = read_c0_config6();
+
+ printk("Enable FTLB attempt\n");
+ write_c0_config6(config6 | MIPS_CONF6_FTLBEN);
+ back_to_back_c0_hazard();
+
+ return(1);
+ }
+ }
+
+ return(0);
+}
+
+
+static void decode_configs(struct cpuinfo_mips *c)
+{
+ int ok, ok3 = 0, ok6 = 0;
/* MIPS32 or MIPS64 compliant CPU. */
c->options = MIPS_CPU_4KEX | MIPS_CPU_4K_CACHE | MIPS_CPU_COUNTER |
@@ -307,20 +580,36 @@
c->scache.flags = MIPS_CACHE_NOT_PRESENT;
ok = decode_config0(c); /* Read Config registers. */
- BUG_ON(!ok); /* Arch spec violation! */
+ BUG_ON(!ok); /* Arch spec violation! */
if (ok)
ok = decode_config1(c);
if (ok)
ok = decode_config2(c);
if (ok)
- ok = decode_config3(c);
+ ok = ok3 = decode_config3(c);
if (ok)
- ok = decode_config4(c);
+ ok = decode_config4(c,0,0); /* first pass - just return Mbit */
+ if (ok)
+ ok = decode_config5(c);
+ if (cpu_capability & MIPS_FTLB_CAPABLE)
+ ok6 = decode_config6_ftlb(c);
+
+ if (ok3)
+ ok = decode_config4(c,1,ok6); /* real parse pass, thanks HW team :-/ */
mips_probe_watch_registers(c);
- if (cpu_has_mips_r2)
+ if (cpu_has_mips_r2 || cpu_has_mips_r6)
c->core = read_c0_ebase() & 0x3ff;
+
+ if (cpu_has_rixi) {
+ write_c0_pagegrain(read_c0_pagegrain() | PG_IEC);
+ back_to_back_c0_hazard();
+ if (read_c0_pagegrain() & PG_IEC) {
+ c->options |= MIPS_CPU_RIXI_EXCEPT;
+ pr_info("TLBRI/TLBXI exceptions are used\n");
+ }
+ }
}
#define R4K_OPTS (MIPS_CPU_TLB | MIPS_CPU_4KEX | MIPS_CPU_4K_CACHE \
@@ -648,8 +937,11 @@
static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu)
{
- decode_configs(c);
switch (c->processor_id & 0xff00) {
+ case PRID_IMP_QEMU:
+ c->cputype = CPU_QEMU;
+ __cpu_name[cpu] = "MIPS GENERIC QEMU";
+ break;
case PRID_IMP_4KC:
c->cputype = CPU_4KC;
__cpu_name[cpu] = "MIPS 4Kc";
@@ -712,7 +1004,39 @@
c->cputype = CPU_74K;
__cpu_name[cpu] = "MIPS 1074Kc";
break;
+ case PRID_IMP_PROAPTIV_UP:
+ c->cputype = CPU_PROAPTIV;
+ __cpu_name[cpu] = "MIPS proAptiv";
+ cpu_capability = MIPS_FTLB_CAPABLE;
+ break;
+ case PRID_IMP_PROAPTIV_MP:
+ c->cputype = CPU_PROAPTIV;
+ __cpu_name[cpu] = "MIPS proAptiv (multi)";
+ cpu_capability = MIPS_FTLB_CAPABLE;
+ break;
+ case PRID_IMP_INTERAPTIV_UP:
+ c->cputype = CPU_INTERAPTIV;
+ __cpu_name[cpu] = "MIPS interAptiv UP";
+ break;
+ case PRID_IMP_INTERAPTIV_MP:
+ c->cputype = CPU_INTERAPTIV;
+ __cpu_name[cpu] = "MIPS interAptiv";
+ case PRID_IMP_VIRTUOSO:
+ c->cputype = CPU_VIRTUOSO;
+ __cpu_name[cpu] = "MIPS Virtuoso";
+ break;
+ case PRID_IMP_P5600:
+ c->cputype = CPU_P5600;
+ __cpu_name[cpu] = "MIPS P5600";
+ cpu_capability = MIPS_FTLB_CAPABLE;
+ break;
+ case PRID_IMP_SAMURAI_UP:
+ c->cputype = CPU_SAMURAI;
+ __cpu_name[cpu] = "MIPS Samurai UP";
+ break;
}
+ report_kernel();
+ decode_configs(c);
spram_config();
}
@@ -969,6 +1293,9 @@
const char *__cpu_name[NR_CPUS];
const char *__elf_platform;
+unsigned int fpu_fcr31 __read_mostly = 0;
+unsigned int system_has_fpu __read_mostly = 0;
+unsigned int global_fpu_id __read_mostly = 0;
__cpuinit void cpu_probe(void)
{
@@ -1029,17 +1356,34 @@
if (mips_dsp_disabled)
c->ases &= ~(MIPS_ASE_DSP | MIPS_ASE_DSP2P);
- if (c->options & MIPS_CPU_FPU) {
- c->fpu_id = cpu_get_fpu_id();
+ c->htw_level = 0;
+ if (c->options2 & MIPS_CPU_HTW)
+ write_c0_pwctl(HTW_PWCTL_BASE);
+ if (mips_htw_disabled)
+ c->options2 &= ~MIPS_CPU_HTW;
- if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 |
- MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)) {
+ if (c->options & MIPS_CPU_FPU) {
+ system_has_fpu = 1;
+ fpu_fcr31 = cpu_test_fpu_csr31(FPU_CSR_DEFAULT);
+
+ global_fpu_id = cpu_get_fpu_id();
+ c->fpu_id = global_fpu_id;
+
+ if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M64R1 |
+ MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M64R2 |
+ MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6)) {
if (c->fpu_id & MIPS_FPIR_3D)
c->ases |= MIPS_ASE_MIPS3D;
+ if (c->fpu_id & MIPS_FPIR_HAS2008)
+ fpu_fcr31 = cpu_test_fpu_csr31(FPU_CSR_DEFAULT|FPU_CSR_MAC2008|FPU_CSR_ABS2008|FPU_CSR_NAN2008);
+ if (c->fpu_id & MIPS_FPIR_FREP) {
+ c->options2 |= MIPS_CPU_FRE;
+ pr_info("FPU has FRE support\n");
+ }
}
}
- if (cpu_has_mips_r2) {
+ if (cpu_has_mips_r2 || cpu_has_mips_r6) {
c->srsets = ((read_c0_srsctl() >> 26) & 0x0f) + 1;
/* R2 has Performance Counter Interrupt indicator */
c->options |= MIPS_CPU_PCI;
@@ -1047,7 +1391,15 @@
else
c->srsets = 1;
+ if (cpu_has_msa) {
+ c->msa_id = cpu_get_msa_id();
+ WARN(c->msa_id & MSA_IR_WRPF,
+ "Vector register partitioning unimplemented!");
+ elf_hwcap |= HWCAP_MIPS_MSA;
+ }
+
cpu_probe_vmbits(c);
+ cpu_probe_pabits(c);
#ifdef CONFIG_64BIT
if (cpu == 0)
@@ -1059,8 +1411,10 @@
{
struct cpuinfo_mips *c = &current_cpu_data;
- printk(KERN_INFO "CPU revision is: %08x (%s)\n",
- c->processor_id, cpu_name_string());
+ printk(KERN_INFO "CPU%d revision is: %08x (%s)\n",
+ smp_processor_id(), c->processor_id, cpu_name_string());
if (c->options & MIPS_CPU_FPU)
printk(KERN_INFO "FPU revision is: %08x\n", c->fpu_id);
+ if (cpu_has_msa)
+ pr_info("MSA revision is: %08x\n", c->msa_id);
}
diff --git a/arch/mips/kernel/elf.c b/arch/mips/kernel/elf.c
new file mode 100644
index 0000000..ec947e6
--- /dev/null
+++ b/arch/mips/kernel/elf.c
@@ -0,0 +1,210 @@
+/*
+ * Copyright (C) 2014 Imagination Technologies
+ * Author: Paul Burton <paul.burton@imgtec.com>
+ * Edited by: Leonid Yegoshin <Leonid.Yegoshin@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/elf.h>
+#include <linux/sched.h>
+
+/* ELF Binary and interpreter FPU ABI matching matrix.
+ * Line format: { ANY, DOUBLE, SINGLE, SOFT,
+ * OLD_64, XX, 64, 64A }
+ *
+ * Note: SOFT combined with another ABI resolves to that other ABI, because
+ * today there are NO inter-calls with floating-point arguments between an
+ * ELF binary and its interpreter; in any other case it would have to be
+ * FP_ERROR. This does not apply to NXX because O32 and N32/N64 are totally
+ * incompatible.
+ */
+static int mips_abi_fp_mix[MIPS_ABI_FP_MAX][MIPS_ABI_FP_MAX] = {
+/* ANY */
+{ MIPS_ABI_FP_ANY, MIPS_ABI_FP_DOUBLE, MIPS_ABI_FP_SINGLE, MIPS_ABI_FP_SOFT,
+ MIPS_ABI_FP_OLD_64, MIPS_ABI_FP_XX, MIPS_ABI_FP_64, MIPS_ABI_FP_64A },
+/* DOUBLE */
+{ MIPS_ABI_FP_DOUBLE, MIPS_ABI_FP_DOUBLE, FP_ERROR, MIPS_ABI_FP_DOUBLE,
+ FP_ERROR, MIPS_ABI_FP_DOUBLE, FP_ERROR, FP_DOUBLE_64A },
+/* SINGLE */
+{ MIPS_ABI_FP_SINGLE, FP_ERROR, MIPS_ABI_FP_SINGLE, MIPS_ABI_FP_SINGLE,
+ FP_ERROR, FP_ERROR, FP_ERROR, FP_ERROR },
+/* SOFT */
+{ MIPS_ABI_FP_SOFT, MIPS_ABI_FP_DOUBLE, MIPS_ABI_FP_SINGLE, MIPS_ABI_FP_SOFT,
+ MIPS_ABI_FP_OLD_64, MIPS_ABI_FP_XX, MIPS_ABI_FP_64, MIPS_ABI_FP_64A },
+
+/* OLD_64 */
+{ MIPS_ABI_FP_OLD_64, FP_ERROR, FP_ERROR, MIPS_ABI_FP_OLD_64,
+ MIPS_ABI_FP_OLD_64, FP_ERROR, FP_ERROR, FP_ERROR },
+/* XX */
+{ MIPS_ABI_FP_XX, MIPS_ABI_FP_DOUBLE, FP_ERROR, MIPS_ABI_FP_XX,
+ FP_ERROR, MIPS_ABI_FP_XX, MIPS_ABI_FP_64, MIPS_ABI_FP_64A },
+/* 64 */
+{ MIPS_ABI_FP_64, FP_ERROR, FP_ERROR, MIPS_ABI_FP_64,
+ FP_ERROR, MIPS_ABI_FP_64, MIPS_ABI_FP_64, MIPS_ABI_FP_64 },
+/* 64A */
+{ MIPS_ABI_FP_64A, FP_DOUBLE_64A, FP_ERROR, MIPS_ABI_FP_64A,
+ FP_ERROR, MIPS_ABI_FP_64A, MIPS_ABI_FP_64, MIPS_ABI_FP_64A }
+};
+
+/* ELF binary and interpreter FPU ABI matching row for an UNKNOWN MIPS32 FPU
+ * ABI, which is assumed to be O32 DOUBLE.
+ * Line format: { ANY, DOUBLE, SINGLE, SOFT,
+ * OLD_64, XX, 64, 64A }
+ */
+static int mips_abi_fp_unknown[MIPS_ABI_FP_MAX] =
+/* UNKNOWN */
+{ MIPS_ABI_FP_UNKNOWN, MIPS_ABI_FP_DOUBLE, FP_ERROR, MIPS_ABI_FP_UNKNOWN,
+ FP_ERROR, MIPS_ABI_FP_XX, FP_ERROR, FP_DOUBLE_64A };
+
+int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf,
+ bool is_interp, struct arch_elf_state *state)
+{
+ struct elf32_hdr *ehdr = _ehdr;
+ struct elf32_phdr *phdr = _phdr;
+ struct mips_elf_abiflags_v0 abiflags;
+ int ret;
+
+ if ((config_enabled(CONFIG_64BIT) &&
+ (ehdr->e_ident[EI_CLASS] != ELFCLASS32)) ||
+ ((ehdr->e_ident[EI_CLASS] == ELFCLASS32) &&
+ ((ehdr->e_flags & EF_MIPS_ABI2) != 0) &&
+ ((ehdr->e_flags & EF_MIPS_ABI) == 0)))
+ return 0;
+
+ if (phdr->p_type != PT_MIPS_ABIFLAGS)
+ return 0;
+ if (phdr->p_filesz < sizeof(abiflags))
+ return -EINVAL;
+
+ ret = kernel_read(elf, phdr->p_offset, (char *)&abiflags,
+ sizeof(abiflags));
+ if (ret < 0)
+ return ret;
+ if (ret != sizeof(abiflags))
+ return -EIO;
+
+ /* Record the required FP ABIs for use by mips_check_elf */
+ if (is_interp)
+ state->interp_fp_abi = abiflags.fp_abi;
+ else
+ state->fp_abi = abiflags.fp_abi;
+
+ return 0;
+}
+
+static inline unsigned get_fp_abi(struct elf32_hdr *ehdr, int in_abi)
+{
+ if ((in_abi >= MIPS_ABI_FP_MAX) &&
+ (in_abi != MIPS_ABI_FP_NXX) &&
+ (in_abi != MIPS_ABI_FP_UNKNOWN))
+ return FP_ERROR;
+
+ /* If the ABI requirement is provided, simply return that */
+ if (in_abi != MIPS_ABI_FP_UNKNOWN)
+ return in_abi;
+
+ if ((config_enabled(CONFIG_64BIT) &&
+ (ehdr->e_ident[EI_CLASS] != ELFCLASS32)) ||
+ ((ehdr->e_ident[EI_CLASS] == ELFCLASS32) &&
+ ((ehdr->e_flags & EF_MIPS_ABI2) != 0) &&
+ ((ehdr->e_flags & EF_MIPS_ABI) == 0))) {
+ /* Set NXX FP ABI for N32/N64 */
+ return MIPS_ABI_FP_NXX;
+ }
+
+ /* If the EF_MIPS_32BITMODE_FP64 flag was set, return MIPS_ABI_FP_OLD_64 */
+ if ((ehdr->e_ident[EI_CLASS] == ELFCLASS32) &&
+ (ehdr->e_flags & EF_MIPS_32BITMODE_FP64))
+ return MIPS_ABI_FP_OLD_64;
+
+ return MIPS_ABI_FP_UNKNOWN;
+}
+
+int arch_check_elf(void *_ehdr, bool has_interpreter,
+ struct arch_elf_state *state)
+{
+ struct elf32_hdr *ehdr = _ehdr;
+ unsigned fp_abi, interp_fp_abi;
+
+ fp_abi = get_fp_abi(ehdr, state->fp_abi);
+ if (fp_abi == FP_ERROR)
+ return -ELIBBAD;
+
+ if (!has_interpreter) {
+ state->overall_abi = fp_abi;
+ return 0;
+ }
+
+ interp_fp_abi = get_fp_abi(ehdr, state->interp_fp_abi);
+ if (interp_fp_abi == FP_ERROR)
+ return -ELIBBAD;
+
+ if (interp_fp_abi == MIPS_ABI_FP_NXX) {
+ if (fp_abi == MIPS_ABI_FP_NXX) {
+ state->overall_abi = MIPS_ABI_FP_NXX;
+ return 0;
+ }
+ return -ELIBBAD;
+ }
+
+ if ((interp_fp_abi == MIPS_ABI_FP_UNKNOWN) &&
+ (fp_abi == MIPS_ABI_FP_UNKNOWN)) {
+ state->overall_abi = MIPS_ABI_FP_UNKNOWN;
+ return 0;
+ }
+ if (interp_fp_abi == MIPS_ABI_FP_UNKNOWN)
+ state->overall_abi = mips_abi_fp_unknown[fp_abi];
+ else if (fp_abi == MIPS_ABI_FP_UNKNOWN)
+ state->overall_abi = mips_abi_fp_unknown[interp_fp_abi];
+ else
+ state->overall_abi = mips_abi_fp_mix[fp_abi][interp_fp_abi];
+
+ if (state->overall_abi == FP_ERROR)
+ return -ELIBBAD;
+
+ return 0;
+}
+
+void mips_set_personality_fp(struct arch_elf_state *state)
+{
+ switch (state->overall_abi) {
+ case MIPS_ABI_FP_UNKNOWN:
+ case MIPS_ABI_FP_DOUBLE:
+ case MIPS_ABI_FP_SINGLE:
+ /* FR=0 */
+ clear_thread_local_flags(LTIF_FPU_FR|LTIF_FPU_FRE);
+ break;
+
+ case FP_DOUBLE_64A:
+ /* Android elf bins mix, FR=1 and FRE=1 are enforced */
+ set_thread_local_flags(LTIF_FPU_FR|LTIF_FPU_FRE);
+ break;
+
+ case MIPS_ABI_FP_NXX:
+ case MIPS_ABI_FP_64:
+ case MIPS_ABI_FP_64A:
+ case MIPS_ABI_FP_OLD_64:
+ /* FR=1 */
+ set_thread_local_flags(LTIF_FPU_FR);
+ clear_thread_local_flags(LTIF_FPU_FRE);
+ break;
+
+ case MIPS_ABI_FP_XX:
+ case MIPS_ABI_FP_ANY:
+ case MIPS_ABI_FP_SOFT:
+ if (config_enabled(CONFIG_CPU_MIPSR6))
+ set_thread_local_flags(LTIF_FPU_FR);
+ else
+ clear_thread_local_flags(LTIF_FPU_FR);
+
+ clear_thread_local_flags(LTIF_FPU_FRE);
+ break;
+
+ default:
+ case FP_ERROR:
+ BUG();
+ }
+}
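Putting the pieces together, arch_check_elf() resolves the (binary,
interpreter) ABI pair in order: NXX must match exactly, a single UNKNOWN side
is resolved through the mips_abi_fp_unknown row, and everything else is looked
up in the mips_abi_fp_mix matrix. A self-contained sketch of that resolution
order with a stub 2x2 matrix (the NXX exact-match step is omitted; the real
tables above are 8x8):

#include <stdio.h>

enum { ABI_ANY, ABI_DOUBLE, ABI_MAX, ABI_UNKNOWN };

/* stub mix matrix: row = binary FP ABI, column = interpreter FP ABI */
static const int mix[ABI_MAX][ABI_MAX] = {
	{ ABI_ANY,    ABI_DOUBLE },
	{ ABI_DOUBLE, ABI_DOUBLE },
};
static const int unknown_row[ABI_MAX] = { ABI_UNKNOWN, ABI_DOUBLE };

static int resolve(int bin, int interp)
{
	if (bin == ABI_UNKNOWN && interp == ABI_UNKNOWN)
		return ABI_UNKNOWN;
	if (interp == ABI_UNKNOWN)
		return unknown_row[bin];	/* one unknown side acts as a wildcard */
	if (bin == ABI_UNKNOWN)
		return unknown_row[interp];
	return mix[bin][interp];		/* both sides known: matrix lookup */
}

int main(void)
{
	printf("%d\n", resolve(ABI_ANY, ABI_DOUBLE));	  /* -> ABI_DOUBLE */
	printf("%d\n", resolve(ABI_UNKNOWN, ABI_DOUBLE)); /* -> ABI_DOUBLE */
	return 0;
}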
diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S
index e578685..3ca5fa3 100644
--- a/arch/mips/kernel/entry.S
+++ b/arch/mips/kernel/entry.S
@@ -129,6 +129,20 @@
RESTORE_AT
RESTORE_STATIC
restore_partial: # restore partial frame
+ LONG_L v0, PT_STATUS(sp) # returning to kernel mode?
+ andi v0, v0, KU_USER
+ beqz v0, 3f
+ LONG_L v0, TI_FLAGS($28)
+ and v0, _TIF_FPU_LOSE_REQUEST
+ beqz v0, 3f
+ SAVE_STATIC
+ SAVE_AT
+ SAVE_TEMP
+ jal mips_lose_fpu
+ RESTORE_TEMP
+ RESTORE_AT
+ RESTORE_STATIC
+3:
#ifdef CONFIG_TRACE_IRQFLAGS
SAVE_STATIC
SAVE_AT
@@ -196,17 +210,28 @@
jal syscall_trace_leave
b resume_userspace
-#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT)
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) || defined(CONFIG_MIPS_MT)
/*
* MIPS32R2 Instruction Hazard Barrier - must be called
*
* For C code use the inline version named instruction_hazard().
*/
+#ifdef CONFIG_EVA
+ .align 8
+#endif
LEAF(mips_ihb)
- .set mips32r2
+#ifdef CONFIG_CPU_MIPSR6
+ .set mips64r6
+#else
+ .set mips32r2
+#endif
jr.hb ra
nop
+ .set mips0
+#ifdef CONFIG_EVA
+ .align 8
+#endif
END(mips_ihb)
-#endif /* CONFIG_CPU_MIPSR2 or CONFIG_MIPS_MT */
+#endif /* CONFIG_CPU_MIPSR2 or CONFIG_CPU_MIPSR6 or CONFIG_MIPS_MT */
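The C-callable version mentioned in the comment wraps the same jr.hb in inline
asm; the usual trick is to load the address of the very next instruction and
jump there through jr.hb, so the barrier is a fall-through. A sketch modelled
on the kernel's instruction_hazard() macro (MIPS32R2; the assembler is left in
reorder mode so it fills the jr.hb delay slot itself):

/* sketch only; compiles in a MIPS32R2 context */
static inline void instruction_hazard_sketch(void)
{
	unsigned long tmp;

	__asm__ __volatile__(
	"	.set	push		\n"
	"	.set	mips32r2	\n"
	"	la	%0, 1f		\n"	/* address of the next instruction */
	"	jr.hb	%0		\n"	/* jump there, clearing hazards */
	"	.set	pop		\n"
	"1:				\n"
	: "=r" (tmp));
}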
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c
index dba90ec..d635ba2 100644
--- a/arch/mips/kernel/ftrace.c
+++ b/arch/mips/kernel/ftrace.c
@@ -87,6 +87,7 @@
static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
{
int faulted;
+ mm_segment_t old_fs;
/* *(unsigned int *)ip = new_code; */
safe_store_code(new_code, ip, faulted);
@@ -94,7 +95,10 @@
if (unlikely(faulted))
return -EFAULT;
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
flush_icache_range(ip, ip + 8);
+ set_fs(old_fs);
return 0;
}
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index 31fa856..22153da 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -67,8 +67,8 @@
*/
NESTED(except_vec3_r4000, 0, sp)
.set push
- .set mips3
- .set noat
+ .set mips3
+ .set noat
mfc0 k1, CP0_CAUSE
li k0, 31<<2
andi k1, k1, 0x7c
@@ -139,12 +139,16 @@
nop
nop
#endif
+#ifdef CONFIG_CPU_MIPSR6
+ .set mips64r6
+#else
.set mips3
+#endif
wait
/* end of rollback region (the region size must be power of two) */
1:
jr ra
- nop
+ nop
.set pop
END(__r4k_wait)
@@ -190,9 +194,12 @@
#else
and k0, ST0_IE
bnez k0, 1f
-
+#ifdef CONFIG_CPU_MIPSR6
+ eretnc
+#else
eret
#endif
+#endif
1:
.set pop
#endif
@@ -374,12 +381,33 @@
NESTED(nmi_handler, PT_SIZE, sp)
.set push
.set noat
+ /* Clear ERL - restore segment mapping */
+ mfc0 k0, CP0_STATUS
+ ori k0, k0, ST0_EXL
+ lui k1, 0xffff & ~(ST0_BEV>>16)
+ ori k1, k1, 0xffff & ~(ST0_ERL)
+ and k0, k0, k1
+ mtc0 k0, CP0_STATUS
+ ehb
SAVE_ALL
move a0, sp
jal nmi_exception_handler
RESTORE_ALL
+ /* Set ERL and clear EXL|NMI */
+ mfc0 k0, CP0_STATUS
+ ori k0, k0, ST0_ERL
+ lui k1, 0xffff & ~(ST0_NMI>>16)
+ ori k1, k1, 0xffff & ~(ST0_EXL)
+ and k0, k0, k1
+ mtc0 k0, CP0_STATUS
+ ehb
+#ifdef CONFIG_CPU_MIPSR6
+ .set mips64r6
+ eretnc
+#else
.set mips3
eret
+#endif
.set pop
END(nmi_handler)
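The lui/ori pairs in the NMI entry and exit paths are only building 32-bit
constants for a read-modify-write of c0_status; in C the two sequences collapse
to plain mask expressions. A sketch of the equivalent logic (ST0_* bit
positions as in the usual MIPS c0_status layout):

#include <stdint.h>
#include <stdio.h>

#define ST0_EXL 0x00000002
#define ST0_ERL 0x00000004
#define ST0_NMI 0x00080000
#define ST0_BEV 0x00400000

/* on NMI entry: keep EXL set, clear BEV and ERL to restore segment mapping */
static uint32_t nmi_entry_status(uint32_t status)
{
	return (status | ST0_EXL) & ~(ST0_BEV | ST0_ERL);
}

/* on NMI exit: set ERL again, clear EXL and the NMI status bit */
static uint32_t nmi_exit_status(uint32_t status)
{
	return (status | ST0_ERL) & ~(ST0_NMI | ST0_EXL);
}

int main(void)
{
	uint32_t s = ST0_BEV | ST0_NMI | ST0_ERL;	/* a typical NMI-entry value */

	printf("entry: %08x\n", nmi_entry_status(s));
	printf("exit:  %08x\n", nmi_exit_status(s));
	return 0;
}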
@@ -400,6 +428,7 @@
.set push
/* gas fails to assemble cfc1 for some archs (octeon).*/ \
.set mips1
+ SET_HARDFLOAT
cfc1 a1, fcr31
li a2, ~(0x3f << 12)
and a2, a1
@@ -409,6 +438,15 @@
STI
.endm
+ .macro __build_clear_msa_fpe
+ _cfcmsa a1, MSA_CSR
+ li a2, ~(0x3f << 12)
+ and a1, a1, a2
+ _ctcmsa MSA_CSR, a1
+ TRACE_IRQS_ON
+ STI
+ .endm
+
.macro __build_clear_ade
MFC0 t0, CP0_BADVADDR
PTR_S t0, PT_BVADDR(sp)
@@ -467,7 +505,10 @@
BUILD_HANDLER cpu cpu sti silent /* #11 */
BUILD_HANDLER ov ov sti silent /* #12 */
BUILD_HANDLER tr tr sti silent /* #13 */
+ BUILD_HANDLER msa_fpe msa_fpe msa_fpe silent /* #14 */
BUILD_HANDLER fpe fpe fpe silent /* #15 */
+ BUILD_HANDLER ftlb ftlb none silent /* #16 */
+ BUILD_HANDLER msa msa sti silent /* #21 */
BUILD_HANDLER mdmx mdmx sti silent /* #22 */
#ifdef CONFIG_HARDWARE_WATCHPOINTS
/*
@@ -566,14 +607,19 @@
ori k1, _THREAD_MASK
xori k1, _THREAD_MASK
LONG_L v1, TI_TP_VALUE(k1)
+#ifdef CONFIG_CPU_MIPSR6
+ .set mips64r6
+ eretnc
+#else
.set mips3
eret
+#endif
.set mips0
#endif
.set pop
END(handle_ri_rdhwr)
-#ifdef CONFIG_64BIT
+#if defined(CONFIG_64BIT) && !defined(CONFIG_CPU_MIPSR6)
/* A temporary overflow handler used by check_daddi(). */
__INIT
diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S
index c61cdae..aa98896 100644
--- a/arch/mips/kernel/head.S
+++ b/arch/mips/kernel/head.S
@@ -142,9 +142,13 @@
__REF
+#ifdef CONFIG_EVA
+ .align 8
+#endif
+
NESTED(kernel_entry, 16, sp) # kernel entry point
- kernel_entry_setup # cpu specific setup
+ kernel_entry_setup # cpu specific setup
setup_c0_status_pri
@@ -152,6 +156,9 @@
so we jump there. */
PTR_LA t0, 0f
jr t0
+#ifdef CONFIG_EVA
+ .align 8
+#endif
0:
#ifdef CONFIG_MIPS_MT_SMTC
@@ -172,6 +179,23 @@
mtc0 t0, CP0_STATUS
#endif /* CONFIG_MIPS_MT_SMTC */
+#ifdef CONFIG_MIPS_APPENDED_DTB
+ PTR_LA t0, __appended_dtb
+ PTR_LI t3, 0
+
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ PTR_LI t1, 0xd00dfeed
+#else
+ PTR_LI t1, 0xedfe0dd0
+#endif
+ LONG_L t2, (t0)
+ bne t1, t2, not_found
+
+ PTR_LA t3, __appended_dtb
+
+not_found:
+#endif
+
PTR_LA t0, __bss_start # clear .bss
LONG_S zero, (t0)
PTR_LA t1, __bss_stop - LONGSIZE
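The magic compare in the appended-DTB block above is the standard flattened
device tree check: an FDT blob begins with the big-endian word 0xd00dfeed,
which a little-endian word load sees as 0xedfe0dd0. The same validation as a
user-space sketch:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define FDT_MAGIC 0xd00dfeedu	/* stored big-endian in the blob */

static int looks_like_fdt(const void *blob)
{
	uint32_t word;

	memcpy(&word, blob, sizeof(word));	/* raw word load from the blob */
#if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
	return word == 0xedfe0dd0u;	/* byte-swapped view of FDT_MAGIC */
#else
	return word == FDT_MAGIC;
#endif
}

int main(void)
{
	const unsigned char blob[4] = { 0xd0, 0x0d, 0xfe, 0xed };

	printf("fdt: %d\n", looks_like_fdt(blob));	/* 1 on either endianness */
	return 0;
}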
@@ -185,6 +209,10 @@
LONG_S a2, fw_arg2
LONG_S a3, fw_arg3
+#ifdef CONFIG_MIPS_APPENDED_DTB
+ LONG_S t3, initial_boot_params
+#endif
+
MTC0 zero, CP0_CONTEXT # clear context register
PTR_LA $28, init_thread_union
/* Set the SP after an empty pt_regs. */
@@ -204,7 +232,13 @@
* SMP slave cpus entry point. Board specific code for bootstrap calls this
* function after setting up the stack and gp registers.
*/
+
+#ifdef CONFIG_EVA
+ .align 8
+#endif
+
NESTED(smp_bootstrap, 16, sp)
+
#ifdef CONFIG_MIPS_MT_SMTC
/*
* Read-modify-writes of Status must be atomic, and this
@@ -216,8 +250,10 @@
DMT 10 # dmt t2 /* t0, t1 are used by CLI and setup_c0_status() */
jal mips_ihb
#endif /* CONFIG_MIPS_MT_SMTC */
- setup_c0_status_sec
+
smp_slave_setup
+ setup_c0_status_sec
+
#ifdef CONFIG_MIPS_MT_SMTC
andi t2, t2, VPECONTROL_TE
beqz t2, 2f
@@ -225,6 +261,9 @@
2:
#endif /* CONFIG_MIPS_MT_SMTC */
j start_secondary
+#ifdef CONFIG_EVA
+ .align 8
+#endif
END(smp_bootstrap)
#endif /* CONFIG_SMP */
diff --git a/arch/mips/kernel/idle.c b/arch/mips/kernel/idle.c
index 0c655de..5b11d9e 100644
--- a/arch/mips/kernel/idle.c
+++ b/arch/mips/kernel/idle.c
@@ -182,6 +182,10 @@
case CPU_24K:
case CPU_34K:
case CPU_1004K:
+ case CPU_PROAPTIV:
+ case CPU_INTERAPTIV:
+ case CPU_VIRTUOSO:
+ case CPU_P5600:
cpu_wait = r4k_wait;
if (read_c0_config7() & MIPS_CONF7_WII)
cpu_wait = r4k_wait_irqoff;
@@ -194,6 +198,7 @@
break;
case CPU_TX49XX:
+ case CPU_SAMURAI:
cpu_wait = r4k_wait_irqoff;
break;
case CPU_ALCHEMY:
diff --git a/arch/mips/kernel/irq-gic.c b/arch/mips/kernel/irq-gic.c
index c01b307..33b11aa 100644
--- a/arch/mips/kernel/irq-gic.c
+++ b/arch/mips/kernel/irq-gic.c
@@ -12,6 +12,9 @@
#include <linux/irq.h>
#include <linux/clocksource.h>
+#include <linux/cpu.h>
+#include <linux/slab.h>
+
#include <asm/io.h>
#include <asm/gic.h>
#include <asm/setup.h>
@@ -19,6 +22,7 @@
#include <asm/gcmpregs.h>
#include <linux/hardirq.h>
#include <asm-generic/bitops/find.h>
+#include <asm/irq_cpu.h>
unsigned int gic_frequency;
unsigned int gic_present;
@@ -104,12 +108,11 @@
spurious_interrupt();
}
-static void __init vpe_local_setup(unsigned int numvpes)
+static void __init vpe_local_setup(void)
{
unsigned long timer_intr = GIC_INT_TMR;
unsigned long perf_intr = GIC_INT_PERFCTR;
unsigned int vpe_ctl;
- int i;
if (cpu_has_veic) {
/*
@@ -126,29 +129,33 @@
/*
* Setup the default performance counter timer interrupts
- * for all VPEs
*/
- for (i = 0; i < numvpes; i++) {
- GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
- /* Are Interrupts locally routable? */
- GICREAD(GIC_REG(VPE_OTHER, GIC_VPE_CTL), vpe_ctl);
- if (vpe_ctl & GIC_VPE_CTL_TIMER_RTBL_MSK)
- GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_TIMER_MAP),
- GIC_MAP_TO_PIN_MSK | timer_intr);
- if (cpu_has_veic) {
- set_vi_handler(timer_intr + GIC_PIN_TO_VEC_OFFSET,
- gic_eic_irq_dispatch);
- gic_shared_intr_map[timer_intr + GIC_PIN_TO_VEC_OFFSET].local_intr_mask |= GIC_VPE_RMASK_TIMER_MSK;
- }
+ /* Are Interrupts locally routable? */
+ GICREAD(GIC_REG(VPE_LOCAL, GIC_VPE_CTL), vpe_ctl);
+ if (vpe_ctl & GIC_VPE_CTL_TIMER_RTBL_MSK) {
+ if (cp0_compare_irq >= 2)
+ timer_intr = cp0_compare_irq - 2;
+ GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_TIMER_MAP),
+ GIC_MAP_TO_PIN_MSK | timer_intr);
+ mips_smp_c0_status_mask |= (0x400 << timer_intr);
+ }
+ if (cpu_has_veic) {
+ set_vi_handler(timer_intr + GIC_PIN_TO_VEC_OFFSET,
+ gic_eic_irq_dispatch);
+ gic_shared_intr_map[timer_intr + GIC_PIN_TO_VEC_OFFSET].local_intr_mask |= GIC_VPE_RMASK_TIMER_MSK;
+ }
- if (vpe_ctl & GIC_VPE_CTL_PERFCNT_RTBL_MSK)
- GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_PERFCTR_MAP),
- GIC_MAP_TO_PIN_MSK | perf_intr);
- if (cpu_has_veic) {
- set_vi_handler(perf_intr + GIC_PIN_TO_VEC_OFFSET, gic_eic_irq_dispatch);
- gic_shared_intr_map[perf_intr + GIC_PIN_TO_VEC_OFFSET].local_intr_mask |= GIC_VPE_RMASK_PERFCNT_MSK;
- }
+ if (vpe_ctl & GIC_VPE_CTL_PERFCNT_RTBL_MSK) {
+ if (cp0_perfcount_irq >= 2)
+ perf_intr = cp0_perfcount_irq - 2;
+ GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_PERFCTR_MAP),
+ GIC_MAP_TO_PIN_MSK | perf_intr);
+ mips_smp_c0_status_mask |= (0x400 << perf_intr);
+ }
+ if (cpu_has_veic) {
+ set_vi_handler(perf_intr + GIC_PIN_TO_VEC_OFFSET, gic_eic_irq_dispatch);
+ gic_shared_intr_map[perf_intr + GIC_PIN_TO_VEC_OFFSET].local_intr_mask |= GIC_VPE_RMASK_PERFCNT_MSK;
}
}
@@ -202,9 +209,10 @@
GIC_SET_INTR_MASK(d->irq - gic_irq_base);
}
-#ifdef CONFIG_SMP
static DEFINE_SPINLOCK(gic_lock);
+#ifdef CONFIG_SMP
+
static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
bool force)
{
@@ -219,16 +227,15 @@
/* Assumption : cpumask refers to a single CPU */
spin_lock_irqsave(&gic_lock, flags);
- for (;;) {
- /* Re-route this IRQ */
- GIC_SH_MAP_TO_VPE_SMASK(irq, first_cpu(tmp));
- /* Update the pcpu_masks */
- for (i = 0; i < NR_CPUS; i++)
- clear_bit(irq, pcpu_masks[i].pcpu_mask);
- set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask);
+ /* Re-route this IRQ */
+ GIC_SH_MAP_TO_VPE_SMASK(irq, first_cpu(tmp));
- }
+ /* Update the pcpu_masks */
+ for (i = 0; i < NR_CPUS; i++)
+ clear_bit(irq, pcpu_masks[i].pcpu_mask);
+ set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask);
+
cpumask_copy(d->affinity, cpumask);
spin_unlock_irqrestore(&gic_lock, flags);
@@ -248,11 +255,13 @@
#endif
};
-static void __init gic_setup_intr(unsigned int intr, unsigned int cpu,
+static void __init gic_setup_intr(unsigned int intr,
unsigned int pin, unsigned int polarity, unsigned int trigtype,
unsigned int flags)
{
struct gic_shared_intr_map *map_ptr;
+ unsigned int gic_vpe;
+ unsigned int cpu;
/* Setup Intr to Pin mapping */
if (pin & GIC_MAP_TO_NMI_MSK) {
@@ -267,7 +276,8 @@
GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_MAP_TO_PIN(intr)),
GIC_MAP_TO_PIN_MSK | pin);
/* Setup Intr to CPU mapping */
- GIC_SH_MAP_TO_VPE_SMASK(intr, cpu);
+ gic_vpe = GIC_ID(smp_processor_id());
+ GIC_SH_MAP_TO_VPE_SMASK(intr, gic_vpe);
if (cpu_has_veic) {
set_vi_handler(pin + GIC_PIN_TO_VEC_OFFSET,
gic_eic_irq_dispatch);
@@ -288,22 +298,58 @@
GIC_CLR_INTR_MASK(intr);
/* Initialise per-cpu Interrupt software masks */
if (flags & GIC_FLAG_IPI)
- set_bit(intr, pcpu_masks[cpu].pcpu_mask);
- if ((flags & GIC_FLAG_TRANSPARENT) && (cpu_has_veic == 0))
+ set_bit(intr, pcpu_masks[smp_processor_id()].pcpu_mask);
+ if (((flags & GIC_FLAG_TRANSPARENT) && (cpu_has_veic == 0)) ||
+ (flags & GIC_FLAG_IPI))
GIC_SET_INTR_MASK(intr);
if (trigtype == GIC_TRIG_EDGE)
gic_irq_flags[intr] |= GIC_TRIG_EDGE;
}
-static void __init gic_basic_init(int numintrs, int numvpes,
- struct gic_intr_map *intrmap, int mapsize)
+static int _gic_intr_map_size;
+static struct gic_intr_map *_gic_intr_map;
+
+void vpe_gic_setup(void)
{
unsigned int i, cpu;
unsigned int pin_offset = 0;
+ /*
+ * In EIC mode, the HW_INT# is offset by (2-1). Need to subtract
+ * one because the GIC will add one (since 0=no intr).
+ */
+ if (cpu_has_veic)
+ pin_offset = (GIC_CPU_TO_VEC_OFFSET - GIC_PIN_TO_VEC_OFFSET);
+
+ /* Setup specifics */
+ spin_lock(&gic_lock);
+ for (i = 0; i < _gic_intr_map_size; i++) {
+ cpu = _gic_intr_map[i].cpunum;
+ if (cpu == GIC_UNUSED)
+ continue;
+ if (cpu == 0 && i != 0 && _gic_intr_map[i].flags == 0)
+ continue;
+ if (cpu != smp_processor_id())
+ continue;
+ gic_setup_intr(i,
+ _gic_intr_map[i].pin + pin_offset,
+ _gic_intr_map[i].polarity,
+ _gic_intr_map[i].trigtype,
+ _gic_intr_map[i].flags);
+ }
+ spin_unlock(&gic_lock);
+
+ vpe_local_setup();
+}
+
+static void __init gic_basic_init(int numintrs,
+ struct gic_intr_map *intrmap, int mapsize)
+{
+ unsigned int i;
+
board_bind_eic_interrupt = &gic_bind_eic_interrupt;
- /* Setup defaults */
+ /* Setup defaults, no need to spin_lock because it is a boot */
for (i = 0; i < numintrs; i++) {
GIC_SET_POLARITY(i, GIC_POL_POS);
GIC_SET_TRIGGER(i, GIC_TRIG_LEVEL);
@@ -315,29 +361,9 @@
}
}
- /*
- * In EIC mode, the HW_INT# is offset by (2-1). Need to subtract
- * one because the GIC will add one (since 0=no intr).
- */
- if (cpu_has_veic)
- pin_offset = (GIC_CPU_TO_VEC_OFFSET - GIC_PIN_TO_VEC_OFFSET);
-
- /* Setup specifics */
- for (i = 0; i < mapsize; i++) {
- cpu = intrmap[i].cpunum;
- if (cpu == GIC_UNUSED)
- continue;
- if (cpu == 0 && i != 0 && intrmap[i].flags == 0)
- continue;
- gic_setup_intr(i,
- intrmap[i].cpunum,
- intrmap[i].pin + pin_offset,
- intrmap[i].polarity,
- intrmap[i].trigtype,
- intrmap[i].flags);
- }
-
- vpe_local_setup(numvpes);
+ _gic_intr_map_size = mapsize;
+ _gic_intr_map = intrmap;
+ vpe_gic_setup();
}
void __init gic_init(unsigned long gic_base_addr,
@@ -346,22 +372,294 @@
unsigned int irqbase)
{
unsigned int gicconfig;
- int numvpes, numintrs;
+ int numintrs;
_gic_base = (unsigned long) ioremap_nocache(gic_base_addr,
gic_addrspace_size);
gic_irq_base = irqbase;
GICREAD(GIC_REG(SHARED, GIC_SH_CONFIG), gicconfig);
+ gicconfig &= ~GIC_SH_CONFIG_COUNTSTOP_MSK;
+ GICWRITE(GIC_REG(SHARED, GIC_SH_CONFIG), gicconfig);
numintrs = (gicconfig & GIC_SH_CONFIG_NUMINTRS_MSK) >>
GIC_SH_CONFIG_NUMINTRS_SHF;
numintrs = ((numintrs + 1) * 8);
- numvpes = (gicconfig & GIC_SH_CONFIG_NUMVPES_MSK) >>
- GIC_SH_CONFIG_NUMVPES_SHF;
- numvpes = numvpes + 1;
-
- gic_basic_init(numintrs, numvpes, intr_map, intr_map_size);
+ gic_basic_init(numintrs, intr_map, intr_map_size);
gic_platform_init(numintrs, &gic_irq_controller);
}
+
+
+#ifdef CONFIG_SYSFS
+static ssize_t show_gic_global(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int n = 0;
+ int i,j;
+ int numints;
+
+ n = snprintf(buf, PAGE_SIZE,
+ "GIC Config Register\t\t%08x\n"
+ "GIC CounterLo\t\t\t%08x\n"
+ "GIC CounterHi\t\t\t%08x\n"
+ "GIC Revision\t\t\t%08x\n"
+
+ "Global Interrupt Polarity Registers:\t\t%08x %08x %08x %08x\n"
+ "\t\t\t\t\t\t%08x %08x %08x %08x\n"
+ "Global Interrupt Trigger Type Registers:\t%08x %08x %08x %08x\n"
+ "\t\t\t\t\t\t%08x %08x %08x %08x\n"
+ "Global Interrupt Dual Edge Registers:\t\t%08x %08x %08x %08x\n"
+ "\t\t\t\t\t\t%08x %08x %08x %08x\n"
+ "Global Interrupt Write Edge Register:\t\t%08x\n"
+ "Global Interrupt Reset Mask Registers:\t\t%08x %08x %08x %08x\n"
+ "\t\t\t\t\t\t%08x %08x %08x %08x\n"
+ "Global Interrupt Set Mask Registers:\t\t%08x %08x %08x %08x\n"
+ "\t\t\t\t\t\t%08x %08x %08x %08x\n"
+ "Global Interrupt Mask Registers:\t\t%08x %08x %08x %08x\n"
+ "\t\t\t\t\t\t%08x %08x %08x %08x\n"
+ "Global Interrupt Pending Registers:\t\t%08x %08x %08x %08x\n"
+ "\t\t\t\t\t\t%08x %08x %08x %08x\n"
+ ,
+ GIC_REG(SHARED,GIC_SH_CONFIG),
+ GIC_REG(SHARED,GIC_SH_COUNTER_31_00),
+ GIC_REG(SHARED,GIC_SH_COUNTER_63_32),
+ GIC_REG(SHARED,GIC_SH_REVISIONID),
+ GIC_REG(SHARED,GIC_SH_POL_31_0), GIC_REG(SHARED,GIC_SH_POL_63_32),
+ GIC_REG(SHARED,GIC_SH_POL_95_64), GIC_REG(SHARED,GIC_SH_POL_127_96),
+ GIC_REG(SHARED,GIC_SH_POL_159_128), GIC_REG(SHARED,GIC_SH_POL_191_160),
+ GIC_REG(SHARED,GIC_SH_POL_223_192), GIC_REG(SHARED,GIC_SH_POL_255_224),
+ GIC_REG(SHARED,GIC_SH_TRIG_31_0), GIC_REG(SHARED,GIC_SH_TRIG_63_32),
+ GIC_REG(SHARED,GIC_SH_TRIG_95_64), GIC_REG(SHARED,GIC_SH_TRIG_127_96),
+ GIC_REG(SHARED,GIC_SH_TRIG_159_128), GIC_REG(SHARED,GIC_SH_TRIG_191_160),
+ GIC_REG(SHARED,GIC_SH_TRIG_223_192), GIC_REG(SHARED,GIC_SH_TRIG_255_224),
+ GIC_REG(SHARED,GIC_SH_DUAL_31_0), GIC_REG(SHARED,GIC_SH_DUAL_63_32),
+ GIC_REG(SHARED,GIC_SH_DUAL_95_64), GIC_REG(SHARED,GIC_SH_DUAL_127_96),
+ GIC_REG(SHARED,GIC_SH_DUAL_159_128), GIC_REG(SHARED,GIC_SH_DUAL_191_160),
+ GIC_REG(SHARED,GIC_SH_DUAL_223_192), GIC_REG(SHARED,GIC_SH_DUAL_255_224),
+ GIC_REG(SHARED,GIC_SH_WEDGE),
+ GIC_REG(SHARED,GIC_SH_RMASK_31_0), GIC_REG(SHARED,GIC_SH_RMASK_63_32),
+ GIC_REG(SHARED,GIC_SH_RMASK_95_64), GIC_REG(SHARED,GIC_SH_RMASK_127_96),
+ GIC_REG(SHARED,GIC_SH_RMASK_159_128), GIC_REG(SHARED,GIC_SH_RMASK_191_160),
+ GIC_REG(SHARED,GIC_SH_RMASK_223_192), GIC_REG(SHARED,GIC_SH_RMASK_255_224),
+ GIC_REG(SHARED,GIC_SH_SMASK_31_0), GIC_REG(SHARED,GIC_SH_SMASK_63_32),
+ GIC_REG(SHARED,GIC_SH_SMASK_95_64), GIC_REG(SHARED,GIC_SH_SMASK_127_96),
+ GIC_REG(SHARED,GIC_SH_SMASK_159_128), GIC_REG(SHARED,GIC_SH_SMASK_191_160),
+ GIC_REG(SHARED,GIC_SH_SMASK_223_192), GIC_REG(SHARED,GIC_SH_SMASK_255_224),
+ GIC_REG(SHARED,GIC_SH_MASK_31_0), GIC_REG(SHARED,GIC_SH_MASK_63_32),
+ GIC_REG(SHARED,GIC_SH_MASK_95_64), GIC_REG(SHARED,GIC_SH_MASK_127_96),
+ GIC_REG(SHARED,GIC_SH_MASK_159_128), GIC_REG(SHARED,GIC_SH_MASK_191_160),
+ GIC_REG(SHARED,GIC_SH_MASK_223_192), GIC_REG(SHARED,GIC_SH_MASK_255_224),
+ GIC_REG(SHARED,GIC_SH_PEND_31_0), GIC_REG(SHARED,GIC_SH_PEND_63_32),
+ GIC_REG(SHARED,GIC_SH_PEND_95_64), GIC_REG(SHARED,GIC_SH_PEND_127_96),
+ GIC_REG(SHARED,GIC_SH_PEND_159_128), GIC_REG(SHARED,GIC_SH_PEND_191_160),
+ GIC_REG(SHARED,GIC_SH_PEND_223_192), GIC_REG(SHARED,GIC_SH_PEND_255_224)
+ );
+
+ numints = (GIC_REG(SHARED,GIC_SH_CONFIG) & GIC_SH_CONFIG_NUMINTRS_MSK) >> GIC_SH_CONFIG_NUMINTRS_SHF;
+ numints = (numints + 1) * 8;
+
+ n += snprintf(buf+n, PAGE_SIZE-n,
+ "\nGlobal Interrupt Map SrcX to Pin:\n");
+ for (i=0; i<numints; i++) {
+
+ if ((i % 8) == 0)
+ n += snprintf(buf+n, PAGE_SIZE-n, "%02x:\t",i);
+ n += snprintf(buf+n, PAGE_SIZE-n,
+ "%08x ",GIC_REG_ADDR(SHARED,GIC_SH_MAP_TO_PIN(i)));
+ if ((i % 8) == 7)
+ n += snprintf(buf+n, PAGE_SIZE-n, "\n");
+ };
+
+ n += snprintf(buf+n, PAGE_SIZE-n,
+ "\nGlobal Interrupt Map SrcX to VPE:\n");
+ for (i=0; i<numints; i++) {
+ if ((i % 4) == 0)
+ n += snprintf(buf+n, PAGE_SIZE-n, "%02x:\t",i);
+ for (j=0; j<2; j++) {
+ n += snprintf(buf+n, PAGE_SIZE-n,
+ "%08x ",GIC_REG_ADDR(SHARED,GIC_SH_INTR_MAP_TO_VPE_BASE_OFS + ((j * 4) + (i * 32))));
+ };
+ n += snprintf(buf+n, PAGE_SIZE-n, "\t");
+ if ((i % 4) == 3)
+ n += snprintf(buf+n, PAGE_SIZE-n, "\n");
+ };
+
+ if (gcmp3_present) {
+ n += snprintf(buf+n, PAGE_SIZE-n,
+ "EJTAG Break register\t\t\t%08x%08x\n"
+ "Debug Team ID low\t\t\t%08x%08x\n"
+ "Debug Team ID high\t\t\t%08x%08x\n"
+ "Debug Team ID for external VC\t\t%08x\n"
+ "GIC Debug Mode Config Register\t\t%08x\n"
+ "GIC Shared DINT Group Participate\t%08x%08x\n"
+ "GIC Shared Debug Mode Status\t\t%08x%08x\n"
+ ,
+ GIC_REGhi(SHARED,GIC_EJTAG), GIC_REG(SHARED,GIC_EJTAG),
+ GIC_REGhi(SHARED,GIC_DTLOW), GIC_REG(SHARED,GIC_DTLOW),
+ GIC_REGhi(SHARED,GIC_DTHI), GIC_REG(SHARED,GIC_DTHI),
+ GIC_REG(SHARED,GIC_DTEXT),
+ GIC_REG(SHARED,GIC_DMODECONF),
+ GIC_REGhi(SHARED,GIC_DINTGRP), GIC_REG(SHARED,GIC_DINTGRP),
+ GIC_REGhi(SHARED,GIC_DMSTATUS), GIC_REG(SHARED,GIC_DMSTATUS)
+ );
+ } else {
+ n += snprintf(buf+n, PAGE_SIZE-n,
+ "\nDINT Send to Group Register\t\t%08x\n",
+ GIC_REG(SHARED,GIC_DINT));
+ }
+
+ return n;
+}
+
+static ssize_t show_gic_local(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long irq_flags;
+ int n = 0;
+ int i;
+
+ local_irq_save(irq_flags);
+ GIC_REG(VPE_LOCAL,GIC_VPE_OTHER_ADDR) = (dev->id);
+
+ n += snprintf(buf+n, PAGE_SIZE-n,
+ "Local Interrupt Control Register:\t\t%08x\n"
+ "Local Interrupt Pending Register:\t\t%08x\n"
+ "Local Mask Register:\t\t\t\t%08x\n"
+ "Local Reset Mask Register:\t\t\t%08x\n"
+ "Local Set Mask Register:\t\t\t%08x\n"
+ "Local WatchDog Map-to-Pin Register:\t\t%08x\n"
+ "Local GIC Counter/Compare Map-to-Pin Register:\t%08x\n"
+ "Local CPU Timer Map-to-Pin Register:\t\t%08x\n"
+ "Local CPU Fast Debug Channel Map-to-Pin:\t%08x\n"
+ "Local Perf Counter Map-to-Pin Register:\t\t%08x\n"
+ "Local SWInt0 Map-to-Pin Register:\t\t%08x\n"
+ "Local SWInt1 Map-to-Pin Register:\t\t%08x\n"
+ "VPE-Other Addressing Register:\t\t\t%08x\n"
+ "VPE-Local Identification Register:\t\t%08x\n"
+ "Programmable/Watchdog Timer0 Config Register:\t\t%08x\n"
+ "Programmable/Watchdog Timer0 Count Register:\t\t%08x\n"
+ "Programmable/Watchdog Timer0 Initial Count Register:\t%08x\n"
+ "CompareLo Register:\t\t\t\t%08x\n"
+ "CompareHi Register:\t\t\t\t%08x\n"
+ ,
+ GIC_REG(VPE_OTHER,GIC_VPE_CTL),
+ GIC_REG(VPE_OTHER,GIC_VPE_PEND),
+ GIC_REG(VPE_OTHER,GIC_VPE_MASK),
+ GIC_REG(VPE_OTHER,GIC_VPE_RMASK),
+ GIC_REG(VPE_OTHER,GIC_VPE_SMASK),
+ GIC_REG(VPE_OTHER,GIC_VPE_WD_MAP),
+ GIC_REG(VPE_OTHER,GIC_VPE_COMPARE_MAP),
+ GIC_REG(VPE_OTHER,GIC_VPE_TIMER_MAP),
+ GIC_REG(VPE_OTHER,GIC_VPE_FDEBUG_MAP),
+ GIC_REG(VPE_OTHER,GIC_VPE_PERFCTR_MAP),
+ GIC_REG(VPE_OTHER,GIC_VPE_SWINT0_MAP),
+ GIC_REG(VPE_OTHER,GIC_VPE_SWINT1_MAP),
+ GIC_REG(VPE_OTHER,GIC_VPE_OTHER_ADDR),
+ GIC_REG(VPE_OTHER,GIC_VPE_ID),
+ GIC_REG(VPE_OTHER,GIC_VPE_WD_CONFIG0),
+ GIC_REG(VPE_OTHER,GIC_VPE_WD_COUNT0),
+ GIC_REG(VPE_OTHER,GIC_VPE_WD_INITIAL0),
+ GIC_REG(VPE_OTHER,GIC_VPE_COMPARE_LO),
+ GIC_REG(VPE_OTHER,GIC_VPE_COMPARE_HI)
+ );
+
+ n += snprintf(buf+n, PAGE_SIZE-n,
+ "\nEIC Shadow Set for Interrupt SrcX:\n");
+ for (i=0; i<64; i++) {
+ if ((i % 8) == 0)
+ n += snprintf(buf+n, PAGE_SIZE-n, "%02x:\t",i);
+ n += snprintf(buf+n, PAGE_SIZE-n,
+ "%08x ",GIC_REG_ADDR(VPE_OTHER,GIC_VPE_EIC_SS(i)));
+ if ((i % 8) == 7)
+ n += snprintf(buf+n, PAGE_SIZE-n, "\n");
+ };
+
+ n += snprintf(buf+n, PAGE_SIZE-n,
+ "\nGuest Counter Offset\t\t\t%08x\n",
+ GIC_REG(VPE_OTHER,GIC_VPE_GCTR_OFFSET));
+
+ n += snprintf(buf+n, PAGE_SIZE-n,
+ "\nVPE Local DINT Group Participate Register:\t%08x\n"
+ "VPE Local DebugBreak Group Register:\t\t%08x\n"
+ ,
+ GIC_REG(VPE_OTHER,GIC_VPE_DINT),
+ GIC_REG(VPE_OTHER,GIC_VPE_DEBUG_BREAK)
+ );
+
+ local_irq_restore(irq_flags);
+
+ return n;
+}
+
+static DEVICE_ATTR(gic_global, 0444, show_gic_global, NULL);
+static DEVICE_ATTR(gic_local, 0400, show_gic_local, NULL);
+
+static struct bus_type gic_subsys = {
+ .name = "gic",
+ .dev_name = "gic",
+};
+
+
+
+static __cpuinit int gic_add_vpe(int cpu)
+{
+ struct device *dev;
+ int err;
+ char name[16];
+
+ dev = kzalloc(sizeof *dev, GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ dev->id = cpu;
+ dev->bus = &gic_subsys;
+ snprintf(name, sizeof name, "vpe%d",cpu);
+ dev->init_name = name;
+
+ err = device_register(dev);
+ if (err) {
+ put_device(dev);
+ return err;
+ }
+
+ err = device_create_file(dev, &dev_attr_gic_local);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int __init init_gic_sysfs(void)
+{
+ int rc;
+ int vpeN;
+ int vpe;
+ int gvpe;
+
+ if (!gic_present)
+ return 0;
+
+ rc = subsys_system_register(&gic_subsys, NULL);
+ if (rc)
+ return rc;
+
+ rc = device_create_file(gic_subsys.dev_root, &dev_attr_gic_global);
+ if (rc)
+ return rc;
+
+ vpeN = ((GIC_REG(SHARED,GIC_SH_CONFIG) & GIC_SH_CONFIG_NUMVPES_MSK) >> GIC_SH_CONFIG_NUMVPES_SHF) + 1;
+ for (vpe=0; vpe<vpeN; vpe++) {
+ gvpe = cpu_data[vpe].g_vpe;
+ /* gvpe == 0 means the VPE was skipped during boot */
+ if (vpe && !gvpe)
+ continue;
+ rc = gic_add_vpe(gvpe);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
+device_initcall_sync(init_gic_sysfs);
+
+#endif /* CONFIG_SYSFS */
diff --git a/arch/mips/kernel/irq-msc01.c b/arch/mips/kernel/irq-msc01.c
index fab40f7..ac9facc 100644
--- a/arch/mips/kernel/irq-msc01.c
+++ b/arch/mips/kernel/irq-msc01.c
@@ -131,7 +131,7 @@
board_bind_eic_interrupt = &msc_bind_eic_interrupt;
- for (; nirq >= 0; nirq--, imp++) {
+ for (; nirq > 0; nirq--, imp++) {
int n = imp->im_irq;
switch (imp->im_type) {
diff --git a/arch/mips/kernel/irq_cpu.c b/arch/mips/kernel/irq_cpu.c
index 72ef2d2..9457bb6 100644
--- a/arch/mips/kernel/irq_cpu.c
+++ b/arch/mips/kernel/irq_cpu.c
@@ -26,6 +26,8 @@
*
* This file exports one global function:
* void mips_cpu_irq_init(void);
+ * and one variable:
+ * unsigned mips_smp_c0_status_mask;
*/
#include <linux/init.h>
#include <linux/interrupt.h>
@@ -37,6 +39,8 @@
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
+unsigned mips_smp_c0_status_mask;
+
static inline void unmask_mips_irq(struct irq_data *d)
{
set_c0_status(0x100 << (d->irq - MIPS_CPU_IRQ_BASE));
diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c
index fcaac2f..284f84b 100644
--- a/arch/mips/kernel/kgdb.c
+++ b/arch/mips/kernel/kgdb.c
@@ -32,6 +32,7 @@
#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/sigcontext.h>
+#include <asm/uaccess.h>
static struct hard_trap_info {
unsigned char tt; /* Trap type code for MIPS R3xxx and R4xxx */
@@ -208,7 +209,14 @@
static void kgdb_call_nmi_hook(void *ignored)
{
+ mm_segment_t old_fs;
+
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+
kgdb_nmicallback(raw_smp_processor_id(), NULL);
+
+ set_fs(old_fs);
}
void kgdb_roundup_cpus(unsigned long flags)
@@ -282,6 +290,7 @@
struct die_args *args = (struct die_args *)ptr;
struct pt_regs *regs = args->regs;
int trap = (regs->cp0_cause & 0x7c) >> 2;
+ mm_segment_t old_fs;
#ifdef CONFIG_KPROBES
/*
@@ -296,11 +305,16 @@
if (user_mode(regs))
return NOTIFY_DONE;
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+
if (atomic_read(&kgdb_active) != -1)
kgdb_nmicallback(smp_processor_id(), regs);
- if (kgdb_handle_exception(trap, compute_signal(trap), cmd, regs))
+ if (kgdb_handle_exception(trap, compute_signal(trap), cmd, regs)) {
+ set_fs(old_fs);
return NOTIFY_DONE;
+ }
if (atomic_read(&kgdb_setting_breakpoint))
if ((trap == 9) && (regs->cp0_epc == (unsigned long)breakinst))
@@ -310,6 +324,7 @@
local_irq_enable();
__flush_cache_all();
+ set_fs(old_fs);
return NOTIFY_STOP;
}
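The pattern threaded through this file is the classic address-limit override:
save the current segment, force KERNEL_DS so the user-access helpers will
accept kernel pointers while KGDB inspects memory, and restore the saved limit
on every exit path. As a kernel-context sketch of the idiom (get_fs()/set_fs()
as available in this kernel generation):

/* sketch of the save/override/restore idiom used above */
static void with_kernel_ds(void (*fn)(void *), void *arg)
{
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);	/* let __get_user() et al. touch kernel addresses */
	fn(arg);
	set_fs(old_fs);		/* restore, regardless of what fn() accessed */
}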
diff --git a/arch/mips/kernel/mcount.S b/arch/mips/kernel/mcount.S
index 33d0671..a03e93c 100644
--- a/arch/mips/kernel/mcount.S
+++ b/arch/mips/kernel/mcount.S
@@ -168,15 +168,11 @@
#endif
/* arg3: Get frame pointer of current stack */
-#ifdef CONFIG_FRAME_POINTER
- move a2, fp
-#else /* ! CONFIG_FRAME_POINTER */
#ifdef CONFIG_64BIT
PTR_LA a2, PT_SIZE(sp)
#else
PTR_LA a2, (PT_SIZE+8)(sp)
#endif
-#endif
jal prepare_ftrace_return
nop
diff --git a/arch/mips/kernel/mips-r2-emul.c b/arch/mips/kernel/mips-r2-emul.c
new file mode 100644
index 0000000..eda08c4
--- /dev/null
+++ b/arch/mips/kernel/mips-r2-emul.c
@@ -0,0 +1,2008 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2014 Imagination Technologies, LLC. All rights reserved.
+ *
+ * MIPS R2 user space instruction emulator
+ *
+ * Author: Leonid Yegoshin
+ */
+#include <linux/bug.h>
+#include <linux/compiler.h>
+#include <linux/ptrace.h>
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#include <asm/branch.h>
+#include <asm/fpu.h>
+#include <asm/fpu_emulator.h>
+#include <asm/ptrace.h>
+#include <asm/uaccess.h>
+#include <asm/asm.h>
+
+#include <asm/break.h>
+#include <asm/inst.h>
+#include <asm/local.h>
+
+/* number of loop cycles before we give up */
+#define TOTAL_PASS 10
+
+#ifdef CONFIG_CPU_MIPS64
+#define ADDIU "daddiu "
+#define INS "dins "
+#define EXT "dext "
+#define SB "sb "
+#define LB "lb "
+#define LL "ll "
+#define SC "sc "
+#else /* !CONFIG_CPU_MIPS64 */
+#define ADDIU "addiu "
+#define INS "ins "
+#define EXT "ext "
+#ifdef CONFIG_EVA
+#define SB "sbe "
+#define LB "lbe "
+#define LL "lle "
+#define SC "sce "
+#else
+#define SB "sb "
+#define LB "lb "
+#define LL "ll "
+#define SC "sc "
+#endif
+#endif
+
+#ifdef CONFIG_DEBUG_FS
+
+struct mips_r2_emulator_stats {
+ local_t movs;
+ local_t hilo;
+ local_t muls;
+ local_t divs;
+ local_t dsps;
+ local_t bops;
+ local_t traps;
+ local_t fpus;
+ local_t loads;
+ local_t stores;
+ local_t llsc;
+ local_t dsemul;
+};
+
+DEFINE_PER_CPU(struct mips_r2_emulator_stats, mipsr2emustats);
+DEFINE_PER_CPU(struct mips_r2_emulator_stats, mipsr2bdemustats);
+
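+/* NOTE: MIPS_R2_STATS() expects a 'struct pt_regs *regs' to be in scope at
+ * the expansion site; if the instruction at EPC is the BREAK_MATH trampoline
+ * marker, the event is additionally counted in the BD-slot statistics.
+ */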
+#define MIPS_R2_STATS(M) \
+do { \
+ u32 nir; \
+ int err; \
+ \
+ preempt_disable(); \
+ __local_inc(&__get_cpu_var(mipsr2emustats).M); \
+ err = __get_user(nir, (u32 __user *)regs->cp0_epc); \
+ if (!err) { \
+ if (nir == BREAK_MATH) \
+ __local_inc(&__get_cpu_var(mipsr2bdemustats).M); \
+ } \
+ preempt_enable(); \
+} while (0)
+
+struct mips_r2br_emulator_stats {
+ local_t jrs;
+ local_t bltzl;
+ local_t bgezl;
+ local_t bltzll;
+ local_t bgezll;
+ local_t bltzall;
+ local_t bgezall;
+ local_t bltzal;
+ local_t bgezal;
+ local_t beql;
+ local_t bnel;
+ local_t blezl;
+ local_t bgtzl;
+};
+
+DEFINE_PER_CPU(struct mips_r2br_emulator_stats, mipsr2bremustats);
+
+#define MIPS_R2BR_STATS(M) \
+do { \
+ preempt_disable(); \
+ __local_inc(&__get_cpu_var(mipsr2bremustats).M); \
+ preempt_enable(); \
+} while (0)
+
+#else
+
+#define MIPS_R2_STATS(M) do { } while (0)
+#define MIPS_R2BR_STATS(M) do { } while (0)
+
+#endif /* CONFIG_DEBUG_FS */
+
+struct r2_decoder_table {
+ u32 mask;
+ u32 code;
+ int (*func)(struct pt_regs *regs, u32 inst);
+};
+
+
+int mipsr2_emulation = 1;
+
+void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
+ const char *str);
+
+static int __init nomipsr2_func(char *s)
+{
+ mipsr2_emulation = 0;
+ return 1;
+}
+__setup("nomipsr2", nomipsr2_func);
+
+/* Emulate some frequent R2/R5/R6 instructions in the delay slot of a JR for
+ * performance; otherwise they would be emulated via the stack trampoline...
+ * very slow.
+ */
+static inline int mipsr6_emul(struct pt_regs *regs, u32 ir)
+{
+ switch (MIPSInst_OPCODE(ir)) {
+ case addiu_op:
+ if (MIPSInst_RT(ir))
+ regs->regs[MIPSInst_RT(ir)] =
+ (s32)regs->regs[MIPSInst_RS(ir)] +
+ (s32)MIPSInst_SIMM(ir);
+ return(0);
+#ifdef CONFIG_64BIT
+ case daddiu_op:
+ if (MIPSInst_RT(ir))
+ regs->regs[MIPSInst_RT(ir)] =
+ (s64)regs->regs[MIPSInst_RS(ir)] +
+ (s64)MIPSInst_SIMM(ir);
+ return(0);
+#endif
+ case lwc1_op:
+ case swc1_op:
+ case cop1_op:
+ case cop1x_op:
+ /* indicate FPU instructions */
+ return(-SIGFPE);
+ case spec_op:
+ switch (MIPSInst_FUNC(ir)) {
+ case or_op:
+ if (MIPSInst_RD(ir))
+ regs->regs[MIPSInst_RD(ir)] =
+ regs->regs[MIPSInst_RS(ir)] |
+ regs->regs[MIPSInst_RT(ir)];
+ return(0);
+ case sll_op:
+ if (MIPSInst_RS(ir))
+ break;
+ if (MIPSInst_RD(ir))
+ regs->regs[MIPSInst_RD(ir)] =
+ (s32)(((u32)regs->regs[MIPSInst_RT(ir)]) <<
+ MIPSInst_FD(ir));
+ return(0);
+ case srl_op:
+ if (MIPSInst_RS(ir))
+ break;
+ if (MIPSInst_RD(ir))
+ regs->regs[MIPSInst_RD(ir)] =
+ (s32)(((u32)regs->regs[MIPSInst_RT(ir)]) >>
+ MIPSInst_FD(ir));
+ return(0);
+ case addu_op:
+ if (MIPSInst_FD(ir))
+ break;
+ if (MIPSInst_RD(ir))
+ regs->regs[MIPSInst_RD(ir)] =
+ (s32)((u32)regs->regs[MIPSInst_RS(ir)] +
+ (u32)regs->regs[MIPSInst_RT(ir)]);
+ return(0);
+ case subu_op:
+ if (MIPSInst_FD(ir))
+ break;
+ if (MIPSInst_RD(ir))
+ regs->regs[MIPSInst_RD(ir)] =
+ (s32)((u32)regs->regs[MIPSInst_RS(ir)] -
+ (u32)regs->regs[MIPSInst_RT(ir)]);
+ return(0);
+#ifdef CONFIG_64BIT
+ case dsll_op:
+ if (MIPSInst_RS(ir))
+ break;
+ if (MIPSInst_RD(ir))
+ regs->regs[MIPSInst_RD(ir)] =
+ (s64)(((u64)regs->regs[MIPSInst_RT(ir)]) <<
+ MIPSInst_FD(ir));
+ return(0);
+ case dsrl_op:
+ if (MIPSInst_RS(ir))
+ break;
+ if (MIPSInst_RD(ir))
+ regs->regs[MIPSInst_RD(ir)] =
+ (s64)(((u64)regs->regs[MIPSInst_RT(ir)]) >>
+ MIPSInst_FD(ir));
+ return(0);
+ case daddu_op:
+ if (MIPSInst_FD(ir))
+ break;
+ if (MIPSInst_RD(ir))
+ regs->regs[MIPSInst_RD(ir)] =
+ (u64)regs->regs[MIPSInst_RS(ir)] +
+ (u64)regs->regs[MIPSInst_RT(ir)];
+ return(0);
+ case dsubu_op:
+ if (MIPSInst_FD(ir))
+ break;
+ if (MIPSInst_RD(ir))
+ regs->regs[MIPSInst_RD(ir)] =
+ (s64)((u64)regs->regs[MIPSInst_RS(ir)] -
+ (u64)regs->regs[MIPSInst_RT(ir)]);
+ return(0);
+#endif
+ }
+ break;
+ }
+ return SIGILL;
+}
+
+static int movf_func(struct pt_regs *regs, u32 ir)
+{
+ u32 csr;
+ u32 cond;
+
+ csr = current->thread.fpu.fcr31;
+ cond = fpucondbit[MIPSInst_RT(ir) >> 2];
+ if (((csr & cond) == 0) && MIPSInst_RD(ir))
+ regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)];
+ MIPS_R2_STATS(movs);
+ return(0);
+}
+
+static int movt_func(struct pt_regs *regs, u32 ir)
+{
+ u32 csr;
+ u32 cond;
+
+ csr = current->thread.fpu.fcr31;
+ cond = fpucondbit[MIPSInst_RT(ir) >> 2];
+ if (((csr & cond) != 0) && MIPSInst_RD(ir))
+ regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)];
+ MIPS_R2_STATS(movs);
+ return(0);
+}
+
+static int jr_func(struct pt_regs *regs, u32 ir)
+{
+ int err;
+ unsigned long epc;
+ unsigned long cpc;
+ unsigned long nepc;
+ unsigned long r31;
+ u32 nir;
+
+ if (delay_slot(regs))
+ return(SIGILL);
+ nepc = regs->cp0_epc;
+ regs->cp0_epc -= 4;
+ epc = regs->cp0_epc;
+ r31 = regs->regs[31];
+ err = __compute_return_epc(regs);
+ if (err < 0)
+ return(SIGEMT);
+ cpc = regs->cp0_epc;
+ err = __get_user(nir, (u32 __user *)nepc);
+ if (err)
+ return(SIGSEGV);
+ MIPS_R2BR_STATS(jrs);
+ if (nir) { /* NOP is easy */
+ /* Negative err means FPU instruction in BD-slot,
+ Zero err means 'BD-slot emulation done' */
+ if ((err = mipsr6_emul(regs,nir)) > 0) {
+ regs->cp0_epc = nepc;
+ err = mips_dsemul(regs, nir, cpc, epc, r31);
+ if (err == SIGILL)
+ err = SIGEMT;
+ MIPS_R2_STATS(dsemul);
+ }
+ }
+ return (err);
+}
+
+static int movz_func(struct pt_regs *regs, u32 ir)
+{
+ if (((regs->regs[MIPSInst_RT(ir)]) == 0) && MIPSInst_RD(ir))
+ regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)];
+ MIPS_R2_STATS(movs);
+ return(0);
+}
+
+static int movn_func(struct pt_regs *regs, u32 ir)
+{
+ if (((regs->regs[MIPSInst_RT(ir)]) != 0) && MIPSInst_RD(ir))
+ regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)];
+ MIPS_R2_STATS(movs);
+ return(0);
+}
+
+static int mfhi_func(struct pt_regs *regs, u32 ir)
+{
+ if (MIPSInst_RD(ir))
+ regs->regs[MIPSInst_RD(ir)] = regs->hi;
+ MIPS_R2_STATS(hilo);
+ return(0);
+}
+
+static int mthi_func(struct pt_regs *regs, u32 ir)
+{
+ regs->hi = regs->regs[MIPSInst_RS(ir)];
+ MIPS_R2_STATS(hilo);
+ return(0);
+}
+
+static int mflo_func(struct pt_regs *regs, u32 ir)
+{
+ if (MIPSInst_RD(ir))
+ regs->regs[MIPSInst_RD(ir)] = regs->lo;
+ MIPS_R2_STATS(hilo);
+ return(0);
+}
+
+static int mtlo_func(struct pt_regs *regs, u32 ir)
+{
+ regs->lo = regs->regs[MIPSInst_RS(ir)];
+ MIPS_R2_STATS(hilo);
+ return(0);
+}
+
+static int mult_func(struct pt_regs *regs, u32 ir)
+{
+ s64 res;
+ s32 rt, rs;
+
+ rt = regs->regs[MIPSInst_RT(ir)];
+ rs = regs->regs[MIPSInst_RS(ir)];
+ res = (s64)rt * (s64)rs;
+
+ rs = res;
+ regs->lo = (s64)rs;
+ rt = res >> 32;
+ res = (s64)rt;
+ regs->hi = res;
+ MIPS_R2_STATS(muls);
+ return(0);
+}
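mult_func() widens to 64 bits and then splits the product into HI/LO, bouncing
each half through a 32-bit variable so it is sign-extended the way the retired
MULT instruction would leave it on a 64-bit kernel. The same split as a
compact user-space sketch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int32_t rs = -3, rt = 100000000;
	int64_t res = (int64_t)rs * (int64_t)rt;
	/* each half is truncated to 32 bits, then sign-extended as HI/LO */
	int64_t lo = (int32_t)res;
	int64_t hi = (int32_t)(res >> 32);

	printf("hi=%lld lo=%lld\n", (long long)hi, (long long)lo);
	return 0;
}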
+
+static int multu_func(struct pt_regs *regs, u32 ir)
+{
+ u64 res;
+ u32 rt, rs;
+
+ rt = regs->regs[MIPSInst_RT(ir)];
+ rs = regs->regs[MIPSInst_RS(ir)];
+ res = (u64)rt * (u64)rs;
+ rt = res;
+ regs->lo = (s64)rt;
+ regs->hi = (s64)(res >> 32);
+ MIPS_R2_STATS(muls);
+ return(0);
+}
+
+static int div_func(struct pt_regs *regs, u32 ir)
+{
+ s32 rt, rs;
+
+ rt = regs->regs[MIPSInst_RT(ir)];
+ rs = regs->regs[MIPSInst_RS(ir)];
+
+ regs->lo = (s64)(rs / rt);
+ regs->hi = (s64)(rs % rt);
+ MIPS_R2_STATS(divs);
+ return(0);
+}
+
+static int divu_func(struct pt_regs *regs, u32 ir)
+{
+ u32 rt, rs;
+
+ rt = regs->regs[MIPSInst_RT(ir)];
+ rs = regs->regs[MIPSInst_RS(ir)];
+
+ regs->lo = (s64)(rs / rt);
+ regs->hi = (s64)(rs % rt);
+ MIPS_R2_STATS(divs);
+ return(0);
+}
+
+#ifdef CONFIG_64BIT
+static int dmult_func(struct pt_regs *regs, u32 ir)
+{
+ s64 res;
+ s64 rt, rs;
+
+ rt = regs->regs[MIPSInst_RT(ir)];
+ rs = regs->regs[MIPSInst_RS(ir)];
+ res = rt * rs;
+
+ regs->lo = res;
+ __asm__ __volatile__("dmuh %0, %1, %2" : "=r"(res) : "r"(rt), "r"(rs));
+ regs->hi = res;
+ MIPS_R2_STATS(muls);
+ return(0);
+}
+
+static int dmultu_func(struct pt_regs *regs, u32 ir)
+{
+ u64 res;
+ u64 rt, rs;
+
+ rt = regs->regs[MIPSInst_RT(ir)];
+ rs = regs->regs[MIPSInst_RS(ir)];
+ res = rt * rs;
+
+ regs->lo = res;
+ __asm__ __volatile__("dmuhu %0, %1, %2" : "=r"(res) : "r"(rt), "r"(rs));
+ regs->hi = res;
+ MIPS_R2_STATS(muls);
+ return(0);
+}
+
+static int ddiv_func(struct pt_regs *regs, u32 ir)
+{
+ s64 rt, rs;
+
+ rt = regs->regs[MIPSInst_RT(ir)];
+ rs = regs->regs[MIPSInst_RS(ir)];
+
+ regs->lo = rs / rt;
+ regs->hi = rs % rt;
+ MIPS_R2_STATS(divs);
+ return(0);
+}
+
+static int ddivu_func(struct pt_regs *regs, u32 ir)
+{
+ u64 rt, rs;
+
+ rt = regs->regs[MIPSInst_RT(ir)];
+ rs = regs->regs[MIPSInst_RS(ir)];
+
+ regs->lo = rs / rt;
+ regs->hi = rs % rt;
+ MIPS_R2_STATS(divs);
+ return(0);
+}
+#endif
+
+static struct r2_decoder_table spec_op_table[] = {
+ { 0xfc1ff83f, 0x00000008, jr_func },
+ { 0xfc00ffff, 0x00000018, mult_func }, /* case AC=0 only */
+ { 0xfc00ffff, 0x00000019, multu_func }, /* case AC=0 only */
+#ifdef CONFIG_64BIT
+ { 0xfc00ffff, 0x0000001c, dmult_func }, /* case AC=0 only */
+ { 0xfc00ffff, 0x0000001d, dmultu_func }, /* case AC=0 only */
+#endif
+ { 0xffff07ff, 0x00000010, mfhi_func },
+ { 0xfc1fffff, 0x00000011, mthi_func },
+ { 0xffff07ff, 0x00000012, mflo_func },
+ { 0xfc1fffff, 0x00000013, mtlo_func },
+ { 0xfc0307ff, 0x00000001, movf_func },
+ { 0xfc0307ff, 0x00010001, movt_func },
+ { 0xfc0007ff, 0x0000000a, movz_func },
+ { 0xfc0007ff, 0x0000000b, movn_func },
+ { 0xfc00ffff, 0x0000001a, div_func },
+ { 0xfc00ffff, 0x0000001b, divu_func },
+#ifdef CONFIG_64BIT
+ { 0xfc00ffff, 0x0000001e, ddiv_func },
+ { 0xfc00ffff, 0x0000001f, ddivu_func },
+#endif
+ { }
+};
+
+static int madd_func(struct pt_regs *regs, u32 ir)
+{
+ s64 res;
+ s32 rt, rs;
+
+ rt = regs->regs[MIPSInst_RT(ir)];
+ rs = regs->regs[MIPSInst_RS(ir)];
+ res = (s64)rt * (s64)rs;
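+ /* Rebuild the 64-bit accumulator from hi/lo, add the product,
+ then split the sum back into hi/lo below. */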
+ rt = regs->hi;
+ rs = regs->lo;
+ res += ((((s64)rt) << 32) | (u32)rs);
+
+ rt = res;
+ regs->lo = (s64)rt;
+ rs = res >> 32;
+ regs->hi = (s64)rs;
+ MIPS_R2_STATS(dsps);
+ return(0);
+}
+
+static int maddu_func(struct pt_regs *regs, u32 ir)
+{
+ u64 res;
+ u32 rt, rs;
+
+ rt = regs->regs[MIPSInst_RT(ir)];
+ rs = regs->regs[MIPSInst_RS(ir)];
+ res = (u64)rt * (u64)rs;
+ rt = regs->hi;
+ rs = regs->lo;
+ res += ((((s64)rt) << 32) | (u32)rs);
+
+ rt = res;
+ regs->lo = (s64)rt;
+ rs = res >> 32;
+ regs->hi = (s64)rs;
+ MIPS_R2_STATS(dsps);
+ return(0);
+}
+
+static int msub_func(struct pt_regs *regs, u32 ir)
+{
+ s64 res;
+ s32 rt, rs;
+
+ rt = regs->regs[MIPSInst_RT(ir)];
+ rs = regs->regs[MIPSInst_RS(ir)];
+ res = (s64)rt * (s64)rs;
+ rt = regs->hi;
+ rs = regs->lo;
+ res = ((((s64)rt) << 32) | (u32)rs) - res;
+
+ rt = res;
+ regs->lo = (s64)rt;
+ rs = res >> 32;
+ regs->hi = (s64)rs;
+ MIPS_R2_STATS(dsps);
+ return(0);
+}
+
+static int msubu_func(struct pt_regs *regs, u32 ir)
+{
+ u64 res;
+ u32 rt, rs;
+
+ rt = regs->regs[MIPSInst_RT(ir)];
+ rs = regs->regs[MIPSInst_RS(ir)];
+ res = (u64)rt * (u64)rs;
+ rt = regs->hi;
+ rs = regs->lo;
+ res = ((((s64)rt) << 32) | (u32)rs) - res;
+
+ rt = res;
+ regs->lo = (s64)rt;
+ rs = res >> 32;
+ regs->hi = (s64)rs;
+ MIPS_R2_STATS(dsps);
+ return(0);
+}
+
+static int mul_func(struct pt_regs *regs, u32 ir)
+{
+ s64 res;
+ s32 rt, rs;
+
+ if (!MIPSInst_RD(ir))
+ return(0);
+ rt = regs->regs[MIPSInst_RT(ir)];
+ rs = regs->regs[MIPSInst_RS(ir)];
+ res = (s64)rt * (s64)rs;
+
+ rs = res;
+ regs->regs[MIPSInst_RD(ir)] = (s64)rs;
+ MIPS_R2_STATS(muls);
+ return(0);
+}
+
+static int clz_func(struct pt_regs *regs, u32 ir)
+{
+ u32 res;
+ u32 rs;
+
+ if (!MIPSInst_RD(ir))
+ return(0);
+ rs = regs->regs[MIPSInst_RS(ir)];
+ __asm__ __volatile__("clz %0, %1" : "=r"(res) : "r"(rs));
+ regs->regs[MIPSInst_RD(ir)] = res;
+ MIPS_R2_STATS(bops);
+ return(0);
+}
+
+static int clo_func(struct pt_regs *regs, u32 ir)
+{
+ u32 res;
+ u32 rs;
+
+ if (!MIPSInst_RD(ir))
+ return(0);
+ rs = regs->regs[MIPSInst_RS(ir)];
+ __asm__ __volatile__("clo %0, %1" : "=r"(res) : "r"(rs));
+ regs->regs[MIPSInst_RD(ir)] = res;
+ MIPS_R2_STATS(bops);
+ return(0);
+}
+
+#ifdef CONFIG_64BIT
+static int dclz_func(struct pt_regs *regs, u32 ir)
+{
+ u64 res;
+ u64 rs;
+
+ if (!MIPSInst_RD(ir))
+ return(0);
+ rs = regs->regs[MIPSInst_RS(ir)];
+ __asm__ __volatile__("dclz %0, %1" : "=r"(res) : "r"(rs));
+ regs->regs[MIPSInst_RD(ir)] = res;
+ MIPS_R2_STATS(bops);
+ return(0);
+}
+
+static int dclo_func(struct pt_regs *regs, u32 ir)
+{
+ u64 res;
+ u64 rs;
+
+ if (!MIPSInst_RD(ir))
+ return(0);
+ rs = regs->regs[MIPSInst_RS(ir)];
+ __asm__ __volatile__("dclo %0, %1" : "=r"(res) : "r"(rs));
+ regs->regs[MIPSInst_RD(ir)] = res;
+ MIPS_R2_STATS(bops);
+ return(0);
+}
+#endif
+
+static struct r2_decoder_table spec2_op_table[] = {
+ { 0xfc00ffff, 0x70000000, madd_func },
+ { 0xfc00ffff, 0x70000001, maddu_func },
+ { 0xfc0007ff, 0x70000002, mul_func },
+ { 0xfc00ffff, 0x70000004, msub_func }, /* case AC=0 only */
+ { 0xfc00ffff, 0x70000005, msubu_func }, /* case AC=0 only */
+ { 0xfc0007ff, 0x70000020, clz_func },
+ { 0xfc0007ff, 0x70000021, clo_func },
+#ifdef CONFIG_64BIT
+ { 0xfc0007ff, 0x70000024, dclz_func },
+ { 0xfc0007ff, 0x70000025, dclo_func },
+#endif
+/* { 0xfc00003f, 0x7000003f, sdbbp_func }, - no SDBBP support in R2 emulation */
+ { }
+};
+
+static inline int mipsr2_find_ex(struct pt_regs *regs, u32 inst,
+ struct r2_decoder_table *table)
+{
+ struct r2_decoder_table *p;
+ int err;
+
+ for (p = table; p->func; p++) {
+ if ((inst & p->mask) == p->code) {
+ err = (p->func)(regs, inst);
+ return(err);
+ }
+ }
+ return(SIGILL);
+}
+
+int mipsr2_decoder(struct pt_regs *regs, u32 instruction)
+{
+ int err = 0;
+ unsigned long vaddr;
+ u32 inst = instruction;
+ u32 nir;
+ unsigned long r31;
+ unsigned long epc;
+ unsigned long nepc;
+ unsigned long cpc;
+ unsigned long rt;
+ unsigned long rs;
+ unsigned long res;
+ void __user *fault_addr = NULL;
+ int pass = 0;
+
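+ /* Emulate up to TOTAL_PASS instructions per trap: on success the
+ check at the bottom fetches the instruction at the new EPC and
+ jumps back here, so a run of R2 instructions gets a chance to
+ complete without taking a fresh exception for each one. */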
+repeat:
+ r31 = regs->regs[31];
+ epc = regs->cp0_epc;
+ err = compute_return_epc(regs);
+ if (err < 0)
+ return(SIGEMT);
+
+ switch (MIPSInst_OPCODE(inst)) {
+
+ case spec_op:
+ err = mipsr2_find_ex(regs, inst, spec_op_table);
+ if (err < 0) {
+ /* FPU instruction under JR */
+ regs->cp0_cause |= CAUSEF_BD;
+ goto fpu_emul;
+ }
+ break;
+
+ case spec2_op:
+ err = mipsr2_find_ex(regs, inst, spec2_op_table);
+ break;
+
+ case bcond_op:
+ rt = MIPSInst_RT(inst);
+ rs = MIPSInst_RS(inst);
+ switch (rt) {
+ case tgei_op:
+ if ((long)regs->regs[rs] >= MIPSInst_SIMM(inst))
+ do_trap_or_bp(regs, 0, "TGEI");
+ MIPS_R2_STATS(traps);
+ break;
+ case tgeiu_op:
+ if (regs->regs[rs] >= MIPSInst_UIMM(inst))
+ do_trap_or_bp(regs, 0, "TGEIU");
+ MIPS_R2_STATS(traps);
+ break;
+ case tlti_op:
+ if ((long)regs->regs[rs] < MIPSInst_SIMM(inst))
+ do_trap_or_bp(regs, 0, "TLTI");
+ MIPS_R2_STATS(traps);
+ break;
+ case tltiu_op:
+ if (regs->regs[rs] < MIPSInst_UIMM(inst))
+ do_trap_or_bp(regs, 0, "TLTIU");
+ MIPS_R2_STATS(traps);
+ break;
+ case teqi_op:
+ if (regs->regs[rs] == MIPSInst_SIMM(inst))
+ do_trap_or_bp(regs, 0, "TEQI");
+ MIPS_R2_STATS(traps);
+ break;
+ case tnei_op:
+ if (regs->regs[rs] != MIPSInst_SIMM(inst))
+ do_trap_or_bp(regs, 0, "TNEI");
+ MIPS_R2_STATS(traps);
+ break;
+ case bltzl_op:
+ case bgezl_op:
+ case bltzall_op:
+ case bgezall_op:
+ if (delay_slot(regs)) {
+ err = SIGILL;
+ break;
+ }
+ regs->regs[31] = r31;
+ regs->cp0_epc = epc;
+ err = __compute_return_epc(regs);
+ if (err < 0)
+ return(SIGEMT);
+ if (err != BRANCH_LIKELY_TAKEN)
+ break;
+ cpc = regs->cp0_epc;
+ nepc = epc + 4;
+ err = __get_user(nir, (u32 __user *)nepc);
+ if (err) {
+ err = SIGSEGV;
+ break;
+ }
+#ifdef CONFIG_DEBUG_FS
+ switch (rt) {
+ case bltzl_op:
+ MIPS_R2BR_STATS(bltzl);
+ break;
+ case bgezl_op:
+ MIPS_R2BR_STATS(bgezl);
+ break;
+ case bltzall_op:
+ MIPS_R2BR_STATS(bltzall);
+ break;
+ case bgezall_op:
+ MIPS_R2BR_STATS(bgezall);
+ break;
+ }
+#endif
+ switch (MIPSInst_OPCODE(nir)) {
+ case cop1_op:
+ case cop1x_op:
+ case lwc1_op:
+ case swc1_op:
+ regs->cp0_cause |= CAUSEF_BD;
+ goto fpu_emul;
+ }
+ if (nir) { /* a NOP (nir == 0) needs no emulation */
+ if ((err = mipsr6_emul(regs, nir)) > 0) {
+ regs->cp0_epc = nepc;
+ err = mips_dsemul(regs, nir, cpc, epc, r31);
+ if (err == SIGILL)
+ err = SIGEMT;
+ MIPS_R2_STATS(dsemul);
+ }
+ }
+ break;
+ case bltzal_op:
+ case bgezal_op:
+ if (delay_slot(regs)) {
+ err = SIGILL;
+ break;
+ }
+ regs->regs[31] = r31;
+ regs->cp0_epc = epc;
+ err = __compute_return_epc(regs);
+ if (err < 0)
+ return(SIGEMT);
+ cpc = regs->cp0_epc;
+ nepc = epc + 4;
+ err = __get_user(nir, (u32 __user *)nepc);
+ if (err) {
+ err = SIGSEGV;
+ break;
+ }
+#ifdef CONFIG_DEBUG_FS
+ switch (rt) {
+ case bltzal_op:
+ MIPS_R2BR_STATS(bltzal);
+ break;
+ case bgezal_op:
+ MIPS_R2BR_STATS(bgezal);
+ break;
+ }
+#endif
+ switch (MIPSInst_OPCODE(nir)) {
+ case cop1_op:
+ case cop1x_op:
+ case lwc1_op:
+ case swc1_op:
+ regs->cp0_cause |= CAUSEF_BD;
+ goto fpu_emul;
+ }
+ if (nir) { /* a NOP (nir == 0) needs no emulation */
+ if ((err = mipsr6_emul(regs, nir)) > 0) {
+ regs->cp0_epc = nepc;
+ err = mips_dsemul(regs, nir, cpc, epc, r31);
+ if (err == SIGILL)
+ err = SIGEMT;
+ MIPS_R2_STATS(dsemul);
+ }
+ }
+ break;
+ default:
+ regs->regs[31] = r31;
+ regs->cp0_epc = epc;
+ err = SIGILL;
+ break;
+ }
+ break;
+
+ case beql_op:
+ case bnel_op:
+ case blezl_op:
+ case bgtzl_op:
+ if (delay_slot(regs)) {
+ err = SIGILL;
+ break;
+ }
+ regs->regs[31] = r31;
+ regs->cp0_epc = epc;
+ err = __compute_return_epc(regs);
+ if (err < 0)
+ return(SIGEMT);
+ if (err != BRANCH_LIKELY_TAKEN)
+ break;
+ cpc = regs->cp0_epc;
+ nepc = epc + 4;
+ err = __get_user(nir, (u32 __user *)nepc);
+ if (err) {
+ err = SIGSEGV;
+ break;
+ }
+#ifdef CONFIG_DEBUG_FS
+ switch (MIPSInst_OPCODE(inst)) {
+ case beql_op:
+ MIPS_R2BR_STATS(beql);
+ break;
+ case bnel_op:
+ MIPS_R2BR_STATS(bnel);
+ break;
+ case blezl_op:
+ MIPS_R2BR_STATS(blezl);
+ break;
+ case bgtzl_op:
+ MIPS_R2BR_STATS(bgtzl);
+ break;
+ }
+#endif
+ switch (MIPSInst_OPCODE(nir)) {
+ case cop1_op:
+ case cop1x_op:
+ case lwc1_op:
+ case swc1_op:
+ regs->cp0_cause |= CAUSEF_BD;
+ goto fpu_emul;
+ }
+ if (nir) { /* a NOP (nir == 0) needs no emulation */
+ if ((err = mipsr6_emul(regs, nir)) > 0) {
+ regs->cp0_epc = nepc;
+ err = mips_dsemul(regs, nir, cpc, epc, r31);
+ if (err == SIGILL)
+ err = SIGEMT;
+ MIPS_R2_STATS(dsemul);
+ }
+ }
+ break;
+ case lwc1_op:
+ case swc1_op:
+ case cop1_op:
+ case cop1x_op:
+fpu_emul:
+ regs->regs[31] = r31;
+ regs->cp0_epc = epc;
+ /* all cop1/cop1x RI in R6 are removed R5 FPU instructions */
+ if (!used_math()) /* First time FPU user. */
+ init_fpu();
+ lose_fpu(1); /* Save FPU state for the emulator. */
+
+ err = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0,
+ &fault_addr);
+
+ /* This is a tricky issue - lose_fpu() uses LL/SC atomics if
+ the FPU is owned, which effectively cancels a user-level
+ LL/SC sequence. So it might seem logical not to restore FPU
+ ownership here. But sequences of multiple FPU instructions
+ are far more common than LL-FPU-SC, so we prefer to loop
+ here until the next scheduler cycle cancels FPU ownership. */
+ own_fpu(1); /* Restore FPU state. */
+
+ if (err)
+ current->thread.cp0_baduaddr = (unsigned long)fault_addr;
+
+ MIPS_R2_STATS(fpus);
+ break;
+
+ case lwl_op:
+ rt = regs->regs[MIPSInst_RT(inst)];
+ vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+ if (!access_ok(VERIFY_READ, vaddr, 4)) {
+ current->thread.cp0_baduaddr = vaddr;
+ err = SIGSEGV;
+ break;
+ }
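+ /* Emulate LWL one byte at a time. Each load below has an
+ __ex_table entry pointing at fixup label 8, which sets err
+ to SIGSEGV if the access faults. */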
+ __asm__ __volatile__(
+ " .set push \n"
+ " .set reorder \n"
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+ "1:" LB "%1, 0(%2) \n"
+ INS "%0, %1, 24, 8 \n"
+ " andi %1, %2, 0x3 \n"
+ " beq $0, %1, 9f \n"
+ ADDIU "%2, %2, -1 \n"
+ "2:" LB "%1, 0(%2) \n"
+ INS "%0, %1, 16, 8 \n"
+ " andi %1, %2, 0x3 \n"
+ " beq $0, %1, 9f \n"
+ ADDIU "%2, %2, -1 \n"
+ "3:" LB "%1, 0(%2) \n"
+ INS "%0, %1, 8, 8 \n"
+ " andi %1, %2, 0x3 \n"
+ " beq $0, %1, 9f \n"
+ ADDIU "%2, %2, -1 \n"
+ "4:" LB "%1, 0(%2) \n"
+ INS "%0, %1, 0, 8 \n"
+#else /* !CONFIG_CPU_LITTLE_ENDIAN */
+ "1:" LB "%1, 0(%2) \n"
+ INS "%0, %1, 24, 8 \n"
+ ADDIU "%2, %2, 1 \n"
+ " andi %1, %2, 0x3 \n"
+ " beq $0, %1, 9f \n"
+ "2:" LB "%1, 0(%2) \n"
+ INS "%0, %1, 16, 8 \n"
+ ADDIU "%2, %2, 1 \n"
+ " andi %1, %2, 0x3 \n"
+ " beq $0, %1, 9f \n"
+ "3:" LB "%1, 0(%2) \n"
+ INS "%0, %1, 8, 8 \n"
+ ADDIU "%2, %2, 1 \n"
+ " andi %1, %2, 0x3 \n"
+ " beq $0, %1, 9f \n"
+ "4:" LB "%1, 0(%2) \n"
+ INS "%0, %1, 0, 8 \n"
+#endif /* CONFIG_CPU_LITTLE_ENDIAN */
+ "9: sll %0, %0, 0 \n"
+ "10: \n"
+ " .insn \n"
+ " .section .fixup,\"ax\" \n"
+ "8: li %3,%4 \n"
+ " j 10b \n"
+ " .previous \n"
+ " .section __ex_table,\"a\"\n"
+ __stringify(PTR) " 1b,8b \n"
+ __stringify(PTR) " 2b,8b \n"
+ __stringify(PTR) " 3b,8b \n"
+ __stringify(PTR) " 4b,8b \n"
+ " .previous \n"
+ " .set pop \n"
+ : "+&r"(rt), "=&r"(rs),
+ "+&r"(vaddr), "+&r"(err)
+ : "i"(SIGSEGV));
+ if (MIPSInst_RT(inst) && !err)
+ regs->regs[MIPSInst_RT(inst)] = rt;
+
+ MIPS_R2_STATS(loads);
+ break;
+
+ case lwr_op:
+ rt = regs->regs[MIPSInst_RT(inst)];
+ vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+ if (!access_ok(VERIFY_READ, vaddr, 4)) {
+ current->thread.cp0_baduaddr = vaddr;
+ err = SIGSEGV;
+ break;
+ }
+ __asm__ __volatile__(
+ " .set push \n"
+ " .set reorder \n"
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+ "1:" LB "%1, 0(%2) \n"
+ INS "%0, %1, 0, 8 \n"
+ ADDIU "%2, %2, 1 \n"
+ " andi %1, %2, 0x3 \n"
+ " beq $0, %1, 9f \n"
+ "2:" LB "%1, 0(%2) \n"
+ INS "%0, %1, 8, 8 \n"
+ ADDIU "%2, %2, 1 \n"
+ " andi %1, %2, 0x3 \n"
+ " beq $0, %1, 9f \n"
+ "3:" LB "%1, 0(%2) \n"
+ INS "%0, %1, 16, 8 \n"
+ ADDIU "%2, %2, 1 \n"
+ " andi %1, %2, 0x3 \n"
+ " beq $0, %1, 9f \n"
+ "4:" LB "%1, 0(%2) \n"
+ INS "%0, %1, 24, 8 \n"
+ " sll %0, %0, 0 \n"
+#else /* !CONFIG_CPU_LITTLE_ENDIAN */
+ "1:" LB "%1, 0(%2) \n"
+ INS "%0, %1, 0, 8 \n"
+ " andi %1, %2, 0x3 \n"
+ " beq $0, %1, 9f \n"
+ ADDIU "%2, %2, -1 \n"
+ "2:" LB "%1, 0(%2) \n"
+ INS "%0, %1, 8, 8 \n"
+ " andi %1, %2, 0x3 \n"
+ " beq $0, %1, 9f \n"
+ ADDIU "%2, %2, -1 \n"
+ "3:" LB "%1, 0(%2) \n"
+ INS "%0, %1, 16, 8 \n"
+ " andi %1, %2, 0x3 \n"
+ " beq $0, %1, 9f \n"
+ ADDIU "%2, %2, -1 \n"
+ "4:" LB "%1, 0(%2) \n"
+ INS "%0, %1, 24, 8 \n"
+ " sll %0, %0, 0 \n"
+#endif /* CONFIG_CPU_LITTLE_ENDIAN */
+ "9: \n"
+ "10: \n"
+ " .insn \n"
+ " .section .fixup,\"ax\" \n"
+ "8: li %3,%4 \n"
+ " j 10b \n"
+ " .previous \n"
+ " .section __ex_table,\"a\"\n"
+ __stringify(PTR) " 1b,8b \n"
+ __stringify(PTR) " 2b,8b \n"
+ __stringify(PTR) " 3b,8b \n"
+ __stringify(PTR) " 4b,8b \n"
+ " .previous \n"
+ " .set pop \n"
+ : "+&r"(rt), "=&r"(rs),
+ "+&r"(vaddr), "+&r"(err)
+ : "i"(SIGSEGV));
+ if (MIPSInst_RT(inst) && !err)
+ regs->regs[MIPSInst_RT(inst)] = rt;
+
+ MIPS_R2_STATS(loads);
+ break;
+
+ case swl_op:
+ rt = regs->regs[MIPSInst_RT(inst)];
+ vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+ if (!access_ok(VERIFY_WRITE, vaddr, 4)) {
+ current->thread.cp0_baduaddr = vaddr;
+ err = SIGSEGV;
+ break;
+ }
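+ /* SWL mirrors LWL: store one byte at a time, with faulting
+ stores routed through the fixup at label 8. */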
+ __asm__ __volatile__(
+ " .set push \n"
+ " .set reorder \n"
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+ EXT "%1, %0, 24, 8 \n"
+ "1:" SB "%1, 0(%2) \n"
+ " andi %1, %2, 0x3 \n"
+ " beq $0, %1, 9f \n"
+ ADDIU "%2, %2, -1 \n"
+ EXT "%1, %0, 16, 8 \n"
+ "2:" SB "%1, 0(%2) \n"
+ " andi %1, %2, 0x3 \n"
+ " beq $0, %1, 9f \n"
+ ADDIU "%2, %2, -1 \n"
+ EXT "%1, %0, 8, 8 \n"
+ "3:" SB "%1, 0(%2) \n"
+ " andi %1, %2, 0x3 \n"
+ " beq $0, %1, 9f \n"
+ ADDIU "%2, %2, -1 \n"
+ EXT "%1, %0, 0, 8 \n"
+ "4:" SB "%1, 0(%2) \n"
+#else /* !CONFIG_CPU_LITTLE_ENDIAN */
+ EXT "%1, %0, 24, 8 \n"
+ "1:" SB "%1, 0(%2) \n"
+ ADDIU "%2, %2, 1 \n"
+ " andi %1, %2, 0x3 \n"
+ " beq $0, %1, 9f \n"
+ EXT "%1, %0, 16, 8 \n"
+ "2:" SB "%1, 0(%2) \n"
+ ADDIU "%2, %2, 1 \n"
+ " andi %1, %2, 0x3 \n"
+ " beq $0, %1, 9f \n"
+ EXT "%1, %0, 8, 8 \n"
+ "3:" SB "%1, 0(%2) \n"
+ ADDIU "%2, %2, 1 \n"
+ " andi %1, %2, 0x3 \n"
+ " beq $0, %1, 9f \n"
+ EXT "%1, %0, 0, 8 \n"
+ "4:" SB "%1, 0(%2) \n"
+#endif /* CONFIG_CPU_LITTLE_ENDIAN */
+ "9: \n"
+ " .insn \n"
+ " .section .fixup,\"ax\" \n"
+ "8: li %3,%4 \n"
+ " j 9b \n"
+ " .previous \n"
+ " .section __ex_table,\"a\"\n"
+ __stringify(PTR) " 1b,8b \n"
+ __stringify(PTR) " 2b,8b \n"
+ __stringify(PTR) " 3b,8b \n"
+ __stringify(PTR) " 4b,8b \n"
+ " .previous \n"
+ " .set pop \n"
+ : "+&r"(rt), "=&r"(rs),
+ "+&r"(vaddr), "+&r"(err)
+ : "i"(SIGSEGV)
+ : "memory");
+
+ MIPS_R2_STATS(stores);
+ break;
+
+ case swr_op:
+ rt = regs->regs[MIPSInst_RT(inst)];
+ vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+ if (!access_ok(VERIFY_WRITE, vaddr, 4)) {
+ current->thread.cp0_baduaddr = vaddr;
+ err = SIGSEGV;
+ break;
+ }
+ __asm__ __volatile__(
+ " .set push \n"
+ " .set reorder \n"
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+ EXT "%1, %0, 0, 8 \n"
+ "1:" SB "%1, 0(%2) \n"
+ ADDIU "%2, %2, 1 \n"
+ " andi %1, %2, 0x3 \n"
+ " beq $0, %1, 9f \n"
+ EXT "%1, %0, 8, 8 \n"
+ "2:" SB "%1, 0(%2) \n"
+ ADDIU "%2, %2, 1 \n"
+ " andi %1, %2, 0x3 \n"
+ " beq $0, %1, 9f \n"
+ EXT "%1, %0, 16, 8 \n"
+ "3:" SB "%1, 0(%2) \n"
+ ADDIU "%2, %2, 1 \n"
+ " andi %1, %2, 0x3 \n"
+ " beq $0, %1, 9f \n"
+ EXT "%1, %0, 24, 8 \n"
+ "4:" SB "%1, 0(%2) \n"
+#else /* !CONFIG_CPU_LITTLE_ENDIAN */
+ EXT "%1, %0, 0, 8 \n"
+ "1:" SB "%1, 0(%2) \n"
+ " andi %1, %2, 0x3 \n"
+ " beq $0, %1, 9f \n"
+ ADDIU "%2, %2, -1 \n"
+ EXT "%1, %0, 8, 8 \n"
+ "2:" SB "%1, 0(%2) \n"
+ " andi %1, %2, 0x3 \n"
+ " beq $0, %1, 9f \n"
+ ADDIU "%2, %2, -1 \n"
+ EXT "%1, %0, 16, 8 \n"
+ "3:" SB "%1, 0(%2) \n"
+ " andi %1, %2, 0x3 \n"
+ " beq $0, %1, 9f \n"
+ ADDIU "%2, %2, -1 \n"
+ EXT "%1, %0, 24, 8 \n"
+ "4:" SB "%1, 0(%2) \n"
+#endif /* CONFIG_CPU_LITTLE_ENDIAN */
+ "9: \n"
+ " .insn \n"
+ " .section .fixup,\"ax\" \n"
+ "8: li %3,%4 \n"
+ " j 9b \n"
+ " .previous \n"
+ " .section __ex_table,\"a\"\n"
+ __stringify(PTR) " 1b,8b \n"
+ __stringify(PTR) " 2b,8b \n"
+ __stringify(PTR) " 3b,8b \n"
+ __stringify(PTR) " 4b,8b \n"
+ " .previous \n"
+ " .set pop \n"
+ : "+&r"(rt), "=&r"(rs),
+ "+&r"(vaddr), "+&r"(err)
+ : "i"(SIGSEGV)
+ : "memory");
+
+ MIPS_R2_STATS(stores);
+ break;
+
+#ifdef CONFIG_64BIT
+ case ldl_op:
+ rt = regs->regs[MIPSInst_RT(inst)];
+ vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+ if (!access_ok(VERIFY_READ, vaddr, 8)) {
+ current->thread.cp0_baduaddr = vaddr;
+ err = SIGSEGV;
+ break;
+ }
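+ /* LDL: same byte-wise scheme as LWL above, but over 8 bytes,
+ with dins/dinsu inserting each byte into the destination. */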
+ __asm__ __volatile__(
+ " .set push \n"
+ " .set reorder \n"
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+ "1: lb %1, 0(%2) \n"
+ " dinsu %0, %1, 56, 8 \n"
+ " andi %1, %2, 0x7 \n"
+ " beq $0, %1, 9f \n"
+ " daddiu %2, %2, -1 \n"
+ "2: lb %1, 0(%2) \n"
+ " dinsu %0, %1, 48, 8 \n"
+ " andi %1, %2, 0x7 \n"
+ " beq $0, %1, 9f \n"
+ " daddiu %2, %2, -1 \n"
+ "3: lb %1, 0(%2) \n"
+ " dinsu %0, %1, 40, 8 \n"
+ " andi %1, %2, 0x7 \n"
+ " beq $0, %1, 9f \n"
+ " daddiu %2, %2, -1 \n"
+ "4: lb %1, 0(%2) \n"
+ " dinsu %0, %1, 32, 8 \n"
+ " andi %1, %2, 0x7 \n"
+ " beq $0, %1, 9f \n"
+ " daddiu %2, %2, -1 \n"
+ "5: lb %1, 0(%2) \n"
+ " dins %0, %1, 24, 8 \n"
+ " andi %1, %2, 0x7 \n"
+ " beq $0, %1, 9f \n"
+ " daddiu %2, %2, -1 \n"
+ "6: lb %1, 0(%2) \n"
+ " dins %0, %1, 16, 8 \n"
+ " andi %1, %2, 0x7 \n"
+ " beq $0, %1, 9f \n"
+ " daddiu %2, %2, -1 \n"
+ "7: lb %1, 0(%2) \n"
+ " dins %0, %1, 8, 8 \n"
+ " andi %1, %2, 0x7 \n"
+ " beq $0, %1, 9f \n"
+ " daddiu %2, %2, -1 \n"
+ "0: lb %1, 0(%2) \n"
+ " dins %0, %1, 0, 8 \n"
+#else /* !CONFIG_CPU_LITTLE_ENDIAN */
+ "1: lb %1, 0(%2) \n"
+ " dinsu %0, %1, 56, 8 \n"
+ " daddiu %2, %2, 1 \n"
+ " andi %1, %2, 0x7 \n"
+ " beq $0, %1, 9f \n"
+ "2: lb %1, 0(%2) \n"
+ " dinsu %0, %1, 48, 8 \n"
+ " daddiu %2, %2, 1 \n"
+ " andi %1, %2, 0x7 \n"
+ " beq $0, %1, 9f \n"
+ "3: lb %1, 0(%2) \n"
+ " dinsu %0, %1, 40, 8 \n"
+ " daddiu %2, %2, 1 \n"
+ " andi %1, %2, 0x7 \n"
+ " beq $0, %1, 9f \n"
+ "4: lb %1, 0(%2) \n"
+ " dinsu %0, %1, 32, 8 \n"
+ " daddiu %2, %2, 1 \n"
+ " andi %1, %2, 0x7 \n"
+ " beq $0, %1, 9f \n"
+ "5: lb %1, 0(%2) \n"
+ " dins %0, %1, 24, 8 \n"
+ " daddiu %2, %2, 1 \n"
+ " andi %1, %2, 0x7 \n"
+ " beq $0, %1, 9f \n"
+ "6: lb %1, 0(%2) \n"
+ " dins %0, %1, 16, 8 \n"
+ " daddiu %2, %2, 1 \n"
+ " andi %1, %2, 0x7 \n"
+ " beq $0, %1, 9f \n"
+ "7: lb %1, 0(%2) \n"
+ " dins %0, %1, 8, 8 \n"
+ " daddiu %2, %2, 1 \n"
+ " andi %1, %2, 0x7 \n"
+ " beq $0, %1, 9f \n"
+ "0: lb %1, 0(%2) \n"
+ " dins %0, %1, 0, 8 \n"
+#endif /* CONFIG_CPU_LITTLE_ENDIAN */
+ "9: \n"
+ " .insn \n"
+ " .section .fixup,\"ax\" \n"
+ "8: li %3,%4 \n"
+ " j 9b \n"
+ " .previous \n"
+ " .section __ex_table,\"a\"\n"
+ __stringify(PTR) " 1b,8b \n"
+ __stringify(PTR) " 2b,8b \n"
+ __stringify(PTR) " 3b,8b \n"
+ __stringify(PTR) " 4b,8b \n"
+ __stringify(PTR) " 5b,8b \n"
+ __stringify(PTR) " 6b,8b \n"
+ __stringify(PTR) " 7b,8b \n"
+ __stringify(PTR) " 0b,8b \n"
+ " .previous \n"
+ " .set pop \n"
+ : "+&r"(rt), "=&r"(rs),
+ "+&r"(vaddr), "+&r"(err)
+ : "i"(SIGSEGV));
+ if (MIPSInst_RT(inst) && !err)
+ regs->regs[MIPSInst_RT(inst)] = rt;
+
+ MIPS_R2_STATS(loads);
+ break;
+
+ case ldr_op:
+ rt = regs->regs[MIPSInst_RT(inst)];
+ vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+ if (!access_ok(VERIFY_READ, vaddr, 8)) {
+ current->thread.cp0_baduaddr = vaddr;
+ err = SIGSEGV;
+ break;
+ }
+ __asm__ __volatile__(
+ " .set push \n"
+ " .set reorder \n"
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+ "1: lb %1, 0(%2) \n"
+ " dins %0, %1, 0, 8 \n"
+ " daddiu %2, %2, 1 \n"
+ " andi %1, %2, 0x7 \n"
+ " beq $0, %1, 9f \n"
+ "2: lb %1, 0(%2) \n"
+ " dins %0, %1, 8, 8 \n"
+ " daddiu %2, %2, 1 \n"
+ " andi %1, %2, 0x7 \n"
+ " beq $0, %1, 9f \n"
+ "3: lb %1, 0(%2) \n"
+ " dins %0, %1, 16, 8 \n"
+ " daddiu %2, %2, 1 \n"
+ " andi %1, %2, 0x7 \n"
+ " beq $0, %1, 9f \n"
+ "4: lb %1, 0(%2) \n"
+ " dins %0, %1, 24, 8 \n"
+ " daddiu %2, %2, 1 \n"
+ " andi %1, %2, 0x7 \n"
+ " beq $0, %1, 9f \n"
+ "5: lb %1, 0(%2) \n"
+ " dinsu %0, %1, 32, 8 \n"
+ " daddiu %2, %2, 1 \n"
+ " andi %1, %2, 0x7 \n"
+ " beq $0, %1, 9f \n"
+ "6: lb %1, 0(%2) \n"
+ " dinsu %0, %1, 40, 8 \n"
+ " daddiu %2, %2, 1 \n"
+ " andi %1, %2, 0x7 \n"
+ " beq $0, %1, 9f \n"
+ "7: lb %1, 0(%2) \n"
+ " dinsu %0, %1, 48, 8 \n"
+ " daddiu %2, %2, 1 \n"
+ " andi %1, %2, 0x7 \n"
+ " beq $0, %1, 9f \n"
+ "0: lb %1, 0(%2) \n"
+ " dinsu %0, %1, 56, 8 \n"
+#else /* !CONFIG_CPU_LITTLE_ENDIAN */
+ "1: lb %1, 0(%2) \n"
+ " dins %0, %1, 0, 8 \n"
+ " andi %1, %2, 0x7 \n"
+ " beq $0, %1, 9f \n"
+ " daddiu %2, %2, -1 \n"
+ "2: lb %1, 0(%2) \n"
+ " dins %0, %1, 8, 8 \n"
+ " andi %1, %2, 0x7 \n"
+ " beq $0, %1, 9f \n"
+ " daddiu %2, %2, -1 \n"
+ "3: lb %1, 0(%2) \n"
+ " dins %0, %1, 16, 8 \n"
+ " andi %1, %2, 0x7 \n"
+ " beq $0, %1, 9f \n"
+ " daddiu %2, %2, -1 \n"
+ "4: lb %1, 0(%2) \n"
+ " dins %0, %1, 24, 8 \n"
+ " andi %1, %2, 0x7 \n"
+ " beq $0, %1, 9f \n"
+ " daddiu %2, %2, -1 \n"
+ "5: lb %1, 0(%2) \n"
+ " dinsu %0, %1, 32, 8 \n"
+ " andi %1, %2, 0x7 \n"
+ " beq $0, %1, 9f \n"
+ " daddiu %2, %2, -1 \n"
+ "6: lb %1, 0(%2) \n"
+ " dinsu %0, %1, 40, 8 \n"
+ " andi %1, %2, 0x7 \n"
+ " beq $0, %1, 9f \n"
+ " daddiu %2, %2, -1 \n"
+ "7: lb %1, 0(%2) \n"
+ " dinsu %0, %1, 48, 8 \n"
+ " andi %1, %2, 0x7 \n"
+ " beq $0, %1, 9f \n"
+ " daddiu %2, %2, -1 \n"
+ "0: lb %1, 0(%2) \n"
+ " dinsu %0, %1, 56, 8 \n"
+#endif /* CONFIG_CPU_LITTLE_ENDIAN */
+ "9: \n"
+ " .insn \n"
+ " .section .fixup,\"ax\" \n"
+ "8: li %3,%4 \n"
+ " j 9b \n"
+ " .previous \n"
+ " .section __ex_table,\"a\"\n"
+ __stringify(PTR) " 1b,8b \n"
+ __stringify(PTR) " 2b,8b \n"
+ __stringify(PTR) " 3b,8b \n"
+ __stringify(PTR) " 4b,8b \n"
+ __stringify(PTR) " 5b,8b \n"
+ __stringify(PTR) " 6b,8b \n"
+ __stringify(PTR) " 7b,8b \n"
+ __stringify(PTR) " 0b,8b \n"
+ " .previous \n"
+ " .set pop \n"
+ : "+&r"(rt), "=&r"(rs),
+ "+&r"(vaddr), "+&r"(err)
+ : "i"(SIGSEGV));
+ if (MIPSInst_RT(inst) && !err)
+ regs->regs[MIPSInst_RT(inst)] = rt;
+
+ MIPS_R2_STATS(loads);
+ break;
+
+ case sdl_op:
+ rt = regs->regs[MIPSInst_RT(inst)];
+ vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+ if (!access_ok(VERIFY_WRITE, vaddr, 8)) {
+ current->thread.cp0_baduaddr = vaddr;
+ err = SIGSEGV;
+ break;
+ }
+ __asm__ __volatile__(
+ " .set push \n"
+ " .set reorder \n"
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+ " dextu %1, %0, 56, 8 \n"
+ "1: sb %1, 0(%2) \n"
+ " andi %1, %2, 0x7 \n"
+ " beq $0, %1, 9f \n"
+ " daddiu %2, %2, -1 \n"
+ " dextu %1, %0, 48, 8 \n"
+ "2: sb %1, 0(%2) \n"
+ " andi %1, %2, 0x7 \n"
+ " beq $0, %1, 9f \n"
+ " daddiu %2, %2, -1 \n"
+ " dextu %1, %0, 40, 8 \n"
+ "3: sb %1, 0(%2) \n"
+ " andi %1, %2, 0x7 \n"
+ " beq $0, %1, 9f \n"
+ " daddiu %2, %2, -1 \n"
+ " dextu %1, %0, 32, 8 \n"
+ "4: sb %1, 0(%2) \n"
+ " andi %1, %2, 0x7 \n"
+ " beq $0, %1, 9f \n"
+ " daddiu %2, %2, -1 \n"
+ " dext %1, %0, 24, 8 \n"
+ "5: sb %1, 0(%2) \n"
+ " andi %1, %2, 0x7 \n"
+ " beq $0, %1, 9f \n"
+ " daddiu %2, %2, -1 \n"
+ " dext %1, %0, 16, 8 \n"
+ "6: sb %1, 0(%2) \n"
+ " andi %1, %2, 0x7 \n"
+ " beq $0, %1, 9f \n"
+ " daddiu %2, %2, -1 \n"
+ " dext %1, %0, 8, 8 \n"
+ "7: sb %1, 0(%2) \n"
+ " andi %1, %2, 0x7 \n"
+ " beq $0, %1, 9f \n"
+ " daddiu %2, %2, -1 \n"
+ " dext %1, %0, 0, 8 \n"
+ "0: sb %1, 0(%2) \n"
+#else /* !CONFIG_CPU_LITTLE_ENDIAN */
+ " dextu %1, %0, 56, 8 \n"
+ "1: sb %1, 0(%2) \n"
+ " daddiu %2, %2, 1 \n"
+ " andi %1, %2, 0x7 \n"
+ " beq $0, %1, 9f \n"
+ " dextu %1, %0, 48, 8 \n"
+ "2: sb %1, 0(%2) \n"
+ " daddiu %2, %2, 1 \n"
+ " andi %1, %2, 0x7 \n"
+ " beq $0, %1, 9f \n"
+ " dextu %1, %0, 40, 8 \n"
+ "3: sb %1, 0(%2) \n"
+ " daddiu %2, %2, 1 \n"
+ " andi %1, %2, 0x7 \n"
+ " beq $0, %1, 9f \n"
+ " dextu %1, %0, 32, 8 \n"
+ "4: sb %1, 0(%2) \n"
+ " daddiu %2, %2, 1 \n"
+ " andi %1, %2, 0x7 \n"
+ " beq $0, %1, 9f \n"
+ " dext %1, %0, 24, 8 \n"
+ "5: sb %1, 0(%2) \n"
+ " daddiu %2, %2, 1 \n"
+ " andi %1, %2, 0x7 \n"
+ " beq $0, %1, 9f \n"
+ " dext %1, %0, 16, 8 \n"
+ "6: sb %1, 0(%2) \n"
+ " daddiu %2, %2, 1 \n"
+ " andi %1, %2, 0x7 \n"
+ " beq $0, %1, 9f \n"
+ " dext %1, %0, 8, 8 \n"
+ "7: sb %1, 0(%2) \n"
+ " daddiu %2, %2, 1 \n"
+ " andi %1, %2, 0x7 \n"
+ " beq $0, %1, 9f \n"
+ " dext %1, %0, 0, 8 \n"
+ "0: sb %1, 0(%2) \n"
+#endif /* CONFIG_CPU_LITTLE_ENDIAN */
+ "9: \n"
+ " .insn \n"
+ " .section .fixup,\"ax\" \n"
+ "8: li %3,%4 \n"
+ " j 9b \n"
+ " .previous \n"
+ " .section __ex_table,\"a\"\n"
+ __stringify(PTR) " 1b,8b \n"
+ __stringify(PTR) " 2b,8b \n"
+ __stringify(PTR) " 3b,8b \n"
+ __stringify(PTR) " 4b,8b \n"
+ __stringify(PTR) " 5b,8b \n"
+ __stringify(PTR) " 6b,8b \n"
+ __stringify(PTR) " 7b,8b \n"
+ __stringify(PTR) " 0b,8b \n"
+ " .previous \n"
+ " .set pop \n"
+ : "+&r"(rt), "=&r"(rs),
+ "+&r"(vaddr), "+&r"(err)
+ : "i"(SIGSEGV)
+ : "memory");
+
+ MIPS_R2_STATS(stores);
+ break;
+
+ case sdr_op:
+ rt = regs->regs[MIPSInst_RT(inst)];
+ vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+ if (!access_ok(VERIFY_WRITE, vaddr, 8)) {
+ current->thread.cp0_baduaddr = vaddr;
+ err = SIGSEGV;
+ break;
+ }
+ __asm__ __volatile__(
+ " .set push \n"
+ " .set reorder \n"
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+ " dext %1, %0, 0, 8 \n"
+ "1: sb %1, 0(%2) \n"
+ " daddiu %2, %2, 1 \n"
+ " andi %1, %2, 0x7 \n"
+ " beq $0, %1, 9f \n"
+ " dext %1, %0, 8, 8 \n"
+ "2: sb %1, 0(%2) \n"
+ " daddiu %2, %2, 1 \n"
+ " andi %1, %2, 0x7 \n"
+ " beq $0, %1, 9f \n"
+ " dext %1, %0, 16, 8 \n"
+ "3: sb %1, 0(%2) \n"
+ " daddiu %2, %2, 1 \n"
+ " andi %1, %2, 0x7 \n"
+ " beq $0, %1, 9f \n"
+ " dext %1, %0, 24, 8 \n"
+ "4: sb %1, 0(%2) \n"
+ " daddiu %2, %2, 1 \n"
+ " andi %1, %2, 0x7 \n"
+ " beq $0, %1, 9f \n"
+ " dextu %1, %0, 32, 8 \n"
+ "5: sb %1, 0(%2) \n"
+ " daddiu %2, %2, 1 \n"
+ " andi %1, %2, 0x7 \n"
+ " beq $0, %1, 9f \n"
+ " dextu %1, %0, 40, 8 \n"
+ "6: sb %1, 0(%2) \n"
+ " daddiu %2, %2, 1 \n"
+ " andi %1, %2, 0x7 \n"
+ " beq $0, %1, 9f \n"
+ " dextu %1, %0, 48, 8 \n"
+ "7: sb %1, 0(%2) \n"
+ " daddiu %2, %2, 1 \n"
+ " andi %1, %2, 0x7 \n"
+ " beq $0, %1, 9f \n"
+ " dextu %1, %0, 56, 8 \n"
+ "0: sb %1, 0(%2) \n"
+#else /* !CONFIG_CPU_LITTLE_ENDIAN */
+ " dext %1, %0, 0, 8 \n"
+ "1: sb %1, 0(%2) \n"
+ " andi %1, %2, 0x7 \n"
+ " beq $0, %1, 9f \n"
+ " daddiu %2, %2, -1 \n"
+ " dext %1, %0, 8, 8 \n"
+ "2: sb %1, 0(%2) \n"
+ " andi %1, %2, 0x7 \n"
+ " beq $0, %1, 9f \n"
+ " daddiu %2, %2, -1 \n"
+ " dext %1, %0, 16, 8 \n"
+ "3: sb %1, 0(%2) \n"
+ " andi %1, %2, 0x7 \n"
+ " beq $0, %1, 9f \n"
+ " daddiu %2, %2, -1 \n"
+ " dext %1, %0, 24, 8 \n"
+ "4: sb %1, 0(%2) \n"
+ " andi %1, %2, 0x7 \n"
+ " beq $0, %1, 9f \n"
+ " daddiu %2, %2, -1 \n"
+ " dextu %1, %0, 32, 8 \n"
+ "5: sb %1, 0(%2) \n"
+ " andi %1, %2, 0x7 \n"
+ " beq $0, %1, 9f \n"
+ " daddiu %2, %2, -1 \n"
+ " dextu %1, %0, 40, 8 \n"
+ "6: sb %1, 0(%2) \n"
+ " andi %1, %2, 0x7 \n"
+ " beq $0, %1, 9f \n"
+ " daddiu %2, %2, -1 \n"
+ " dextu %1, %0, 48, 8 \n"
+ "7: sb %1, 0(%2) \n"
+ " andi %1, %2, 0x7 \n"
+ " beq $0, %1, 9f \n"
+ " daddiu %2, %2, -1 \n"
+ " dextu %1, %0, 56, 8 \n"
+ "0: sb %1, 0(%2) \n"
+#endif /* CONFIG_CPU_LITTLE_ENDIAN */
+ "9: \n"
+ " .insn \n"
+ " .section .fixup,\"ax\" \n"
+ "8: li %3,%4 \n"
+ " j 9b \n"
+ " .previous \n"
+ " .section __ex_table,\"a\"\n"
+ __stringify(PTR) " 1b,8b \n"
+ __stringify(PTR) " 2b,8b \n"
+ __stringify(PTR) " 3b,8b \n"
+ __stringify(PTR) " 4b,8b \n"
+ __stringify(PTR) " 5b,8b \n"
+ __stringify(PTR) " 6b,8b \n"
+ __stringify(PTR) " 7b,8b \n"
+ __stringify(PTR) " 0b,8b \n"
+ " .previous \n"
+ " .set pop \n"
+ : "+&r"(rt), "=&r"(rs),
+ "+&r"(vaddr), "+&r"(err)
+ : "i"(SIGSEGV)
+ : "memory");
+
+ MIPS_R2_STATS(stores);
+ break;
+#endif
+ case ll_op:
+ vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+ if (vaddr & 0x3) {
+ current->thread.cp0_baduaddr = vaddr;
+ err = SIGBUS;
+ break;
+ }
+ if (!access_ok(VERIFY_READ, vaddr, 4)) {
+ current->thread.cp0_baduaddr = vaddr;
+ err = SIGBUS;
+ break;
+ }
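+ /* Use a native LL here; if the SC that pairs with it is close
+ enough, the multi-pass loop below emulates it in the same
+ trap, before an ERET can clear the link bit. */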
+ __asm__ __volatile__(
+ "1: \n"
+ LL " %0, 0(%2) \n"
+ "2: \n"
+ ".insn \n"
+ ".section .fixup,\"ax\" \n"
+ "3: \n"
+ "li %1, %3 \n"
+ "j 2b \n"
+ ".previous \n"
+ ".section __ex_table,\"a\" \n"
+ __stringify(PTR) " 1b,3b \n"
+ ".previous \n"
+ :"=&r"(res), "+&r"(err)
+ :"r"(vaddr), "i"(SIGSEGV)
+ :"memory");
+ if (MIPSInst_RT(inst) && !err)
+ regs->regs[MIPSInst_RT(inst)] = res;
+ MIPS_R2_STATS(llsc);
+ break;
+
+ case sc_op:
+ vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+ if (vaddr & 0x3) {
+ current->thread.cp0_baduaddr = vaddr;
+ err = SIGBUS;
+ break;
+ }
+ if (!access_ok(VERIFY_WRITE, vaddr, 4)) {
+ current->thread.cp0_baduaddr = vaddr;
+ err = SIGBUS;
+ break;
+ }
+ res = regs->regs[MIPSInst_RT(inst)];
+ __asm__ __volatile__(
+ "1: \n"
+ SC " %0, 0(%2) \n"
+ "2: \n"
+ ".insn \n"
+ ".section .fixup,\"ax\" \n"
+ "3: \n"
+ "li %1, %3 \n"
+ "j 2b \n"
+ ".previous \n"
+ ".section __ex_table,\"a\" \n"
+ __stringify(PTR) " 1b,3b \n"
+ ".previous \n"
+ :"+&r"(res), "+&r"(err)
+ :"r"(vaddr), "i"(SIGSEGV)
+ :"memory");
+ if (MIPSInst_RT(inst) && !err)
+ regs->regs[MIPSInst_RT(inst)] = res;
+ MIPS_R2_STATS(llsc);
+ break;
+
+#ifdef CONFIG_64BIT
+ case lld_op:
+ vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+ if (vaddr & 0x7) {
+ current->thread.cp0_baduaddr = vaddr;
+ err = SIGBUS;
+ break;
+ }
+ if (!access_ok(VERIFY_READ, vaddr, 8)) {
+ current->thread.cp0_baduaddr = vaddr;
+ err = SIGBUS;
+ break;
+ }
+ __asm__ __volatile__(
+ "1: \n"
+ "lld %0, 0(%2) \n"
+ "2: \n"
+ ".insn \n"
+ ".section .fixup,\"ax\" \n"
+ "3: \n"
+ "li %1, %3 \n"
+ "j 2b \n"
+ ".previous \n"
+ ".section __ex_table,\"a\" \n"
+ __stringify(PTR) " 1b,3b \n"
+ ".previous \n"
+ :"=&r"(res), "+&r"(err)
+ :"r"(vaddr), "i"(SIGSEGV)
+ :"memory");
+ if (MIPSInst_RT(inst) && !err)
+ regs->regs[MIPSInst_RT(inst)] = res;
+ MIPS_R2_STATS(llsc);
+ break;
+
+ case scd_op:
+ vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+ if (vaddr & 0x7) {
+ current->thread.cp0_baduaddr = vaddr;
+ err = SIGBUS;
+ break;
+ }
+ if (!access_ok(VERIFY_WRITE, vaddr, 8)) {
+ current->thread.cp0_baduaddr = vaddr;
+ err = SIGBUS;
+ break;
+ }
+ res = regs->regs[MIPSInst_RT(inst)];
+ __asm__ __volatile__(
+ "1: \n"
+ "scd %0, 0(%2) \n"
+ "2: \n"
+ ".insn \n"
+ ".section .fixup,\"ax\" \n"
+ "3: \n"
+ "li %1, %3 \n"
+ "j 2b \n"
+ ".previous \n"
+ ".section __ex_table,\"a\" \n"
+ __stringify(PTR) " 1b,3b \n"
+ ".previous \n"
+ :"+&r"(res), "+&r"(err)
+ :"r"(vaddr), "i"(SIGSEGV)
+ :"memory");
+ if (MIPSInst_RT(inst) && !err)
+ regs->regs[MIPSInst_RT(inst)] = res;
+ MIPS_R2_STATS(llsc);
+ break;
+#endif
+ case pref_op:
+ /* skip it */
+ break;
+
+ default:
+ err = SIGILL;
+ break;
+ }
+
+ if ((!err) && (pass++ < TOTAL_PASS)) {
+ regs->cp0_cause &= ~CAUSEF_BD;
+ err = get_user(inst, (u32 __user *)regs->cp0_epc);
+ if (!err)
+ goto repeat;
+ if (err < 0)
+ err = SIGSEGV;
+ }
+ if (err && (err != SIGEMT)) {
+ regs->regs[31] = r31;
+ regs->cp0_epc = epc;
+ }
+ /* it may be a MIPS R6 instruction */
+ if (pass && (err == SIGILL))
+ err = 0;
+ return(err);
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+static int mipsr2_stats_show(struct seq_file *s, void *unused)
+{
+ seq_printf(s, "Instruction\tTotal\tBDslot\n------------------------------\n");
+ seq_printf(s, "movs\t\t%ld\t%ld\n", (unsigned long)__get_cpu_var(mipsr2emustats).movs.a.counter,
+ (unsigned long)__get_cpu_var(mipsr2bdemustats).movs.a.counter);
+ seq_printf(s, "hilo\t\t%ld\t%ld\n", (unsigned long)__get_cpu_var(mipsr2emustats).hilo.a.counter,
+ (unsigned long)__get_cpu_var(mipsr2bdemustats).hilo.a.counter);
+ seq_printf(s, "muls\t\t%ld\t%ld\n", (unsigned long)__get_cpu_var(mipsr2emustats).muls.a.counter,
+ (unsigned long)__get_cpu_var(mipsr2bdemustats).muls.a.counter);
+ seq_printf(s, "divs\t\t%ld\t%ld\n", (unsigned long)__get_cpu_var(mipsr2emustats).divs.a.counter,
+ (unsigned long)__get_cpu_var(mipsr2bdemustats).divs.a.counter);
+ seq_printf(s, "dsps\t\t%ld\t%ld\n", (unsigned long)__get_cpu_var(mipsr2emustats).dsps.a.counter,
+ (unsigned long)__get_cpu_var(mipsr2bdemustats).dsps.a.counter);
+ seq_printf(s, "bops\t\t%ld\t%ld\n", (unsigned long)__get_cpu_var(mipsr2emustats).bops.a.counter,
+ (unsigned long)__get_cpu_var(mipsr2bdemustats).bops.a.counter);
+ seq_printf(s, "traps\t\t%ld\t%ld\n", (unsigned long)__get_cpu_var(mipsr2emustats).traps.a.counter,
+ (unsigned long)__get_cpu_var(mipsr2bdemustats).traps.a.counter);
+ seq_printf(s, "fpus\t\t%ld\t%ld\n", (unsigned long)__get_cpu_var(mipsr2emustats).fpus.a.counter,
+ (unsigned long)__get_cpu_var(mipsr2bdemustats).fpus.a.counter);
+ seq_printf(s, "loads\t\t%ld\t%ld\n", (unsigned long)__get_cpu_var(mipsr2emustats).loads.a.counter,
+ (unsigned long)__get_cpu_var(mipsr2bdemustats).loads.a.counter);
+ seq_printf(s, "stores\t\t%ld\t%ld\n", (unsigned long)__get_cpu_var(mipsr2emustats).stores.a.counter,
+ (unsigned long)__get_cpu_var(mipsr2bdemustats).stores.a.counter);
+ seq_printf(s, "llsc\t\t%ld\t%ld\n", (unsigned long)__get_cpu_var(mipsr2emustats).llsc.a.counter,
+ (unsigned long)__get_cpu_var(mipsr2bdemustats).llsc.a.counter);
+ seq_printf(s, "dsemul\t\t%ld\t%ld\n", (unsigned long)__get_cpu_var(mipsr2emustats).dsemul.a.counter,
+ (unsigned long)__get_cpu_var(mipsr2bdemustats).dsemul.a.counter);
+
+ seq_printf(s, "jr\t\t%ld\n", (unsigned long)__get_cpu_var(mipsr2bremustats).jrs.a.counter);
+ seq_printf(s, "bltzl\t\t%ld\n", (unsigned long)__get_cpu_var(mipsr2bremustats).bltzl.a.counter);
+ seq_printf(s, "bgezl\t\t%ld\n", (unsigned long)__get_cpu_var(mipsr2bremustats).bgezl.a.counter);
+ seq_printf(s, "bltzll\t\t%ld\n", (unsigned long)__get_cpu_var(mipsr2bremustats).bltzll.a.counter);
+ seq_printf(s, "bgezll\t\t%ld\n", (unsigned long)__get_cpu_var(mipsr2bremustats).bgezll.a.counter);
+ seq_printf(s, "bltzal\t\t%ld\n", (unsigned long)__get_cpu_var(mipsr2bremustats).bltzal.a.counter);
+ seq_printf(s, "bgezal\t\t%ld\n", (unsigned long)__get_cpu_var(mipsr2bremustats).bgezal.a.counter);
+ seq_printf(s, "beql\t\t%ld\n", (unsigned long)__get_cpu_var(mipsr2bremustats).beql.a.counter);
+ seq_printf(s, "bnel\t\t%ld\n", (unsigned long)__get_cpu_var(mipsr2bremustats).bnel.a.counter);
+ seq_printf(s, "blezl\t\t%ld\n", (unsigned long)__get_cpu_var(mipsr2bremustats).blezl.a.counter);
+ seq_printf(s, "bgtzl\t\t%ld\n", (unsigned long)__get_cpu_var(mipsr2bremustats).bgtzl.a.counter);
+
+ return 0;
+}
+
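+/* Reading the "stats-clear" file dumps the current counters and
+ then resets them all to zero. */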
+static int mipsr2_stats_clear_show(struct seq_file *s, void *unused)
+{
+ mipsr2_stats_show(s, unused);
+
+ __get_cpu_var(mipsr2emustats).movs.a.counter = 0;
+ __get_cpu_var(mipsr2bdemustats).movs.a.counter = 0;
+ __get_cpu_var(mipsr2emustats).hilo.a.counter = 0;
+ __get_cpu_var(mipsr2bdemustats).hilo.a.counter = 0;
+ __get_cpu_var(mipsr2emustats).muls.a.counter = 0;
+ __get_cpu_var(mipsr2bdemustats).muls.a.counter = 0;
+ __get_cpu_var(mipsr2emustats).divs.a.counter = 0;
+ __get_cpu_var(mipsr2bdemustats).divs.a.counter = 0;
+ __get_cpu_var(mipsr2emustats).dsps.a.counter = 0;
+ __get_cpu_var(mipsr2bdemustats).dsps.a.counter = 0;
+ __get_cpu_var(mipsr2emustats).bops.a.counter = 0;
+ __get_cpu_var(mipsr2bdemustats).bops.a.counter = 0;
+ __get_cpu_var(mipsr2emustats).traps.a.counter = 0;
+ __get_cpu_var(mipsr2bdemustats).traps.a.counter = 0;
+ __get_cpu_var(mipsr2emustats).fpus.a.counter = 0;
+ __get_cpu_var(mipsr2bdemustats).fpus.a.counter = 0;
+ __get_cpu_var(mipsr2emustats).loads.a.counter = 0;
+ __get_cpu_var(mipsr2bdemustats).loads.a.counter = 0;
+ __get_cpu_var(mipsr2emustats).stores.a.counter = 0;
+ __get_cpu_var(mipsr2bdemustats).stores.a.counter = 0;
+ __get_cpu_var(mipsr2emustats).llsc.a.counter = 0;
+ __get_cpu_var(mipsr2bdemustats).llsc.a.counter = 0;
+ __get_cpu_var(mipsr2emustats).dsemul.a.counter = 0;
+ __get_cpu_var(mipsr2bdemustats).dsemul.a.counter = 0;
+
+ __get_cpu_var(mipsr2bremustats).jrs.a.counter = 0;
+ __get_cpu_var(mipsr2bremustats).bltzl.a.counter = 0;
+ __get_cpu_var(mipsr2bremustats).bgezl.a.counter = 0;
+ __get_cpu_var(mipsr2bremustats).bltzll.a.counter = 0;
+ __get_cpu_var(mipsr2bremustats).bgezll.a.counter = 0;
+ __get_cpu_var(mipsr2bremustats).bltzal.a.counter = 0;
+ __get_cpu_var(mipsr2bremustats).bgezal.a.counter = 0;
+ __get_cpu_var(mipsr2bremustats).beql.a.counter = 0;
+ __get_cpu_var(mipsr2bremustats).bnel.a.counter = 0;
+ __get_cpu_var(mipsr2bremustats).blezl.a.counter = 0;
+ __get_cpu_var(mipsr2bremustats).bgtzl.a.counter = 0;
+
+ return 0;
+}
+
+static int mipsr2_stats_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mipsr2_stats_show, inode->i_private);
+}
+
+static int mipsr2_stats_clear_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mipsr2_stats_clear_show, inode->i_private);
+}
+
+static const struct file_operations mipsr2_fops = {
+ .open = mipsr2_stats_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static const struct file_operations mipsr2_clear_fops = {
+ .open = mipsr2_stats_clear_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+int mipsr2_init_debugfs(void)
+{
+ struct dentry *root;
+ struct dentry *file;
+ int ret;
+
+ root = debugfs_create_dir("mipsr2-emulation", NULL);
+ if (!root) {
+ ret = -ENOMEM;
+ goto err0;
+ }
+
+ file = debugfs_create_file("stats", S_IRUGO, root, NULL,
+ &mipsr2_fops);
+ if (!file) {
+ ret = -ENOMEM;
+ goto err1;
+ }
+
+ file = debugfs_create_file("stats-clear", S_IRUGO, root, NULL,
+ &mipsr2_clear_fops);
+ if (!file) {
+ ret = -ENOMEM;
+ goto err1;
+ }
+
+ return 0;
+
+err1:
+ debugfs_remove_recursive(root);
+
+err0:
+ return ret;
+}
+__initcall(mipsr2_init_debugfs);
+
+#endif /* CONFIG_DEBUG_FS */
diff --git a/arch/mips/kernel/mips_ksyms.c b/arch/mips/kernel/mips_ksyms.c
index 6e58e97..d89af71 100644
--- a/arch/mips/kernel/mips_ksyms.c
+++ b/arch/mips/kernel/mips_ksyms.c
@@ -16,14 +16,14 @@
#include <asm/ftrace.h>
extern void *__bzero(void *__s, size_t __count);
-extern long __strncpy_from_user_nocheck_asm(char *__to,
+extern long __strncpy_from_kernel_nocheck_asm(char *__to,
const char *__from, long __len);
-extern long __strncpy_from_user_asm(char *__to, const char *__from,
+extern long __strncpy_from_kernel_asm(char *__to, const char *__from,
long __len);
-extern long __strlen_user_nocheck_asm(const char *s);
-extern long __strlen_user_asm(const char *s);
-extern long __strnlen_user_nocheck_asm(const char *s);
-extern long __strnlen_user_asm(const char *s);
+extern long __strlen_kernel_nocheck_asm(const char *s);
+extern long __strlen_kernel_asm(const char *s);
+extern long __strnlen_kernel_nocheck_asm(const char *s);
+extern long __strnlen_kernel_asm(const char *s);
/*
* String functions
@@ -44,16 +44,46 @@
EXPORT_SYMBOL(__copy_user);
EXPORT_SYMBOL(__copy_user_inatomic);
EXPORT_SYMBOL(__bzero);
-EXPORT_SYMBOL(__strncpy_from_user_nocheck_asm);
-EXPORT_SYMBOL(__strncpy_from_user_asm);
+EXPORT_SYMBOL(__strlen_kernel_nocheck_asm);
+EXPORT_SYMBOL(__strlen_kernel_asm);
+EXPORT_SYMBOL(__strnlen_kernel_nocheck_asm);
+EXPORT_SYMBOL(__strnlen_kernel_asm);
+EXPORT_SYMBOL(__strncpy_from_kernel_nocheck_asm);
+EXPORT_SYMBOL(__strncpy_from_kernel_asm);
+
+#ifdef CONFIG_EVA
+extern void *__bzero_user(void *__s, size_t __count);
+extern long __strncpy_from_user_nocheck_asm(char *__to,
+ const char *__from, long __len);
+extern long __strncpy_from_user_asm(char *__to, const char *__from,
+ long __len);
+extern long __strlen_user_nocheck_asm(const char *s);
+extern long __strlen_user_asm(const char *s);
+extern long __strnlen_user_nocheck_asm(const char *s);
+extern long __strnlen_user_asm(const char *s);
+
+EXPORT_SYMBOL(__copy_touser);
+EXPORT_SYMBOL(__copy_fromuser);
+EXPORT_SYMBOL(__copy_fromuser_inatomic);
+EXPORT_SYMBOL(__copy_inuser);
+EXPORT_SYMBOL(__bzero_user);
EXPORT_SYMBOL(__strlen_user_nocheck_asm);
EXPORT_SYMBOL(__strlen_user_asm);
EXPORT_SYMBOL(__strnlen_user_nocheck_asm);
EXPORT_SYMBOL(__strnlen_user_asm);
+EXPORT_SYMBOL(__strncpy_from_user_nocheck_asm);
+EXPORT_SYMBOL(__strncpy_from_user_asm);
+#endif
+#ifndef CONFIG_CPU_MIPSR6
EXPORT_SYMBOL(csum_partial);
EXPORT_SYMBOL(csum_partial_copy_nocheck);
EXPORT_SYMBOL(__csum_partial_copy_user);
+#ifdef CONFIG_EVA
+EXPORT_SYMBOL(__csum_partial_copy_fromuser);
+EXPORT_SYMBOL(__csum_partial_copy_touser);
+#endif
+#endif /* !CONFIG_CPU_MIPSR6 */
EXPORT_SYMBOL(invalid_pte_table);
#ifdef CONFIG_FUNCTION_TRACER
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index 45f1ffc..b94274f 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -805,7 +805,7 @@
}
}
-/* 24K/34K/1004K cores can share the same event map. */
+/* 24K/34K/1004K/interAptiv/loongson1 cores share the same event map. */
static const struct mips_perf_event mipsxxcore_event_map
[PERF_COUNT_HW_MAX] = {
[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
@@ -814,8 +814,8 @@
[PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T },
};
-/* 74K core has different branch event code. */
-static const struct mips_perf_event mipsxx74Kcore_event_map
+/* 74K/proAptiv core has different branch event code. */
+static const struct mips_perf_event mipsxxcore_event_map2
[PERF_COUNT_HW_MAX] = {
[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
@@ -849,7 +849,7 @@
[PERF_COUNT_HW_BRANCH_MISSES] = { 0x1c, CNTR_ALL }, /* PAPI_BR_MSP */
};
-/* 24K/34K/1004K cores can share the same cache event map. */
+/* 24K/34K/1004K/interAptiv/loongson1 cores share the same cache event map. */
static const struct mips_perf_event mipsxxcore_cache_map
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
@@ -930,8 +930,8 @@
},
};
-/* 74K core has completely different cache event map. */
-static const struct mips_perf_event mipsxx74Kcore_cache_map
+/* 74K/proAptiv core has completely different cache event map. */
+static const struct mips_perf_event mipsxxcore_cache_map2
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
@@ -971,13 +971,18 @@
[C(LL)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { 0x1c, CNTR_ODD, P },
- [C(RESULT_MISS)] = { 0x1d, CNTR_EVEN | CNTR_ODD, P },
+ [C(RESULT_MISS)] = { 0x1d, CNTR_EVEN, P },
},
[C(OP_WRITE)] = {
[C(RESULT_ACCESS)] = { 0x1c, CNTR_ODD, P },
- [C(RESULT_MISS)] = { 0x1d, CNTR_EVEN | CNTR_ODD, P },
+ [C(RESULT_MISS)] = { 0x1d, CNTR_EVEN, P },
},
},
+/*
+ * 74K core does not have specific DTLB events. proAptiv core has
+ * "speculative" DTLB events which are numbered 0x63 (even/odd) and
+ * not included here. One can use raw events if really needed.
+ */
[C(ITLB)] = {
[C(OP_READ)] = {
[C(RESULT_ACCESS)] = { 0x04, CNTR_EVEN, T },
@@ -1378,6 +1383,10 @@
#define IS_BOTH_COUNTERS_74K_EVENT(b) \
((b) == 0 || (b) == 1)
+/* proAptiv */
+#define IS_BOTH_COUNTERS_PROAPTIV_EVENT(b) \
+ ((b) == 0 || (b) == 1)
+
/* 1004K */
#define IS_BOTH_COUNTERS_1004K_EVENT(b) \
((b) == 0 || (b) == 1 || (b) == 11)
@@ -1391,6 +1400,20 @@
#define IS_RANGE_V_1004K_EVENT(r) ((r) == 47)
#endif
+/* interAptiv */
+#define IS_BOTH_COUNTERS_INTERAPTIV_EVENT(b) \
+ ((b) == 0 || (b) == 1 || (b) == 11)
+#ifdef CONFIG_MIPS_MT_SMP
+/* The P/V/T info is not provided for "(b) == 38" in SUM, assume P. */
+#define IS_RANGE_P_INTERAPTIV_EVENT(r, b) \
+ ((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 || \
+ (b) == 25 || (b) == 36 || (b) == 38 || (b) == 39 || \
+ (r) == 44 || (r) == 174 || (r) == 176 || ((b) >= 50 && \
+ (b) <= 59) || (r) == 188 || (b) == 61 || (b) == 62 || \
+ ((b) >= 64 && (b) <= 67))
+#define IS_RANGE_V_INTERAPTIV_EVENT(r) ((r) == 47 || (r) == 175)
+#endif
+
/* BMIPS5000 */
#define IS_BOTH_COUNTERS_BMIPS5000_EVENT(b) \
((b) == 0 || (b) == 1)
@@ -1451,6 +1474,16 @@
raw_event.range = P;
#endif
break;
+ case CPU_PROAPTIV:
+ if (IS_BOTH_COUNTERS_PROAPTIV_EVENT(base_id))
+ raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
+ else
+ raw_event.cntr_mask =
+ raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
+#ifdef CONFIG_MIPS_MT_SMP
+ raw_event.range = P;
+#endif
+ break;
case CPU_1004K:
if (IS_BOTH_COUNTERS_1004K_EVENT(base_id))
raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
@@ -1466,6 +1499,21 @@
raw_event.range = T;
#endif
break;
+ case CPU_INTERAPTIV:
+ if (IS_BOTH_COUNTERS_INTERAPTIV_EVENT(base_id))
+ raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
+ else
+ raw_event.cntr_mask =
+ raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
+#ifdef CONFIG_MIPS_MT_SMP
+ if (IS_RANGE_P_INTERAPTIV_EVENT(raw_id, base_id))
+ raw_event.range = P;
+ else if (unlikely(IS_RANGE_V_INTERAPTIV_EVENT(raw_id)))
+ raw_event.range = V;
+ else
+ raw_event.range = T;
+#endif
+ break;
case CPU_BMIPS5000:
if (IS_BOTH_COUNTERS_BMIPS5000_EVENT(base_id))
raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
@@ -1576,14 +1624,24 @@
break;
case CPU_74K:
mipspmu.name = "mips/74K";
- mipspmu.general_event_map = &mipsxx74Kcore_event_map;
- mipspmu.cache_event_map = &mipsxx74Kcore_cache_map;
+ mipspmu.general_event_map = &mipsxxcore_event_map2;
+ mipspmu.cache_event_map = &mipsxxcore_cache_map2;
break;
case CPU_1004K:
mipspmu.name = "mips/1004K";
mipspmu.general_event_map = &mipsxxcore_event_map;
mipspmu.cache_event_map = &mipsxxcore_cache_map;
break;
+ case CPU_INTERAPTIV:
+ mipspmu.name = "mips/interAptiv";
+ mipspmu.general_event_map = &mipsxxcore_event_map;
+ mipspmu.cache_event_map = &mipsxxcore_cache_map;
+ break;
+ case CPU_PROAPTIV:
+ mipspmu.name = "mips/proAptiv";
+ mipspmu.general_event_map = &mipsxxcore_event_map2;
+ mipspmu.cache_event_map = &mipsxxcore_cache_map2;
+ break;
case CPU_LOONGSON1:
mipspmu.name = "mips/loongson1";
mipspmu.general_event_map = &mipsxxcore_event_map;
diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c
index acb3437..ba55ec9 100644
--- a/arch/mips/kernel/proc.c
+++ b/arch/mips/kernel/proc.c
@@ -35,9 +35,19 @@
*/
if (n == 0) {
seq_printf(m, "system type\t\t: %s\n", get_system_type());
+#ifndef CONFIG_GOLDFISH
if (mips_get_machine_name())
seq_printf(m, "machine\t\t\t: %s\n",
mips_get_machine_name());
+#else
+ /*
+ * This is needed by the Android init process to run
+ * target-specific startup code
+ */
+ seq_printf(m, "Hardware\t\t: %s\n", mips_get_machine_name());
+ seq_printf(m, "Revision\t\t: %d\n", 1);
+#endif
+
}
seq_printf(m, "processor\t\t: %ld\n", n);
@@ -81,10 +91,14 @@
seq_printf(m, "%s", " mips32r1");
if (cpu_has_mips32r2)
seq_printf(m, "%s", " mips32r2");
+ if (cpu_has_mips32r6)
+ seq_printf(m, "%s", " mips32r6");
if (cpu_has_mips64r1)
seq_printf(m, "%s", " mips64r1");
if (cpu_has_mips64r2)
seq_printf(m, "%s", " mips64r2");
+ if (cpu_has_mips64r6)
+ seq_printf(m, "%s", " mips64r6");
seq_printf(m, "\n");
}
@@ -98,22 +112,53 @@
if (cpu_has_mipsmt) seq_printf(m, "%s", " mt");
if (cpu_has_mmips) seq_printf(m, "%s", " micromips");
if (cpu_has_vz) seq_printf(m, "%s", " vz");
+ if (cpu_has_eva) seq_printf(m, "%s", " eva");
+ if (cpu_has_htw) seq_printf(m, "%s", " htw");
+ if (cpu_has_msa) seq_printf(m, "%s", " msa");
seq_printf(m, "\n");
- if (cpu_has_mmips) {
- seq_printf(m, "micromips kernel\t: %s\n",
- (read_c0_config3() & MIPS_CONF3_ISA_OE) ? "yes" : "no");
- }
+
seq_printf(m, "shadow register sets\t: %d\n",
cpu_data[n].srsets);
seq_printf(m, "kscratch registers\t: %d\n",
hweight8(cpu_data[n].kscratch_mask));
seq_printf(m, "core\t\t\t: %d\n", cpu_data[n].core);
+#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
+ if (cpu_has_mipsmt) {
+ seq_printf(m, "VPE\t\t\t: %d\n", cpu_data[n].vpe_id);
+#if defined(CONFIG_MIPS_MT_SMTC)
+ seq_printf(m, "TC\t\t\t: %d\n", cpu_data[n].tc_id);
+#endif
+ }
+#endif
sprintf(fmt, "VCE%%c exceptions\t\t: %s\n",
cpu_has_vce ? "%u" : "not available");
seq_printf(m, fmt, 'D', vced_count);
seq_printf(m, fmt, 'I', vcei_count);
+
+ seq_printf(m, "kernel modes\t\t:");
+#ifdef CONFIG_64BIT
+ seq_printf(m, " 64bit");
+#else
+ seq_printf(m, " 32bit");
+#endif
+#ifdef CONFIG_64BIT_PHYS_ADDR
+ seq_printf(m, " 64bit-address");
+#endif
+#ifdef CONFIG_EVA
+ seq_printf(m, " eva");
+#endif
+#ifdef CONFIG_HIGHMEM
+ seq_printf(m, " highmem");
+#endif
+ if (cpu_has_mmips && (read_c0_config3() & MIPS_CONF3_ISA_OE))
+ seq_printf(m, " micromips");
+#ifdef CONFIG_SMP
+ seq_printf(m, " smp");
+#endif
+ seq_printf(m, "\n");
+
seq_printf(m, "\n");
return 0;
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index c6a041d..d005082 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -32,6 +32,7 @@
#include <asm/cpu.h>
#include <asm/dsp.h>
#include <asm/fpu.h>
+#include <asm/msa.h>
#include <asm/pgtable.h>
#include <asm/mipsregs.h>
#include <asm/processor.h>
@@ -41,6 +42,7 @@
#include <asm/isadep.h>
#include <asm/inst.h>
#include <asm/stacktrace.h>
+#include <asm/vdso.h>
#ifdef CONFIG_HOTPLUG_CPU
void arch_cpu_idle_dead(void)
@@ -58,29 +60,56 @@
{
unsigned long status;
+ mips_thread_vdso(current_thread_info());
+
/* New thread loses kernel privileges. */
status = regs->cp0_status & ~(ST0_CU0|ST0_CU1|ST0_FR|KU_MASK);
-#ifdef CONFIG_64BIT
- status |= test_thread_flag(TIF_32BIT_REGS) ? 0 : ST0_FR;
-#endif
status |= KU_USER;
regs->cp0_status = status;
clear_used_math();
clear_fpu_owner();
- if (cpu_has_dsp)
- __init_dsp();
+ init_dsp();
+ clear_thread_flag(TIF_USEDMSA);
+ clear_thread_flag(TIF_MSA_CTX_LIVE);
+ disable_msa();
regs->cp0_epc = pc;
regs->regs[29] = sp;
}
void exit_thread(void)
{
+ arch_release_thread_info(current_thread_info());
}
void flush_thread(void)
{
}
+int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
+{
+ /*
+ * Save any process state which is live in hardware registers to the
+ * parent context prior to duplication. This prevents the new child
+ * state becoming stale if the parent is preempted before copy_thread()
+ * gets a chance to save the parent's live hardware registers to the
+ * child context.
+ */
+ preempt_disable();
+
+ if (is_msa_enabled())
+ save_msa(current);
+ else if (is_fpu_owner())
+ _save_fp(current);
+
+ if (cpu_has_dsp)
+ save_dsp(current);
+
+ preempt_enable();
+
+ *dst = *src;
+ return 0;
+}
+
int copy_thread(unsigned long clone_flags, unsigned long usp,
unsigned long arg, struct task_struct *p)
{
@@ -91,15 +120,8 @@
childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32;
- preempt_disable();
-
- if (is_fpu_owner())
- save_fp(p);
-
- if (cpu_has_dsp)
- save_dsp(p);
-
- preempt_enable();
+ ti->vdso_page = NULL;
+ mips_thread_vdso(ti);
/* set up new TSS. */
childregs = (struct pt_regs *) childksp - 1;
@@ -147,6 +169,8 @@
childregs->cp0_tcstatus &= ~(ST0_CU2|ST0_CU1);
#endif
clear_tsk_thread_flag(p, TIF_USEDFPU);
+ clear_tsk_thread_flag(p, TIF_USEDMSA);
+ clear_tsk_thread_flag(p, TIF_MSA_CTX_LIVE);
#ifdef CONFIG_MIPS_MT_FPAFF
clear_tsk_thread_flag(p, TIF_FPUBOUND);
@@ -161,7 +185,13 @@
/* Fill in the fpu structure for a core dump.. */
int dump_fpu(struct pt_regs *regs, elf_fpregset_t *r)
{
- memcpy(r, &current->thread.fpu, sizeof(current->thread.fpu));
+ int i;
+ elf_greg_t fr, *p = (elf_greg_t *)r;
+
+ for (i = 0; i < NUM_FPU_REGS; i++) {
+ fr = get_fpr64(&current->thread.fpu.fpr[i], 0);
+ p[i] = fr;
+ }
return 1;
}
@@ -196,7 +226,13 @@
int dump_task_fpu(struct task_struct *t, elf_fpregset_t *fpr)
{
- memcpy(fpr, &t->thread.fpu, sizeof(current->thread.fpu));
+ int i;
+ elf_greg_t fr, *p = (elf_greg_t *)fpr;
+
+ for (i = 0; i < NUM_FPU_REGS; i++) {
+ fr = get_fpr64(&t->thread.fpu.fpr[i], 0);
+ p[i] = fr;
+ }
return 1;
}
@@ -282,10 +318,20 @@
return 1;
if (ip->j_format.opcode == jal_op)
return 1;
+#ifdef CONFIG_CPU_MIPSR6
+ if (((ip->i_format.opcode == jump_op) || /* jic */
+ (ip->i_format.opcode == jump2_op)) && /* jialc */
+ (ip->i_format.rs == 0))
+ return 1;
+ if (ip->r_format.opcode != spec_op)
+ return 0;
+ return ((ip->r_format.func == jalr_op) && !ip->r_format.rt);
+#else
if (ip->r_format.opcode != spec_op)
return 0;
return ip->r_format.func == jalr_op || ip->r_format.func == jr_op;
#endif
+#endif
}
static inline int is_sp_move_ins(union mips_instruction *ip)
diff --git a/arch/mips/kernel/prom.c b/arch/mips/kernel/prom.c
index 5712bb5..3b0bf5d 100644
--- a/arch/mips/kernel/prom.c
+++ b/arch/mips/kernel/prom.c
@@ -58,8 +58,7 @@
}
#ifdef CONFIG_BLK_DEV_INITRD
-void __init early_init_dt_setup_initrd_arch(unsigned long start,
- unsigned long end)
+void __init early_init_dt_setup_initrd_arch(u64 start, u64 end)
{
initrd_start = (unsigned long)__va(start);
initrd_end = (unsigned long)__va(end);
@@ -102,7 +101,11 @@
void __init __dt_setup_arch(struct boot_param_header *bph)
{
if (be32_to_cpu(bph->magic) != OF_DT_HEADER) {
+#ifdef CONFIG_MIPS_APPENDED_DTB
+ pr_err("DTB has bad magic, ignoring appended OF DTB\n");
+#else
pr_err("DTB has bad magic, ignoring builtin OF DTB\n");
+#endif
return;
}
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index 9c6299c..bb7523f 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -105,60 +105,42 @@
int ptrace_getfpregs(struct task_struct *child, __u32 __user *data)
{
int i;
- unsigned int tmp;
if (!access_ok(VERIFY_WRITE, data, 33 * 8))
return -EIO;
if (tsk_used_math(child)) {
- fpureg_t *fregs = get_fpu_regs(child);
- for (i = 0; i < 32; i++)
- __put_user(fregs[i], i + (__u64 __user *) data);
+ union fpureg *fregs = get_fpu_regs(child);
+ for (i = 0; i < NUM_FPU_REGS; i++)
+ __put_user(get_fpr64(&fregs[i], 0),
+ i + (__u64 __user *)data);
} else {
- for (i = 0; i < 32; i++)
+ for (i = 0; i < NUM_FPU_REGS; i++)
__put_user((__u64) -1, i + (__u64 __user *) data);
}
__put_user(child->thread.fpu.fcr31, data + 64);
-
- preempt_disable();
- if (cpu_has_fpu) {
- unsigned int flags;
-
- if (cpu_has_mipsmt) {
- unsigned int vpflags = dvpe();
- flags = read_c0_status();
- __enable_fpu();
- __asm__ __volatile__("cfc1\t%0,$0" : "=r" (tmp));
- write_c0_status(flags);
- evpe(vpflags);
- } else {
- flags = read_c0_status();
- __enable_fpu();
- __asm__ __volatile__("cfc1\t%0,$0" : "=r" (tmp));
- write_c0_status(flags);
- }
- } else {
- tmp = 0;
- }
- preempt_enable();
- __put_user(tmp, data + 65);
+ __put_user(current_cpu_data.fpu_id, data + 65);
return 0;
}
int ptrace_setfpregs(struct task_struct *child, __u32 __user *data)
{
- fpureg_t *fregs;
+ union fpureg *fregs;
+ u64 fpr_val;
int i;
if (!access_ok(VERIFY_READ, data, 33 * 8))
return -EIO;
+ init_fp_ctx(child);
fregs = get_fpu_regs(child);
- for (i = 0; i < 32; i++)
- __get_user(fregs[i], i + (__u64 __user *) data);
+ for (i = 0; i < NUM_FPU_REGS; i++) {
+ __get_user(fpr_val, i + (__u64 __user *)data);
+ set_fpr64(&fregs[i], 0, fpr_val);
+ }
__get_user(child->thread.fpu.fcr31, data + 64);
@@ -222,14 +204,14 @@
for (i = 0; i < current_cpu_data.watch_reg_use_cnt; i++) {
__get_user(lt[i], &addr->WATCH_STYLE.watchlo[i]);
#ifdef CONFIG_32BIT
- if (lt[i] & __UA_LIMIT)
+ if (lt[i] & USER_DS.seg)
return -EINVAL;
#else
if (test_tsk_thread_flag(child, TIF_32BIT_ADDR)) {
if (lt[i] & 0xffffffff80000000UL)
return -EINVAL;
} else {
- if (lt[i] & __UA_LIMIT)
+ if (lt[i] & USER_DS.seg)
return -EINVAL;
}
#endif
@@ -272,6 +254,7 @@
/* Read the word at location addr in the USER area. */
case PTRACE_PEEKUSR: {
struct pt_regs *regs;
+ union fpureg *fregs;
unsigned long tmp = 0;
regs = task_pt_regs(child);
@@ -282,26 +265,26 @@
tmp = regs->regs[addr];
break;
case FPR_BASE ... FPR_BASE + 31:
- if (tsk_used_math(child)) {
- fpureg_t *fregs = get_fpu_regs(child);
+ if (!tsk_used_math(child)) {
+ /* FP not yet used */
+ tmp = -1;
+ break;
+ }
+ fregs = get_fpu_regs(child);
#ifdef CONFIG_32BIT
+ if (!test_thread_local_flags(LTIF_FPU_FR)) {
/*
* The odd registers are actually the high
* order bits of the values stored in the even
* registers - unless we're using r2k_switch.S.
*/
- if (addr & 1)
- tmp = (unsigned long) (fregs[((addr & ~1) - 32)] >> 32);
- else
- tmp = (unsigned long) (fregs[(addr - 32)] & 0xffffffff);
-#endif
-#ifdef CONFIG_64BIT
- tmp = fregs[addr - FPR_BASE];
-#endif
- } else {
- tmp = -1; /* FP not yet used */
+ tmp = get_fpr32(&fregs[(addr & ~1) - FPR_BASE],
+ addr & 1);
+ break;
}
+#endif
+ tmp = get_fpr32(&fregs[addr - FPR_BASE], 0);
break;
case PC:
tmp = regs->cp0_epc;
@@ -326,44 +309,11 @@
case FPC_CSR:
tmp = child->thread.fpu.fcr31;
break;
- case FPC_EIR: { /* implementation / version register */
- unsigned int flags;
-#ifdef CONFIG_MIPS_MT_SMTC
- unsigned long irqflags;
- unsigned int mtflags;
-#endif /* CONFIG_MIPS_MT_SMTC */
-
- preempt_disable();
- if (!cpu_has_fpu) {
- preempt_enable();
- break;
- }
-
-#ifdef CONFIG_MIPS_MT_SMTC
- /* Read-modify-write of Status must be atomic */
- local_irq_save(irqflags);
- mtflags = dmt();
-#endif /* CONFIG_MIPS_MT_SMTC */
- if (cpu_has_mipsmt) {
- unsigned int vpflags = dvpe();
- flags = read_c0_status();
- __enable_fpu();
- __asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp));
- write_c0_status(flags);
- evpe(vpflags);
- } else {
- flags = read_c0_status();
- __enable_fpu();
- __asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp));
- write_c0_status(flags);
- }
-#ifdef CONFIG_MIPS_MT_SMTC
- emt(mtflags);
- local_irq_restore(irqflags);
-#endif /* CONFIG_MIPS_MT_SMTC */
- preempt_enable();
+ case FPC_EIR:
+ /* implementation / version register */
+ tmp = current_cpu_data.fpu_id;
break;
- }
+#ifndef CONFIG_CPU_MIPSR6
case DSP_BASE ... DSP_BASE + 5: {
dspreg_t *dregs;
@@ -384,6 +334,7 @@
}
tmp = child->thread.dsp.dspcontrol;
break;
+#endif /* CONFIG_CPU_MIPSR6 */
default:
tmp = 0;
ret = -EIO;
@@ -409,31 +360,22 @@
regs->regs[addr] = data;
break;
case FPR_BASE ... FPR_BASE + 31: {
- fpureg_t *fregs = get_fpu_regs(child);
+ union fpureg *fregs = get_fpu_regs(child);
- if (!tsk_used_math(child)) {
- /* FP not yet used */
- memset(&child->thread.fpu, ~0,
- sizeof(child->thread.fpu));
- child->thread.fpu.fcr31 = 0;
- }
+ init_fp_ctx(child);
#ifdef CONFIG_32BIT
- /*
- * The odd registers are actually the high order bits
- * of the values stored in the even registers - unless
- * we're using r2k_switch.S.
- */
- if (addr & 1) {
- fregs[(addr & ~1) - FPR_BASE] &= 0xffffffff;
- fregs[(addr & ~1) - FPR_BASE] |= ((unsigned long long) data) << 32;
- } else {
- fregs[addr - FPR_BASE] &= ~0xffffffffLL;
- fregs[addr - FPR_BASE] |= data;
+ if (!test_thread_local_flags(LTIF_FPU_FR)) {
+ /*
+ * The odd registers are actually the high
+ * order bits of the values stored in the even
+ * registers - unless we're using r2k_switch.S.
+ */
+ set_fpr32(&fregs[(addr & ~1) - FPR_BASE],
+ addr & 1, data);
+ break;
}
#endif
-#ifdef CONFIG_64BIT
- fregs[addr - FPR_BASE] = data;
-#endif
+ set_fpr64(&fregs[addr - FPR_BASE], 0, data);
break;
}
case PC:
@@ -453,6 +395,7 @@
case FPC_CSR:
child->thread.fpu.fcr31 = data;
break;
+#ifndef CONFIG_CPU_MIPSR6
case DSP_BASE ... DSP_BASE + 5: {
dspreg_t *dregs;
@@ -472,6 +415,7 @@
}
child->thread.dsp.dspcontrol = data;
break;
+#endif /* CONFIG_CPU_MIPSR6 */
default:
/* The rest are not allowed. */
ret = -EIO;
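
The get_fpr32()/set_fpr64() accessors introduced above hide the FR=0 register
pairing: with the FPU in 32-bit mode, each 64-bit FP register is backed by an
even/odd pair of 32-bit halves, the odd half carrying the high-order bits. A
minimal sketch of such accessors, assuming a simple two-word layout (the real
definitions live in the arch headers; this is illustrative only):

    #include <linux/types.h>

    union fpureg {
            __u32 val32[2];
            __u64 val64[1];
    };

    /* Half 0 is the low-order word, so on big endian it sits in the
     * second array slot. */
    #ifdef __MIPSEB__
    # define FPR_IDX(idx) (1 - (idx))
    #else
    # define FPR_IDX(idx) (idx)
    #endif

    static inline u32 get_fpr32(const union fpureg *fpr, unsigned int idx)
    {
            return fpr->val32[FPR_IDX(idx)];
    }

    static inline void set_fpr32(union fpureg *fpr, unsigned int idx, u32 val)
    {
            fpr->val32[FPR_IDX(idx)] = val;
    }

    static inline u64 get_fpr64(const union fpureg *fpr, unsigned int idx)
    {
            return fpr->val64[idx];
    }

    static inline void set_fpr64(union fpureg *fpr, unsigned int idx, u64 val)
    {
            fpr->val64[idx] = val;
    }

Reading odd register 2n+1 in FR=0 mode thus becomes get_fpr32(&fregs[2n], 1),
which is exactly the "(addr & ~1) - FPR_BASE, addr & 1" indexing used in the
PTRACE_PEEKUSR and PTRACE_POKEUSR hunks above.
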
diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c
index 9486055..10bd16f 100644
--- a/arch/mips/kernel/ptrace32.c
+++ b/arch/mips/kernel/ptrace32.c
@@ -80,6 +80,7 @@
/* Read the word at location addr in the USER area. */
case PTRACE_PEEKUSR: {
struct pt_regs *regs;
+ union fpureg *fregs;
unsigned int tmp;
regs = task_pt_regs(child);
@@ -90,21 +91,23 @@
tmp = regs->regs[addr];
break;
case FPR_BASE ... FPR_BASE + 31:
- if (tsk_used_math(child)) {
- fpureg_t *fregs = get_fpu_regs(child);
-
+ if (!tsk_used_math(child)) {
+ /* FP not yet used */
+ tmp = -1;
+ break;
+ }
+ fregs = get_fpu_regs(child);
+ if (!test_thread_local_flags(LTIF_FPU_FR)) {
/*
* The odd registers are actually the high
* order bits of the values stored in the even
* registers - unless we're using r2k_switch.S.
*/
- if (addr & 1)
- tmp = (unsigned long) (fregs[((addr & ~1) - 32)] >> 32);
- else
- tmp = (unsigned long) (fregs[(addr - 32)] & 0xffffffff);
- } else {
- tmp = -1; /* FP not yet used */
+ tmp = get_fpr32(&fregs[(addr & ~1) - FPR_BASE],
+ addr & 1);
+ break;
}
+ tmp = get_fpr32(&fregs[addr - FPR_BASE], 0);
break;
case PC:
tmp = regs->cp0_epc;
@@ -124,46 +127,11 @@
case FPC_CSR:
tmp = child->thread.fpu.fcr31;
break;
- case FPC_EIR: { /* implementation / version register */
- unsigned int flags;
-#ifdef CONFIG_MIPS_MT_SMTC
- unsigned int irqflags;
- unsigned int mtflags;
-#endif /* CONFIG_MIPS_MT_SMTC */
-
- preempt_disable();
- if (!cpu_has_fpu) {
- preempt_enable();
- tmp = 0;
- break;
- }
-
-#ifdef CONFIG_MIPS_MT_SMTC
- /* Read-modify-write of Status must be atomic */
- local_irq_save(irqflags);
- mtflags = dmt();
-#endif /* CONFIG_MIPS_MT_SMTC */
-
- if (cpu_has_mipsmt) {
- unsigned int vpflags = dvpe();
- flags = read_c0_status();
- __enable_fpu();
- __asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp));
- write_c0_status(flags);
- evpe(vpflags);
- } else {
- flags = read_c0_status();
- __enable_fpu();
- __asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp));
- write_c0_status(flags);
- }
-#ifdef CONFIG_MIPS_MT_SMTC
- emt(mtflags);
- local_irq_restore(irqflags);
-#endif /* CONFIG_MIPS_MT_SMTC */
- preempt_enable();
+ case FPC_EIR:
+ /* implementation / version register */
+ tmp = current_cpu_data.fpu_id;
break;
- }
+#ifndef CONFIG_CPU_MIPSR6
case DSP_BASE ... DSP_BASE + 5: {
dspreg_t *dregs;
@@ -184,6 +152,7 @@
}
tmp = child->thread.dsp.dspcontrol;
break;
+#endif
default:
tmp = 0;
ret = -EIO;
@@ -228,28 +197,21 @@
regs->regs[addr] = data;
break;
case FPR_BASE ... FPR_BASE + 31: {
- fpureg_t *fregs = get_fpu_regs(child);
+ union fpureg *fregs = get_fpu_regs(child);
- if (!tsk_used_math(child)) {
- /* FP not yet used */
- memset(&child->thread.fpu, ~0,
- sizeof(child->thread.fpu));
- child->thread.fpu.fcr31 = 0;
+ init_fp_ctx(child);
+
+ if (!test_thread_local_flags(LTIF_FPU_FR)) {
+ /*
+ * The odd registers are actually the high
+ * order bits of the values stored in the even
+ * registers - unless we're using r2k_switch.S.
+ */
+ set_fpr32(&fregs[(addr & ~1) - FPR_BASE],
+ addr & 1, data);
+ break;
}
- /*
- * The odd registers are actually the high order bits
- * of the values stored in the even registers - unless
- * we're using r2k_switch.S.
- */
- if (addr & 1) {
- fregs[(addr & ~1) - FPR_BASE] &= 0xffffffff;
- fregs[(addr & ~1) - FPR_BASE] |= ((unsigned long long) data) << 32;
- } else {
- fregs[addr - FPR_BASE] &= ~0xffffffffLL;
- /* Must cast, lest sign extension fill upper
- bits! */
- fregs[addr - FPR_BASE] |= (unsigned int)data;
- }
+ set_fpr64(&fregs[addr - FPR_BASE], 0, data);
break;
}
case PC:
@@ -264,6 +226,7 @@
case FPC_CSR:
child->thread.fpu.fcr31 = data;
break;
+#ifndef CONFIG_CPU_MIPSR6
case DSP_BASE ... DSP_BASE + 5: {
dspreg_t *dregs;
@@ -283,6 +246,7 @@
}
child->thread.dsp.dspcontrol = data;
break;
+#endif
default:
/* The rest are not allowed. */
ret = -EIO;
diff --git a/arch/mips/kernel/r2300_fpu.S b/arch/mips/kernel/r2300_fpu.S
index f31063d..5ce3b74 100644
--- a/arch/mips/kernel/r2300_fpu.S
+++ b/arch/mips/kernel/r2300_fpu.S
@@ -28,6 +28,8 @@
.set mips1
/* Save floating point context */
LEAF(_save_fp_context)
+ .set push
+ SET_HARDFLOAT
li v0, 0 # assume success
cfc1 t1,fcr31
EX(swc1 $f0,(SC_FPREGS+0)(a0))
@@ -65,6 +67,7 @@
EX(sw t1,(SC_FPC_CSR)(a0))
cfc1 t0,$0 # implementation/version
jr ra
+ .set pop
.set nomacro
EX(sw t0,(SC_FPC_EIR)(a0))
.set macro
@@ -80,6 +83,8 @@
* stack frame which might have been changed by the user.
*/
LEAF(_restore_fp_context)
+ .set push
+ SET_HARDFLOAT
li v0, 0 # assume success
EX(lw t0,(SC_FPC_CSR)(a0))
EX(lwc1 $f0,(SC_FPREGS+0)(a0))
@@ -116,6 +121,7 @@
EX(lwc1 $f31,(SC_FPREGS+248)(a0))
jr ra
ctc1 t0,fcr31
+ .set pop
END(_restore_fp_context)
.set reorder
diff --git a/arch/mips/kernel/r2300_switch.S b/arch/mips/kernel/r2300_switch.S
index 5266c6e..eaa3cd6 100644
--- a/arch/mips/kernel/r2300_switch.S
+++ b/arch/mips/kernel/r2300_switch.S
@@ -22,6 +22,7 @@
#include <asm/asmmacro.h>
.set mips1
+ .set hardfloat
.align 5
/*
@@ -113,6 +114,9 @@
#define FPU_DEFAULT 0x00000000
+ .set push
+ SET_HARDFLOAT
+
LEAF(_init_fpu)
mfc0 t0, CP0_STATUS
li t1, ST0_CU1
@@ -158,3 +162,5 @@
mtc1 t0, $f31
jr ra
END(_init_fpu)
+
+ .set pop
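
The ".set push / SET_HARDFLOAT / ... / .set pop" brackets added throughout
these handlers exist so the FP instructions still assemble when the kernel is
otherwise built for soft-float. A plausible shape for the macro, assuming the
usual asm header convention (the guard name below is hypothetical; the real
one depends on the toolchain and ISA checks in the header):

    /* Newer assemblers reject FP instructions unless told otherwise;
     * older ones accept them unconditionally, so the macro may be
     * empty there. */
    #ifdef GAS_HAS_SET_HARDFLOAT /* hypothetical guard */
    # define SET_HARDFLOAT .set hardfloat
    #else
    # define SET_HARDFLOAT
    #endif
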
diff --git a/arch/mips/kernel/r4k_fpu.S b/arch/mips/kernel/r4k_fpu.S
index 55ffe14..42ef55c 100644
--- a/arch/mips/kernel/r4k_fpu.S
+++ b/arch/mips/kernel/r4k_fpu.S
@@ -13,14 +13,19 @@
* Copyright (C) 1999, 2001 Silicon Graphics, Inc.
*/
#include <asm/asm.h>
+#include <asm/asmmacro.h>
#include <asm/errno.h>
#include <asm/fpregdef.h>
#include <asm/mipsregs.h>
#include <asm/asm-offsets.h>
#include <asm/regdef.h>
+/* preprocessor replaces the fp in ".set fp=64" with $30 otherwise */
+#undef fp
+
.macro EX insn, reg, src
.set push
+ SET_HARDFLOAT
.set nomacro
.ex\@: \insn \reg, \src
.set pop
@@ -30,12 +35,18 @@
.endm
.set noreorder
- .set mips3
+#ifdef CONFIG_CPU_MIPSR6
+ .set mips64r6
+#else
+ .set arch=r4000
+#endif
+ SET_HARDFLOAT
LEAF(_save_fp_context)
- cfc1 t1, fcr31
+ PTR_L t2, TI_TASK($28)
+ fpu_get_fcr31 t2 t0 t1
-#ifdef CONFIG_64BIT
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPSR6)
/* Store the 16 odd double precision registers */
EX sdc1 $f1, SC_FPREGS+8(a0)
EX sdc1 $f3, SC_FPREGS+24(a0)
@@ -53,6 +64,38 @@
EX sdc1 $f27, SC_FPREGS+216(a0)
EX sdc1 $f29, SC_FPREGS+232(a0)
EX sdc1 $f31, SC_FPREGS+248(a0)
+#else
+#ifdef CONFIG_CPU_MIPS32_R2
+ .set push
+ .set mips32r2
+ .set fp=64
+ SET_HARDFLOAT
+ .set noreorder
+ mfc0 t0, CP0_STATUS
+ sll t0, t0, 31 - _ST0_FR
+ bgez t0, 1f # 16 / 32 register mode?
+ nop
+
+ /* Store the 16 odd double precision registers */
+ EX sdc1 $f1, SC_FPREGS+8(a0)
+ EX sdc1 $f3, SC_FPREGS+24(a0)
+ EX sdc1 $f5, SC_FPREGS+40(a0)
+ EX sdc1 $f7, SC_FPREGS+56(a0)
+ EX sdc1 $f9, SC_FPREGS+72(a0)
+ EX sdc1 $f11, SC_FPREGS+88(a0)
+ EX sdc1 $f13, SC_FPREGS+104(a0)
+ EX sdc1 $f15, SC_FPREGS+120(a0)
+ EX sdc1 $f17, SC_FPREGS+136(a0)
+ EX sdc1 $f19, SC_FPREGS+152(a0)
+ EX sdc1 $f21, SC_FPREGS+168(a0)
+ EX sdc1 $f23, SC_FPREGS+184(a0)
+ EX sdc1 $f25, SC_FPREGS+200(a0)
+ EX sdc1 $f27, SC_FPREGS+216(a0)
+ EX sdc1 $f29, SC_FPREGS+232(a0)
+ EX sdc1 $f31, SC_FPREGS+248(a0)
+1:
+ .set pop
+#endif
#endif
/* Store the 16 even double precision registers */
@@ -80,7 +123,32 @@
#ifdef CONFIG_MIPS32_COMPAT
/* Save 32-bit process floating point context */
LEAF(_save_fp_context32)
- cfc1 t1, fcr31
+ PTR_L t2, TI_TASK($28)
+ fpu_get_fcr31 t2 t0 t1
+
+ mfc0 t0, CP0_STATUS
+ sll t0, t0, 31 - _ST0_FR
+ bgez t0, 1f # 16 / 32 register mode?
+ nop
+
+ /* Store the 16 odd double precision registers */
+ EX sdc1 $f1, SC32_FPREGS+8(a0)
+ EX sdc1 $f3, SC32_FPREGS+24(a0)
+ EX sdc1 $f5, SC32_FPREGS+40(a0)
+ EX sdc1 $f7, SC32_FPREGS+56(a0)
+ EX sdc1 $f9, SC32_FPREGS+72(a0)
+ EX sdc1 $f11, SC32_FPREGS+88(a0)
+ EX sdc1 $f13, SC32_FPREGS+104(a0)
+ EX sdc1 $f15, SC32_FPREGS+120(a0)
+ EX sdc1 $f17, SC32_FPREGS+136(a0)
+ EX sdc1 $f19, SC32_FPREGS+152(a0)
+ EX sdc1 $f21, SC32_FPREGS+168(a0)
+ EX sdc1 $f23, SC32_FPREGS+184(a0)
+ EX sdc1 $f25, SC32_FPREGS+200(a0)
+ EX sdc1 $f27, SC32_FPREGS+216(a0)
+ EX sdc1 $f29, SC32_FPREGS+232(a0)
+ EX sdc1 $f31, SC32_FPREGS+248(a0)
+1:
EX sdc1 $f0, SC32_FPREGS+0(a0)
EX sdc1 $f2, SC32_FPREGS+16(a0)
@@ -114,7 +182,7 @@
*/
LEAF(_restore_fp_context)
EX lw t0, SC_FPC_CSR(a0)
-#ifdef CONFIG_64BIT
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPSR6)
EX ldc1 $f1, SC_FPREGS+8(a0)
EX ldc1 $f3, SC_FPREGS+24(a0)
EX ldc1 $f5, SC_FPREGS+40(a0)
@@ -131,6 +199,38 @@
EX ldc1 $f27, SC_FPREGS+216(a0)
EX ldc1 $f29, SC_FPREGS+232(a0)
EX ldc1 $f31, SC_FPREGS+248(a0)
+
+#else
+#ifdef CONFIG_CPU_MIPS32_R2
+ .set push
+ .set mips32r2
+ .set fp=64
+ SET_HARDFLOAT
+ .set noreorder
+ mfc0 t1, CP0_STATUS
+ sll t1, t1, 31 - _ST0_FR
+ bgez t1, 1f # 16 / 32 register mode?
+ nop
+
+ EX ldc1 $f1, SC_FPREGS+8(a0)
+ EX ldc1 $f3, SC_FPREGS+24(a0)
+ EX ldc1 $f5, SC_FPREGS+40(a0)
+ EX ldc1 $f7, SC_FPREGS+56(a0)
+ EX ldc1 $f9, SC_FPREGS+72(a0)
+ EX ldc1 $f11, SC_FPREGS+88(a0)
+ EX ldc1 $f13, SC_FPREGS+104(a0)
+ EX ldc1 $f15, SC_FPREGS+120(a0)
+ EX ldc1 $f17, SC_FPREGS+136(a0)
+ EX ldc1 $f19, SC_FPREGS+152(a0)
+ EX ldc1 $f21, SC_FPREGS+168(a0)
+ EX ldc1 $f23, SC_FPREGS+184(a0)
+ EX ldc1 $f25, SC_FPREGS+200(a0)
+ EX ldc1 $f27, SC_FPREGS+216(a0)
+ EX ldc1 $f29, SC_FPREGS+232(a0)
+ EX ldc1 $f31, SC_FPREGS+248(a0)
+1:
+ .set pop
+#endif
#endif
EX ldc1 $f0, SC_FPREGS+0(a0)
EX ldc1 $f2, SC_FPREGS+16(a0)
@@ -157,7 +257,31 @@
LEAF(_restore_fp_context32)
/* Restore an o32 sigcontext. */
EX lw t0, SC32_FPC_CSR(a0)
- EX ldc1 $f0, SC32_FPREGS+0(a0)
+
+ mfc0 t1, CP0_STATUS
+ sll t1, t1, 31 - _ST0_FR
+ bgez t1, 1f # 16 / 32 register mode?
+ nop
+
+ EX ldc1 $f1, SC32_FPREGS+8(a0)
+ EX ldc1 $f3, SC32_FPREGS+24(a0)
+ EX ldc1 $f5, SC32_FPREGS+40(a0)
+ EX ldc1 $f7, SC32_FPREGS+56(a0)
+ EX ldc1 $f9, SC32_FPREGS+72(a0)
+ EX ldc1 $f11, SC32_FPREGS+88(a0)
+ EX ldc1 $f13, SC32_FPREGS+104(a0)
+ EX ldc1 $f15, SC32_FPREGS+120(a0)
+ EX ldc1 $f17, SC32_FPREGS+136(a0)
+ EX ldc1 $f19, SC32_FPREGS+152(a0)
+ EX ldc1 $f21, SC32_FPREGS+168(a0)
+ EX ldc1 $f23, SC32_FPREGS+184(a0)
+ EX ldc1 $f25, SC32_FPREGS+200(a0)
+ EX ldc1 $f27, SC32_FPREGS+216(a0)
+ EX ldc1 $f29, SC32_FPREGS+232(a0)
+ EX ldc1 $f31, SC32_FPREGS+248(a0)
+1:
+
+ EX ldc1 $f0, SC32_FPREGS+0(a0)
EX ldc1 $f2, SC32_FPREGS+16(a0)
EX ldc1 $f4, SC32_FPREGS+32(a0)
EX ldc1 $f6, SC32_FPREGS+48(a0)
@@ -179,6 +303,285 @@
END(_restore_fp_context32)
#endif
+#ifdef CONFIG_CPU_HAS_MSA
+
+ .macro save_msa_upper wr, off, base
+ .set push
+ .set noat
+#ifdef CONFIG_64BIT
+ copy_u_d \wr, 1
+ EX sd $1, \off(\base)
+#elif defined(CONFIG_CPU_LITTLE_ENDIAN)
+ copy_u_w \wr, 2
+ EX sw $1, \off(\base)
+ copy_u_w \wr, 3
+ EX sw $1, (\off+4)(\base)
+#else /* CONFIG_CPU_BIG_ENDIAN */
+ copy_u_w \wr, 2
+ EX sw $1, (\off+4)(\base)
+ copy_u_w \wr, 3
+ EX sw $1, \off(\base)
+#endif
+ .set pop
+ .endm
+
+LEAF(_save_msa_all_upper)
+ save_msa_upper 0, 0x00, a0
+ save_msa_upper 1, 0x08, a0
+ save_msa_upper 2, 0x10, a0
+ save_msa_upper 3, 0x18, a0
+ save_msa_upper 4, 0x20, a0
+ save_msa_upper 5, 0x28, a0
+ save_msa_upper 6, 0x30, a0
+ save_msa_upper 7, 0x38, a0
+ save_msa_upper 8, 0x40, a0
+ save_msa_upper 9, 0x48, a0
+ save_msa_upper 10, 0x50, a0
+ save_msa_upper 11, 0x58, a0
+ save_msa_upper 12, 0x60, a0
+ save_msa_upper 13, 0x68, a0
+ save_msa_upper 14, 0x70, a0
+ save_msa_upper 15, 0x78, a0
+ save_msa_upper 16, 0x80, a0
+ save_msa_upper 17, 0x88, a0
+ save_msa_upper 18, 0x90, a0
+ save_msa_upper 19, 0x98, a0
+ save_msa_upper 20, 0xa0, a0
+ save_msa_upper 21, 0xa8, a0
+ save_msa_upper 22, 0xb0, a0
+ save_msa_upper 23, 0xb8, a0
+ save_msa_upper 24, 0xc0, a0
+ save_msa_upper 25, 0xc8, a0
+ save_msa_upper 26, 0xd0, a0
+ save_msa_upper 27, 0xd8, a0
+ save_msa_upper 28, 0xe0, a0
+ save_msa_upper 29, 0xe8, a0
+ save_msa_upper 30, 0xf0, a0
+ save_msa_upper 31, 0xf8, a0
+ jr ra
+ li v0, 0
+ END(_save_msa_all_upper)
+
+ .macro restore_msa_upper wr, off, base
+ .set push
+ .set noat
+#ifdef CONFIG_64BIT
+ EX ld $1, \off(\base)
+ insert_d \wr, 1
+#elif defined(CONFIG_CPU_LITTLE_ENDIAN)
+ EX lw $1, \off(\base)
+ insert_w \wr, 2
+ EX lw $1, (\off+4)(\base)
+ insert_w \wr, 3
+#else /* CONFIG_CPU_BIG_ENDIAN */
+ EX lw $1, (\off+4)(\base)
+ insert_w \wr, 2
+ EX lw $1, \off(\base)
+ insert_w \wr, 3
+#endif
+ .set pop
+ .endm
+
+LEAF(_restore_msa_all_upper)
+ restore_msa_upper 0, 0x00, a0
+ restore_msa_upper 1, 0x08, a0
+ restore_msa_upper 2, 0x10, a0
+ restore_msa_upper 3, 0x18, a0
+ restore_msa_upper 4, 0x20, a0
+ restore_msa_upper 5, 0x28, a0
+ restore_msa_upper 6, 0x30, a0
+ restore_msa_upper 7, 0x38, a0
+ restore_msa_upper 8, 0x40, a0
+ restore_msa_upper 9, 0x48, a0
+ restore_msa_upper 10, 0x50, a0
+ restore_msa_upper 11, 0x58, a0
+ restore_msa_upper 12, 0x60, a0
+ restore_msa_upper 13, 0x68, a0
+ restore_msa_upper 14, 0x70, a0
+ restore_msa_upper 15, 0x78, a0
+ restore_msa_upper 16, 0x80, a0
+ restore_msa_upper 17, 0x88, a0
+ restore_msa_upper 18, 0x90, a0
+ restore_msa_upper 19, 0x98, a0
+ restore_msa_upper 20, 0xa0, a0
+ restore_msa_upper 21, 0xa8, a0
+ restore_msa_upper 22, 0xb0, a0
+ restore_msa_upper 23, 0xb8, a0
+ restore_msa_upper 24, 0xc0, a0
+ restore_msa_upper 25, 0xc8, a0
+ restore_msa_upper 26, 0xd0, a0
+ restore_msa_upper 27, 0xd8, a0
+ restore_msa_upper 28, 0xe0, a0
+ restore_msa_upper 29, 0xe8, a0
+ restore_msa_upper 30, 0xf0, a0
+ restore_msa_upper 31, 0xf8, a0
+ jr ra
+ li v0, 0
+ END(_restore_msa_all_upper)
+
+ .macro restore_msa_uppers wr, off, base
+ .set push
+ .set noat
+#ifdef CONFIG_64BIT
+ ld $1, \off(\base)
+ insert_d \wr, 1
+#elif defined(CONFIG_CPU_LITTLE_ENDIAN)
+ lw $1, \off(\base)
+ insert_w \wr, 2
+ lw $1, (\off+4)(\base)
+ insert_w \wr, 3
+#else /* CONFIG_CPU_BIG_ENDIAN */
+ lw $1, (\off+4)(\base)
+ insert_w \wr, 2
+ lw $1, \off(\base)
+ insert_w \wr, 3
+#endif
+ .set pop
+ .endm
+
+LEAF(_restore_msa_uppers_from_thread)
+ restore_msa_uppers 0, 0x08, a0
+ restore_msa_uppers 1, 0x18, a0
+ restore_msa_uppers 2, 0x28, a0
+ restore_msa_uppers 3, 0x38, a0
+ restore_msa_uppers 4, 0x48, a0
+ restore_msa_uppers 5, 0x58, a0
+ restore_msa_uppers 6, 0x68, a0
+ restore_msa_uppers 7, 0x78, a0
+ restore_msa_uppers 8, 0x88, a0
+ restore_msa_uppers 9, 0x98, a0
+ restore_msa_uppers 10, 0xa8, a0
+ restore_msa_uppers 11, 0xb8, a0
+ restore_msa_uppers 12, 0xc8, a0
+ restore_msa_uppers 13, 0xd8, a0
+ restore_msa_uppers 14, 0xe8, a0
+ restore_msa_uppers 15, 0xf8, a0
+ restore_msa_uppers 16, 0x108, a0
+ restore_msa_uppers 17, 0x118, a0
+ restore_msa_uppers 18, 0x128, a0
+ restore_msa_uppers 19, 0x138, a0
+ restore_msa_uppers 20, 0x148, a0
+ restore_msa_uppers 21, 0x158, a0
+ restore_msa_uppers 22, 0x168, a0
+ restore_msa_uppers 23, 0x178, a0
+ restore_msa_uppers 24, 0x188, a0
+ restore_msa_uppers 25, 0x198, a0
+ restore_msa_uppers 26, 0x1a8, a0
+ restore_msa_uppers 27, 0x1b8, a0
+ restore_msa_uppers 28, 0x1c8, a0
+ restore_msa_uppers 29, 0x1d8, a0
+ restore_msa_uppers 30, 0x1e8, a0
+ restore_msa_uppers 31, 0x1f8, a0
+ jr ra
+ li v0, 0
+ END(_restore_msa_uppers_from_thread)
+
+ .macro msa_ld_d wd, base
+ ld_d \wd, 0, \base
+ jalr $0, $31
+ nop
+ .align 4
+ .endm
+
+ .macro msa_st_d wd, base
+ st_d \wd, 0, \base
+ jalr $0, $31
+ nop
+ .align 4
+ .endm
+
+LEAF(msa_to_wd)
+ .set push
+ .set noreorder
+ sll t0, a0, 4
+ PTR_LA t1, Lmsa_to
+ PTR_ADDU t0, t0, t1
+ jalr $0, t0
+ nop
+ .align 4
+Lmsa_to:
+ msa_ld_d 0, a1
+ msa_ld_d 1, a1
+ msa_ld_d 2, a1
+ msa_ld_d 3, a1
+ msa_ld_d 4, a1
+ msa_ld_d 5, a1
+ msa_ld_d 6, a1
+ msa_ld_d 7, a1
+ msa_ld_d 8, a1
+ msa_ld_d 9, a1
+ msa_ld_d 10, a1
+ msa_ld_d 11, a1
+ msa_ld_d 12, a1
+ msa_ld_d 13, a1
+ msa_ld_d 14, a1
+ msa_ld_d 15, a1
+ msa_ld_d 16, a1
+ msa_ld_d 17, a1
+ msa_ld_d 18, a1
+ msa_ld_d 19, a1
+ msa_ld_d 20, a1
+ msa_ld_d 21, a1
+ msa_ld_d 22, a1
+ msa_ld_d 23, a1
+ msa_ld_d 24, a1
+ msa_ld_d 25, a1
+ msa_ld_d 26, a1
+ msa_ld_d 27, a1
+ msa_ld_d 28, a1
+ msa_ld_d 29, a1
+ msa_ld_d 30, a1
+ msa_ld_d 31, a1
+ .set pop
+ END(msa_to_wd)
+
+LEAF(msa_from_wd)
+ .set push
+ .set noreorder
+ sll t0, a0, 4
+ PTR_LA t1, Lmsa_from
+ PTR_ADDU t0, t0, t1
+ jalr $0, t0
+ nop
+ .align 4
+Lmsa_from:
+ msa_st_d 0, a1
+ msa_st_d 1, a1
+ msa_st_d 2, a1
+ msa_st_d 3, a1
+ msa_st_d 4, a1
+ msa_st_d 5, a1
+ msa_st_d 6, a1
+ msa_st_d 7, a1
+ msa_st_d 8, a1
+ msa_st_d 9, a1
+ msa_st_d 10, a1
+ msa_st_d 11, a1
+ msa_st_d 12, a1
+ msa_st_d 13, a1
+ msa_st_d 14, a1
+ msa_st_d 15, a1
+ msa_st_d 16, a1
+ msa_st_d 17, a1
+ msa_st_d 18, a1
+ msa_st_d 19, a1
+ msa_st_d 20, a1
+ msa_st_d 21, a1
+ msa_st_d 22, a1
+ msa_st_d 23, a1
+ msa_st_d 24, a1
+ msa_st_d 25, a1
+ msa_st_d 26, a1
+ msa_st_d 27, a1
+ msa_st_d 28, a1
+ msa_st_d 29, a1
+ msa_st_d 30, a1
+ msa_st_d 31, a1
+ .set pop
+ END(msa_from_wd)
+
+#endif /* CONFIG_CPU_HAS_MSA */
+
.set reorder
.type fault@function
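
The save_msa_upper/restore_msa_upper macros spill only bits 127..64 of each
128-bit MSA register; the lower half aliases the scalar FP register and is
covered by the normal FP save path. A conceptual C view of the buffer they
fill, with the endian handling folded away (a sketch: read_msa_upper64() is a
hypothetical stand-in for the copy_u.w/copy_u.d sequences, which are unrolled
in assembly because the vector register number must be an immediate):

    #include <linux/types.h>

    struct msa_upper_ctx {
            u64 upper[32]; /* upper[n] holds wr[n] bits 127..64 */
    };

    static void save_msa_all_upper(struct msa_upper_ctx *ctx)
    {
            unsigned int n;

            for (n = 0; n < 32; n++)
                    ctx->upper[n] = read_msa_upper64(n); /* hypothetical */
    }

On 32-bit kernels each slot is written as two copy_u.w transfers whose order
depends on endianness, which is what the CONFIG_CPU_LITTLE_ENDIAN and
CONFIG_CPU_BIG_ENDIAN branches of the macros express.
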
diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S
index 5e51219..0d5c84bf 100644
--- a/arch/mips/kernel/r4k_switch.S
+++ b/arch/mips/kernel/r4k_switch.S
@@ -22,6 +22,7 @@
#include <asm/asmmacro.h>
+#define t4 $12
/*
* Offset to the current process status flags, the first 32 bytes of the
* stack are not used.
@@ -29,20 +30,11 @@
#define ST_OFF (_THREAD_SIZE - 32 - PT_SIZE + PT_STATUS)
/*
- * FPU context is saved iff the process has used it's FPU in the current
- * time slice as indicated by _TIF_USEDFPU. In any case, the CU1 bit for user
- * space STATUS register should be 0, so that a process *always* starts its
- * userland with FPU disabled after each context switch.
- *
- * FPU will be enabled as soon as the process accesses FPU again, through
- * do_cpu() trap.
- */
-
-/*
* task_struct *resume(task_struct *prev, task_struct *next,
- * struct thread_info *next_ti, int usedfpu)
+ * struct thread_info *next_ti, s32 fp_save)
*/
.align 5
+ .set hardfloat
LEAF(resume)
mfc0 t1, CP0_STATUS
LONG_S t1, THREAD_STATUS(a0)
@@ -50,24 +42,86 @@
LONG_S ra, THREAD_REG31(a0)
/*
- * check if we need to save FPU registers
+ * Check whether we need to save any FP context. FP context is saved
+ * iff the process has used the context with the scalar FPU or the MSA
+ * ASE in the current time slice, as indicated by _TIF_USEDFPU and
+ * _TIF_USEDMSA respectively. switch_to will have set fp_save
+ * accordingly to an FP_SAVE_ enum value.
*/
+ beqz a3, 3f
- beqz a3, 1f
-
- PTR_L t3, TASK_THREAD_INFO(a0)
/*
- * clear saved user stack CU1 bit
+ * We do. Clear the saved CU1 bit for prev, such that next time it is
+ * scheduled it will start in userland with the FPU disabled. If the
+ * task uses the FPU then it will be enabled again via the do_cpu trap.
+ * This allows us to lazily restore the FP context.
*/
+ PTR_L t3, TASK_THREAD_INFO(a0)
LONG_L t0, ST_OFF(t3)
li t1, ~ST0_CU1
and t0, t0, t1
LONG_S t0, ST_OFF(t3)
+ /* Now copy FR from it */
+
+#if defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS32_R6) || defined(CONFIG_64BIT)
+#ifdef CONFIG_MIPS_MT_SMTC
+
+ li t3, ST0_FR
+ mfc0 t2, CP0_TCSTATUS
+ nor t1, $0, t3
+ and t0, t0, t3 # extract FR from prev
+ and t3, t2, t1
+ or t0, t0, t3
+ mtc0 t0, CP0_TCSTATUS
+ enable_fpu_hazard
+
+ fpu_save_double a0 t0 t1 # c0_status passed in t0
+ # clobbers t1
+ mtc0 t2, CP0_TCSTATUS
+#else
+ li t3, ST0_FR
+ mfc0 t2, CP0_STATUS
+ nor t1, $0, t3
+ and t0, t0, t3 # extract FR from prev
+ and t3, t2, t1
+ or t0, t0, t3
+ mtc0 t0, CP0_STATUS
+ enable_fpu_hazard
+
+ /* Check whether we're saving scalar or vector context. */
+ andi a3, a3, 0x2
+ beqz a3, 1f
+
+ /* Save 128b MSA vector context + scalar FP control & status. */
+ .set push
+ SET_HARDFLOAT
+ fpu_get_fcr31 a0 t0 t1
+ msa_save_all a0
+ .set pop /* SET_HARDFLOAT */
+ li t4, MIPS_CONF5_MSAEN
+ mfc0 t3, CP0_CONFIG, 5
+ or t3, t3, t4
+ xor t3, t3, t4
+ mtc0 t3, CP0_CONFIG, 5
+
+ sw t1, THREAD_FCR31(a0)
+ b 2f
+
+1: /* Save 32b/64b scalar FP context. */
fpu_save_double a0 t0 t1 # c0_status passed in t0
# clobbers t1
-1:
+2:
+ mtc0 t2, CP0_STATUS
+#endif /* CONFIG_MIPS_MT_SMTC */
+#else
+
+ fpu_save_double a0 t0 t1 # c0_status passed in t0
+ # clobbers t1
+#endif
+
+3:
/*
* The order of restoring the registers takes care of the race
* updating $28, $29 and kernelsp without disabling ints.
@@ -107,17 +161,21 @@
xori t1, t1, TCSTATUS_IXMT
or t1, t1, t2
mtc0 t1, CP0_TCSTATUS
- _ehb
#endif /* CONFIG_MIPS_MT_SMTC */
move v0, a0
- jr ra
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
+ jr.hb ra
+#else
+ _ehb
+ jr ra
+#endif
END(resume)
/*
* Save a thread's fp context.
*/
LEAF(_save_fp)
-#ifdef CONFIG_64BIT
+#if defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS32_R6) || defined(CONFIG_64BIT)
mfc0 t0, CP0_STATUS
#endif
fpu_save_double a0 t0 t1 # clobbers t1
@@ -128,13 +186,38 @@
* Restore a thread's fp context.
*/
LEAF(_restore_fp)
-#ifdef CONFIG_64BIT
+#if defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS32_R6) || defined(CONFIG_64BIT)
mfc0 t0, CP0_STATUS
#endif
fpu_restore_double a0 t0 t1 # clobbers t1
jr ra
END(_restore_fp)
+#ifdef CONFIG_CPU_HAS_MSA
+
+/*
+ * Save a thread's MSA vector context.
+ */
+LEAF(_save_msa)
+ msa_save_all a0
+ jr ra
+ END(_save_msa)
+
+/*
+ * Restore a thread's MSA vector context.
+ */
+LEAF(_restore_msa)
+ msa_restore_all a0
+ jr ra
+ END(_restore_msa)
+
+LEAF(_init_msa_upper)
+ msa_init_all_upper
+ jr ra
+ END(_init_msa_upper)
+
+#endif
+
/*
* Load the FPU with signalling NANS. This bit pattern we're using has
* the property that no matter whether considered as single or as double
@@ -143,7 +226,8 @@
* We initialize fcr31 to rounding to nearest, no exceptions.
*/
-#define FPU_DEFAULT 0x00000000
+ .set push
+ SET_HARDFLOAT
LEAF(_init_fpu)
#ifdef CONFIG_MIPS_MT_SMTC
@@ -161,15 +245,32 @@
#endif /* CONFIG_MIPS_MT_SMTC */
enable_fpu_hazard
- li t1, FPU_DEFAULT
- ctc1 t1, fcr31
+#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
+ li t2, MIPS_FPIR_HAS2008
+ cfc1 t1, CP1_REVISION
+ and t2, t2, t1
+ li t1, FPU_CSR_DEFAULT
+ beq t2, $0, 3f
+ li t1, FPU_CSR_DEFAULT|FPU_CSR_MAC2008|FPU_CSR_ABS2008|FPU_CSR_NAN2008
+3:
+#endif
+ ctc1 t1, fcr31
- li t1, -1 # SNaN
+ li t1, -1 # SNaN MIPS, DP or SP or DP+SP
#ifdef CONFIG_64BIT
- sll t0, t0, 5
+ sll t0, t0, 31 - _ST0_FR
bgez t0, 1f # 16 / 32 register mode?
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
+ enable_fpu_hazard
+ li t2, FPU_CSR_NAN2008
+ cfc1 t3, fcr31
+ and t2, t2, t3
+ beq t2, $0, 2f
+ dli t1, 0x7ff000007fa00000 # SNaN 2008, DP + SP
+2:
+#endif
dmtc1 t1, $f1
dmtc1 t1, $f3
dmtc1 t1, $f5
@@ -187,9 +288,31 @@
dmtc1 t1, $f29
dmtc1 t1, $f31
1:
-#endif
+#endif /* CONFIG_64BIT */
#ifdef CONFIG_CPU_MIPS32
+
+/* preprocessor replaces the fp in ".set fp=64" with $30 otherwise */
+#undef fp
+
+#if defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS32_R6)
+ .set push
+ .set mips32r2
+ .set fp=64
+ sll t0, t0, 31 - _ST0_FR
+ bgez t0, 2f # 16 / 32 register mode?
+
+ enable_fpu_hazard
+ li t2, FPU_CSR_NAN2008
+ cfc1 t3, fcr31
+ and t2, t2, t3
+ move t3, t1 # SNaN MIPS, DP high word
+ beq t2, $0, 2f
+ li t1, 0x7fa00000 # SNaN 2008, SP
+ li t3, 0x7ff00000 # SNaN 2008, DP high word
+ .set pop
+2:
+#endif
mtc1 t1, $f0
mtc1 t1, $f1
mtc1 t1, $f2
@@ -222,8 +345,55 @@
mtc1 t1, $f29
mtc1 t1, $f30
mtc1 t1, $f31
+
+#if defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS32_R6)
+ bgez t0, 1f # 16 / 32 register mode?
+
+ move t1, t3 # move SNaN, DP high word
+ .set push
+ .set mips32r2
+ .set fp=64
+ mthc1 t1, $f0
+ mthc1 t1, $f1
+ mthc1 t1, $f2
+ mthc1 t1, $f3
+ mthc1 t1, $f4
+ mthc1 t1, $f5
+ mthc1 t1, $f6
+ mthc1 t1, $f7
+ mthc1 t1, $f8
+ mthc1 t1, $f9
+ mthc1 t1, $f10
+ mthc1 t1, $f11
+ mthc1 t1, $f12
+ mthc1 t1, $f13
+ mthc1 t1, $f14
+ mthc1 t1, $f15
+ mthc1 t1, $f16
+ mthc1 t1, $f17
+ mthc1 t1, $f18
+ mthc1 t1, $f19
+ mthc1 t1, $f20
+ mthc1 t1, $f21
+ mthc1 t1, $f22
+ mthc1 t1, $f23
+ mthc1 t1, $f24
+ mthc1 t1, $f25
+ mthc1 t1, $f26
+ mthc1 t1, $f27
+ mthc1 t1, $f28
+ mthc1 t1, $f29
+ mthc1 t1, $f30
+ mthc1 t1, $f31
+ .set pop
+1:
+#endif /* CONFIG_CPU_MIPS32_R2 || CONFIG_CPU_MIPS32_R6 */
+#else /* !CONFIG_CPU_MIPS32 */
+#ifdef CONFIG_CPU_MIPS64_R6
+ .set mips64r6
#else
- .set mips3
+ .set arch=r4000
+#endif
dmtc1 t1, $f0
dmtc1 t1, $f2
dmtc1 t1, $f4
@@ -240,6 +410,8 @@
dmtc1 t1, $f26
dmtc1 t1, $f28
dmtc1 t1, $f30
-#endif
+#endif /* CONFIG_CPU_MIPS32 */
jr ra
END(_init_fpu)
+
+ .set pop /* SET_HARDFLOAT */
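
resume() now takes an fp_save argument and branches on it twice: "beqz a3, 3f"
skips context saving entirely, and "andi a3, a3, 0x2" selects the MSA path
over the scalar one. A sketch of values consistent with those tests (the
names follow the FP_SAVE_ wording in the comment; the concrete values are an
assumption - any encoding with bit 1 set for the vector case would do):

    enum {
            FP_SAVE_NONE   = 0, /* beqz a3 taken, nothing to save    */
            FP_SAVE_SCALAR = 1, /* bit 1 clear: fpu_save_double path */
            FP_SAVE_VECTOR = 3, /* bit 1 set: msa_save_all path      */
    };

    /* What switch_to would conceptually pass as a3 (hypothetical
     * helper; TIF_USEDMSA is assumed to exist alongside TIF_USEDFPU): */
    static inline int fp_save_mode(struct task_struct *prev)
    {
            if (test_tsk_thread_flag(prev, TIF_USEDMSA))
                    return FP_SAVE_VECTOR;
            if (test_tsk_thread_flag(prev, TIF_USEDFPU))
                    return FP_SAVE_SCALAR;
            return FP_SAVE_NONE;
    }
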
diff --git a/arch/mips/kernel/r6000_fpu.S b/arch/mips/kernel/r6000_fpu.S
index da0fbe4..1d97207 100644
--- a/arch/mips/kernel/r6000_fpu.S
+++ b/arch/mips/kernel/r6000_fpu.S
@@ -17,7 +17,11 @@
#include <asm/regdef.h>
.set noreorder
+ .set hardfloat
.set mips2
+ .set push
+ SET_HARDFLOAT
+
/* Save floating point context */
LEAF(_save_fp_context)
mfc0 t0,CP0_STATUS
@@ -85,3 +89,5 @@
1: jr ra
nop
END(_restore_fp_context)
+
+ .set pop /* SET_HARDFLOAT */
diff --git a/arch/mips/kernel/relocate_kernel.S b/arch/mips/kernel/relocate_kernel.S
index 43d2d78..74bab9d 100644
--- a/arch/mips/kernel/relocate_kernel.S
+++ b/arch/mips/kernel/relocate_kernel.S
@@ -26,6 +26,12 @@
PTR_L s2, (s0)
PTR_ADD s0, s0, SZREG
+ /*
+ * In case of a kdump/crash kernel, the indirection page is not
+ * populated as the kernel is directly copied to a reserved location
+ */
+ beqz s2, done
+
/* destination page */
and s3, s2, 0x1
beq s3, zero, 1f
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index 9b36424..d9eac28 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -125,17 +125,39 @@
la t1, 5f # load up to 3 arguments
subu t1, t3
-1: lw t5, 16(t0) # argument #5 from usp
- .set push
- .set noreorder
+#ifndef CONFIG_EVA
+1: lw t5, 16(t0) # argument #5 from usp
+ .set push
+ .set noreorder
.set nomacro
jr t1
addiu t1, 6f - 5f
-2: lw t8, 28(t0) # argument #8 from usp
-3: lw t7, 24(t0) # argument #7 from usp
-4: lw t6, 20(t0) # argument #6 from usp
-5: jr t1
+2: .insn
+ lw t8, 28(t0) # argument #8 from usp
+3: .insn
+ lw t7, 24(t0) # argument #7 from usp
+4: .insn
+ lw t6, 20(t0) # argument #6 from usp
+5: .insn
+#else
+ .set eva
+1: lwe t5, 16(t0) # argument #5 from usp
+ .set push
+ .set noreorder
+ .set nomacro
+ jr t1
+ addiu t1, 6f - 5f
+
+2: .insn
+ lwe t8, 28(t0) # argument #8 from usp
+3: .insn
+ lwe t7, 24(t0) # argument #7 from usp
+4: .insn
+ lwe t6, 20(t0) # argument #6 from usp
+5: .insn
+#endif /* CONFIG_EVA */
+ jr t1
sw t5, 16(sp) # argument #5 to ksp
#ifdef CONFIG_CPU_MICROMIPS
@@ -150,7 +172,7 @@
sw t7, 24(sp) # argument #7 to ksp
sw t6, 20(sp) # argument #6 to ksp
#endif
-6: j stack_done # go back
+6: j stack_done # go back
nop
.set pop
@@ -593,6 +615,12 @@
sys sys_process_vm_writev 6
sys sys_kcmp 5
sys sys_finit_module 3
+ /* Backporting seccomp, skip a few ... */
+ sys sys_ni_syscall 0 /* sys_sched_setattr */
+ sys sys_ni_syscall 0 /* sys_sched_getattr */ /* 4350 */
+ sys sys_ni_syscall 0 /* sys_renameat2 */
+ sys sys_seccomp 3
+
.endm
/* We pre-compute the number of _instruction_ bytes needed to
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index 97a5909..127abe9 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -424,4 +424,8 @@
PTR sys_kcmp
PTR sys_finit_module
PTR sys_getdents64
+ PTR sys_ni_syscall /* sys_sched_setattr */
+ PTR sys_ni_syscall /* sys_sched_getattr */ /* 5310 */
+ PTR sys_ni_syscall /* sys_renameat2 */
+ PTR sys_seccomp
.size sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index edcb659..4c99d34 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -417,4 +417,8 @@
PTR compat_sys_process_vm_writev /* 6310 */
PTR sys_kcmp
PTR sys_finit_module
+ PTR sys_ni_syscall /* sys_sched_setattr */
+ PTR sys_ni_syscall /* sys_sched_getattr */
+ PTR sys_ni_syscall /* sys_renameat2 */ /* 6315 */
+ PTR sys_seccomp
.size sysn32_call_table,.-sysn32_call_table
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index 74f485d..ab93427 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -52,35 +52,13 @@
sll a2, a2, 0
sll a3, a3, 0
- dsll t0, v0, 3 # offset into table
- ld t2, (sys_call_table - (__NR_O32_Linux * 8))(t0)
-
+ dsll t0, v0, 4 # offset into table
+ ld t2, (sys_call_table - (__NR_O32_Linux * 16))(t0)
+ ld t3, (sys_call_table - (__NR_O32_Linux * 16) + 8)(t0) # >= 0 if we need stack arguments
sd a3, PT_R26(sp) # save a3 for syscall restarting
+ bgez t3, stackargs
- /*
- * More than four arguments. Try to deal with it by copying the
- * stack arguments from the user stack to the kernel stack.
- * This Sucks (TM).
- *
- * We intentionally keep the kernel stack a little below the top of
- * userspace so we don't have to do a slower byte accurate check here.
- */
- ld t0, PT_R29(sp) # get old user stack pointer
- daddu t1, t0, 32
- bltz t1, bad_stack
-
-1: lw a4, 16(t0) # argument #5 from usp
-2: lw a5, 20(t0) # argument #6 from usp
-3: lw a6, 24(t0) # argument #7 from usp
-4: lw a7, 28(t0) # argument #8 from usp (for indirect syscalls)
-
- .section __ex_table,"a"
- PTR 1b, bad_stack
- PTR 2b, bad_stack
- PTR 3b, bad_stack
- PTR 4b, bad_stack
- .previous
-
+stack_done:
li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
and t0, t1, t0
@@ -103,6 +81,50 @@
/* ------------------------------------------------------------------------ */
+ /*
+ * More than four arguments. Try to deal with it by copying the
+ * stack arguments from the user stack to the kernel stack.
+ * This Sucks (TM).
+ *
+ */
+stackargs:
+ ld t0, PT_R29(sp) # get old user stack pointer
+
+ /*
+ * We intentionally keep the kernel stack a little below the top of
+ * userspace so we don't have to do a slower byte accurate check here.
+ */
+ daddu t1, t0, 32
+ bltz t1, bad_stack
+
+ /* Ok, copy the args from the luser stack to the kernel stack.
+ * t3 is the precomputed number of instruction bytes needed to
+ * load or store arguments 6-8.
+ */
+
+ dla t1, 5f # load up to 4 additional arguments
+ subu t1, t3
+ .set push
+ .set noreorder
+ .set nomacro
+ jr t1
+ nop
+2: lw a7, 28(t0) # argument #8 from usp
+3: lw a6, 24(t0) # argument #7 from usp
+4: lw a5, 20(t0) # argument #6 from usp
+5:
+ b stack_done
+1: lw a4, 16(t0) # argument #5 from usp
+ .set pop
+
+ .section __ex_table,"a"
+ PTR 1b, bad_stack
+ PTR 2b, bad_stack
+ PTR 3b, bad_stack
+ PTR 4b, bad_stack
+ .previous
+/* ------------------------------------------------------------------------ */
+
trace_a_syscall:
SAVE_STATIC
sd a4, PT_R8(sp) # Save argument registers
@@ -166,7 +188,7 @@
subu t0, a0, __NR_O32_Linux # check syscall number
sltiu v0, t0, __NR_O32_Linux_syscalls + 1
beqz t0, einval # do not recurse
- dsll t1, t0, 3
+ dsll t1, t0, 4
beqz v0, einval
ld t2, sys_call_table(t1) # syscall routine
@@ -189,356 +211,382 @@
jr ra
END(sys32_syscall)
+ /* We pre-compute the number of _instruction_ bytes needed to
+ load or store the arguments 6-8. Negative values are ignored. */
+
+ .macro sys function, nargs
+ PTR \function
+ LONG (\nargs << 2) - (5 << 2)
+ .endm
+
.align 3
.type sys_call_table,@object
sys_call_table:
- PTR sys32_syscall /* 4000 */
- PTR sys_exit
- PTR __sys_fork
- PTR sys_read
- PTR sys_write
- PTR compat_sys_open /* 4005 */
- PTR sys_close
- PTR sys_waitpid
- PTR sys_creat
- PTR sys_link
- PTR sys_unlink /* 4010 */
- PTR compat_sys_execve
- PTR sys_chdir
- PTR compat_sys_time
- PTR sys_mknod
- PTR sys_chmod /* 4015 */
- PTR sys_lchown
- PTR sys_ni_syscall
- PTR sys_ni_syscall /* was sys_stat */
- PTR sys_lseek
- PTR sys_getpid /* 4020 */
- PTR compat_sys_mount
- PTR sys_oldumount
- PTR sys_setuid
- PTR sys_getuid
- PTR compat_sys_stime /* 4025 */
- PTR compat_sys_ptrace
- PTR sys_alarm
- PTR sys_ni_syscall /* was sys_fstat */
- PTR sys_pause
- PTR compat_sys_utime /* 4030 */
- PTR sys_ni_syscall
- PTR sys_ni_syscall
- PTR sys_access
- PTR sys_nice
- PTR sys_ni_syscall /* 4035 */
- PTR sys_sync
- PTR sys_kill
- PTR sys_rename
- PTR sys_mkdir
- PTR sys_rmdir /* 4040 */
- PTR sys_dup
- PTR sysm_pipe
- PTR compat_sys_times
- PTR sys_ni_syscall
- PTR sys_brk /* 4045 */
- PTR sys_setgid
- PTR sys_getgid
- PTR sys_ni_syscall /* was signal 2 */
- PTR sys_geteuid
- PTR sys_getegid /* 4050 */
- PTR sys_acct
- PTR sys_umount
- PTR sys_ni_syscall
- PTR compat_sys_ioctl
- PTR compat_sys_fcntl /* 4055 */
- PTR sys_ni_syscall
- PTR sys_setpgid
- PTR sys_ni_syscall
- PTR sys_olduname
- PTR sys_umask /* 4060 */
- PTR sys_chroot
- PTR compat_sys_ustat
- PTR sys_dup2
- PTR sys_getppid
- PTR sys_getpgrp /* 4065 */
- PTR sys_setsid
- PTR sys_32_sigaction
- PTR sys_sgetmask
- PTR sys_ssetmask
- PTR sys_setreuid /* 4070 */
- PTR sys_setregid
- PTR sys32_sigsuspend
- PTR compat_sys_sigpending
- PTR sys_sethostname
- PTR compat_sys_setrlimit /* 4075 */
- PTR compat_sys_getrlimit
- PTR compat_sys_getrusage
- PTR compat_sys_gettimeofday
- PTR compat_sys_settimeofday
- PTR sys_getgroups /* 4080 */
- PTR sys_setgroups
- PTR sys_ni_syscall /* old_select */
- PTR sys_symlink
- PTR sys_ni_syscall /* was sys_lstat */
- PTR sys_readlink /* 4085 */
- PTR sys_uselib
- PTR sys_swapon
- PTR sys_reboot
- PTR compat_sys_old_readdir
- PTR sys_mips_mmap /* 4090 */
- PTR sys_munmap
- PTR compat_sys_truncate
- PTR compat_sys_ftruncate
- PTR sys_fchmod
- PTR sys_fchown /* 4095 */
- PTR sys_getpriority
- PTR sys_setpriority
- PTR sys_ni_syscall
- PTR compat_sys_statfs
- PTR compat_sys_fstatfs /* 4100 */
- PTR sys_ni_syscall /* sys_ioperm */
- PTR compat_sys_socketcall
- PTR sys_syslog
- PTR compat_sys_setitimer
- PTR compat_sys_getitimer /* 4105 */
- PTR compat_sys_newstat
- PTR compat_sys_newlstat
- PTR compat_sys_newfstat
- PTR sys_uname
- PTR sys_ni_syscall /* sys_ioperm *//* 4110 */
- PTR sys_vhangup
- PTR sys_ni_syscall /* was sys_idle */
- PTR sys_ni_syscall /* sys_vm86 */
- PTR compat_sys_wait4
- PTR sys_swapoff /* 4115 */
- PTR compat_sys_sysinfo
- PTR compat_sys_ipc
- PTR sys_fsync
- PTR sys32_sigreturn
- PTR __sys_clone /* 4120 */
- PTR sys_setdomainname
- PTR sys_newuname
- PTR sys_ni_syscall /* sys_modify_ldt */
- PTR compat_sys_adjtimex
- PTR sys_mprotect /* 4125 */
- PTR compat_sys_sigprocmask
- PTR sys_ni_syscall /* was creat_module */
- PTR sys_init_module
- PTR sys_delete_module
- PTR sys_ni_syscall /* 4130, get_kernel_syms */
- PTR sys_quotactl
- PTR sys_getpgid
- PTR sys_fchdir
- PTR sys_bdflush
- PTR sys_sysfs /* 4135 */
- PTR sys_32_personality
- PTR sys_ni_syscall /* for afs_syscall */
- PTR sys_setfsuid
- PTR sys_setfsgid
- PTR sys_32_llseek /* 4140 */
- PTR compat_sys_getdents
- PTR compat_sys_select
- PTR sys_flock
- PTR sys_msync
- PTR compat_sys_readv /* 4145 */
- PTR compat_sys_writev
- PTR sys_cacheflush
- PTR sys_cachectl
- PTR sys_sysmips
- PTR sys_ni_syscall /* 4150 */
- PTR sys_getsid
- PTR sys_fdatasync
- PTR compat_sys_sysctl
- PTR sys_mlock
- PTR sys_munlock /* 4155 */
- PTR sys_mlockall
- PTR sys_munlockall
- PTR sys_sched_setparam
- PTR sys_sched_getparam
- PTR sys_sched_setscheduler /* 4160 */
- PTR sys_sched_getscheduler
- PTR sys_sched_yield
- PTR sys_sched_get_priority_max
- PTR sys_sched_get_priority_min
- PTR compat_sys_sched_rr_get_interval /* 4165 */
- PTR compat_sys_nanosleep
- PTR sys_mremap
- PTR sys_accept
- PTR sys_bind
- PTR sys_connect /* 4170 */
- PTR sys_getpeername
- PTR sys_getsockname
- PTR sys_getsockopt
- PTR sys_listen
- PTR compat_sys_recv /* 4175 */
- PTR compat_sys_recvfrom
- PTR compat_sys_recvmsg
- PTR sys_send
- PTR compat_sys_sendmsg
- PTR sys_sendto /* 4180 */
- PTR compat_sys_setsockopt
- PTR sys_shutdown
- PTR sys_socket
- PTR sys_socketpair
- PTR sys_setresuid /* 4185 */
- PTR sys_getresuid
- PTR sys_ni_syscall /* was query_module */
- PTR sys_poll
- PTR sys_ni_syscall /* was nfsservctl */
- PTR sys_setresgid /* 4190 */
- PTR sys_getresgid
- PTR sys_prctl
- PTR sys32_rt_sigreturn
- PTR compat_sys_rt_sigaction
- PTR compat_sys_rt_sigprocmask /* 4195 */
- PTR compat_sys_rt_sigpending
- PTR compat_sys_rt_sigtimedwait
- PTR compat_sys_rt_sigqueueinfo
- PTR compat_sys_rt_sigsuspend
- PTR sys_32_pread /* 4200 */
- PTR sys_32_pwrite
- PTR sys_chown
- PTR sys_getcwd
- PTR sys_capget
- PTR sys_capset /* 4205 */
- PTR compat_sys_sigaltstack
- PTR compat_sys_sendfile
- PTR sys_ni_syscall
- PTR sys_ni_syscall
- PTR sys_mips_mmap2 /* 4210 */
- PTR sys_32_truncate64
- PTR sys_32_ftruncate64
- PTR sys_newstat
- PTR sys_newlstat
- PTR sys_newfstat /* 4215 */
- PTR sys_pivot_root
- PTR sys_mincore
- PTR sys_madvise
- PTR sys_getdents64
- PTR compat_sys_fcntl64 /* 4220 */
- PTR sys_ni_syscall
- PTR sys_gettid
- PTR sys32_readahead
- PTR sys_setxattr
- PTR sys_lsetxattr /* 4225 */
- PTR sys_fsetxattr
- PTR sys_getxattr
- PTR sys_lgetxattr
- PTR sys_fgetxattr
- PTR sys_listxattr /* 4230 */
- PTR sys_llistxattr
- PTR sys_flistxattr
- PTR sys_removexattr
- PTR sys_lremovexattr
- PTR sys_fremovexattr /* 4235 */
- PTR sys_tkill
- PTR sys_sendfile64
- PTR compat_sys_futex
- PTR compat_sys_sched_setaffinity
- PTR compat_sys_sched_getaffinity /* 4240 */
- PTR compat_sys_io_setup
- PTR sys_io_destroy
- PTR compat_sys_io_getevents
- PTR compat_sys_io_submit
- PTR sys_io_cancel /* 4245 */
- PTR sys_exit_group
- PTR compat_sys_lookup_dcookie
- PTR sys_epoll_create
- PTR sys_epoll_ctl
- PTR sys_epoll_wait /* 4250 */
- PTR sys_remap_file_pages
- PTR sys_set_tid_address
- PTR sys_restart_syscall
- PTR sys32_fadvise64_64
- PTR compat_sys_statfs64 /* 4255 */
- PTR compat_sys_fstatfs64
- PTR compat_sys_timer_create
- PTR compat_sys_timer_settime
- PTR compat_sys_timer_gettime
- PTR sys_timer_getoverrun /* 4260 */
- PTR sys_timer_delete
- PTR compat_sys_clock_settime
- PTR compat_sys_clock_gettime
- PTR compat_sys_clock_getres
- PTR compat_sys_clock_nanosleep /* 4265 */
- PTR sys_tgkill
- PTR compat_sys_utimes
- PTR sys_ni_syscall /* sys_mbind */
- PTR sys_ni_syscall /* sys_get_mempolicy */
- PTR sys_ni_syscall /* 4270 sys_set_mempolicy */
- PTR compat_sys_mq_open
- PTR sys_mq_unlink
- PTR compat_sys_mq_timedsend
- PTR compat_sys_mq_timedreceive
- PTR compat_sys_mq_notify /* 4275 */
- PTR compat_sys_mq_getsetattr
- PTR sys_ni_syscall /* sys_vserver */
- PTR compat_sys_waitid
- PTR sys_ni_syscall /* available, was setaltroot */
- PTR sys_add_key /* 4280 */
- PTR sys_request_key
- PTR sys_keyctl
- PTR sys_set_thread_area
- PTR sys_inotify_init
- PTR sys_inotify_add_watch /* 4285 */
- PTR sys_inotify_rm_watch
- PTR sys_migrate_pages
- PTR compat_sys_openat
- PTR sys_mkdirat
- PTR sys_mknodat /* 4290 */
- PTR sys_fchownat
- PTR compat_sys_futimesat
- PTR sys_newfstatat
- PTR sys_unlinkat
- PTR sys_renameat /* 4295 */
- PTR sys_linkat
- PTR sys_symlinkat
- PTR sys_readlinkat
- PTR sys_fchmodat
- PTR sys_faccessat /* 4300 */
- PTR compat_sys_pselect6
- PTR compat_sys_ppoll
- PTR sys_unshare
- PTR sys_splice
- PTR sys32_sync_file_range /* 4305 */
- PTR sys_tee
- PTR compat_sys_vmsplice
- PTR compat_sys_move_pages
- PTR compat_sys_set_robust_list
- PTR compat_sys_get_robust_list /* 4310 */
- PTR compat_sys_kexec_load
- PTR sys_getcpu
- PTR compat_sys_epoll_pwait
- PTR sys_ioprio_set
- PTR sys_ioprio_get /* 4315 */
- PTR compat_sys_utimensat
- PTR compat_sys_signalfd
- PTR sys_ni_syscall /* was timerfd */
- PTR sys_eventfd
- PTR sys32_fallocate /* 4320 */
- PTR sys_timerfd_create
- PTR compat_sys_timerfd_gettime
- PTR compat_sys_timerfd_settime
- PTR compat_sys_signalfd4
- PTR sys_eventfd2 /* 4325 */
- PTR sys_epoll_create1
- PTR sys_dup3
- PTR sys_pipe2
- PTR sys_inotify_init1
- PTR compat_sys_preadv /* 4330 */
- PTR compat_sys_pwritev
- PTR compat_sys_rt_tgsigqueueinfo
- PTR sys_perf_event_open
- PTR sys_accept4
- PTR compat_sys_recvmmsg /* 4335 */
- PTR sys_fanotify_init
- PTR compat_sys_fanotify_mark
- PTR sys_prlimit64
- PTR sys_name_to_handle_at
- PTR compat_sys_open_by_handle_at /* 4340 */
- PTR compat_sys_clock_adjtime
- PTR sys_syncfs
- PTR compat_sys_sendmmsg
- PTR sys_setns
- PTR compat_sys_process_vm_readv /* 4345 */
- PTR compat_sys_process_vm_writev
- PTR sys_kcmp
- PTR sys_finit_module
+ sys sys32_syscall 8 /* 4000 */
+ sys sys_exit 1
+ sys __sys_fork 0
+ sys sys_read 3
+ sys sys_write 3
+ sys compat_sys_open 3 /* 4005 */
+ sys sys_close 1
+ sys sys_waitpid 3
+ sys sys_creat 2
+ sys sys_link 2
+ sys sys_unlink 1 /* 4010 */
+ sys compat_sys_execve 0
+ sys sys_chdir 1
+ sys compat_sys_time 1
+ sys sys_mknod 3
+ sys sys_chmod 2 /* 4015 */
+ sys sys_lchown 3
+ sys sys_ni_syscall 0
+ sys sys_ni_syscall 0 /* was sys_stat */
+ sys sys_lseek 3
+ sys sys_getpid 0 /* 4020 */
+ sys compat_sys_mount 5
+ sys sys_oldumount 1
+ sys sys_setuid 1
+ sys sys_getuid 0
+ sys compat_sys_stime 1 /* 4025 */
+ sys compat_sys_ptrace 4
+ sys sys_alarm 1
+ sys sys_ni_syscall 0 /* was sys_fstat */
+ sys sys_pause 0
+ sys compat_sys_utime 2 /* 4030 */
+ sys sys_ni_syscall 0
+ sys sys_ni_syscall 0
+ sys sys_access 2
+ sys sys_nice 1
+ sys sys_ni_syscall 0 /* 4035 */
+ sys sys_sync 0
+ sys sys_kill 2
+ sys sys_rename 2
+ sys sys_mkdir 2
+ sys sys_rmdir 1 /* 4040 */
+ sys sys_dup 1
+ sys sysm_pipe 0
+ sys compat_sys_times 1
+ sys sys_ni_syscall 0
+ sys sys_brk 1 /* 4045 */
+ sys sys_setgid 1
+ sys sys_getgid 0
+ sys sys_ni_syscall 0 /* was signal(2) */
+ sys sys_geteuid 0
+ sys sys_getegid 0 /* 4050 */
+ sys sys_acct 1
+ sys sys_umount 2
+ sys sys_ni_syscall 0
+ sys compat_sys_ioctl 3
+ sys compat_sys_fcntl 3 /* 4055 */
+ sys sys_ni_syscall 2
+ sys sys_setpgid 2
+ sys sys_ni_syscall 0
+ sys sys_olduname 1
+ sys sys_umask 1 /* 4060 */
+ sys sys_chroot 1
+ sys compat_sys_ustat 2
+ sys sys_dup2 2
+ sys sys_getppid 0
+ sys sys_getpgrp 0 /* 4065 */
+ sys sys_setsid 0
+ sys sys_32_sigaction 3
+ sys sys_sgetmask 0
+ sys sys_ssetmask 1
+ sys sys_setreuid 2 /* 4070 */
+ sys sys_setregid 2
+ sys sys32_sigsuspend 0
+ sys compat_sys_sigpending 1
+ sys sys_sethostname 2
+ sys compat_sys_setrlimit 2 /* 4075 */
+ sys compat_sys_getrlimit 2
+ sys compat_sys_getrusage 2
+ sys compat_sys_gettimeofday 2
+ sys compat_sys_settimeofday 2
+ sys sys_getgroups 2 /* 4080 */
+ sys sys_setgroups 2
+ sys sys_ni_syscall 0 /* old_select */
+ sys sys_symlink 2
+ sys sys_ni_syscall 0 /* was sys_lstat */
+ sys sys_readlink 3 /* 4085 */
+ sys sys_uselib 1
+ sys sys_swapon 2
+ sys sys_reboot 3
+ sys compat_sys_old_readdir 3
+ sys sys_mips_mmap 6 /* 4090 */
+ sys sys_munmap 2
+ sys compat_sys_truncate 2
+ sys compat_sys_ftruncate 2
+ sys sys_fchmod 2
+ sys sys_fchown 3 /* 4095 */
+ sys sys_getpriority 2
+ sys sys_setpriority 3
+ sys sys_ni_syscall 0
+ sys compat_sys_statfs 2
+ sys compat_sys_fstatfs 2 /* 4100 */
+ sys sys_ni_syscall 0 /* was ioperm(2) */
+ sys compat_sys_socketcall 2
+ sys sys_syslog 3
+ sys compat_sys_setitimer 3
+ sys compat_sys_getitimer 2 /* 4105 */
+ sys compat_sys_newstat 2
+ sys compat_sys_newlstat 2
+ sys compat_sys_newfstat 2
+ sys sys_uname 1
+ sys sys_ni_syscall 0 /* 4110 was iopl(2) */
+ sys sys_vhangup 0
+ sys sys_ni_syscall 0 /* was sys_idle() */
+ sys sys_ni_syscall 0 /* was sys_vm86 */
+ sys compat_sys_wait4 4
+ sys sys_swapoff 1 /* 4115 */
+ sys compat_sys_sysinfo 1
+ sys compat_sys_ipc 6
+ sys sys_fsync 1
+ sys sys32_sigreturn 0
+ sys __sys_clone 6 /* 4120 */
+ sys sys_setdomainname 2
+ sys sys_newuname 1
+ sys sys_ni_syscall 0 /* sys_modify_ldt */
+ sys compat_sys_adjtimex 1
+ sys sys_mprotect 3 /* 4125 */
+ sys compat_sys_sigprocmask 3
+ sys sys_ni_syscall 0 /* was create_module */
+ sys sys_init_module 5
+ sys sys_delete_module 1
+ sys sys_ni_syscall 0 /* 4130 was get_kernel_syms */
+ sys sys_quotactl 4
+ sys sys_getpgid 1
+ sys sys_fchdir 1
+ sys sys_bdflush 2
+ sys sys_sysfs 3 /* 4135 */
+ sys sys_32_personality 1
+ sys sys_ni_syscall 0 /* for afs_syscall */
+ sys sys_setfsuid 1
+ sys sys_setfsgid 1
+ sys sys_32_llseek 5 /* 4140 */
+ sys compat_sys_getdents 3
+ sys compat_sys_select 5
+ sys sys_flock 2
+ sys sys_msync 3
+ sys compat_sys_readv 3 /* 4145 */
+ sys compat_sys_writev 3
+ sys sys_cacheflush 3
+ sys sys_cachectl 3
+ sys sys_sysmips 4
+ sys sys_ni_syscall 0 /* 4150 */
+ sys sys_getsid 1
+ sys sys_fdatasync 1
+ sys compat_sys_sysctl 1
+ sys sys_mlock 2
+ sys sys_munlock 2 /* 4155 */
+ sys sys_mlockall 1
+ sys sys_munlockall 0
+ sys sys_sched_setparam 2
+ sys sys_sched_getparam 2
+ sys sys_sched_setscheduler 3 /* 4160 */
+ sys sys_sched_getscheduler 1
+ sys sys_sched_yield 0
+ sys sys_sched_get_priority_max 1
+ sys sys_sched_get_priority_min 1
+ sys compat_sys_sched_rr_get_interval 2 /* 4165 */
+ sys compat_sys_nanosleep 2
+ sys sys_mremap 5
+ sys sys_accept 3
+ sys sys_bind 3
+ sys sys_connect 3 /* 4170 */
+ sys sys_getpeername 3
+ sys sys_getsockname 3
+ sys sys_getsockopt 5
+ sys sys_listen 2
+ sys compat_sys_recv 4 /* 4175 */
+ sys compat_sys_recvfrom 6
+ sys compat_sys_recvmsg 3
+ sys sys_send 4
+ sys compat_sys_sendmsg 3
+ sys sys_sendto 6 /* 4180 */
+ sys compat_sys_setsockopt 5
+ sys sys_shutdown 2
+ sys sys_socket 3
+ sys sys_socketpair 4
+ sys sys_setresuid 3 /* 4185 */
+ sys sys_getresuid 3
+ sys sys_ni_syscall 0 /* was sys_query_module */
+ sys sys_poll 3
+ sys sys_ni_syscall 0 /* was nfsservctl */
+ sys sys_setresgid 3 /* 4190 */
+ sys sys_getresgid 3
+ sys sys_prctl 5
+ sys sys32_rt_sigreturn 0
+ sys compat_sys_rt_sigaction 4
+ sys compat_sys_rt_sigprocmask 4 /* 4195 */
+ sys compat_sys_rt_sigpending 2
+ sys compat_sys_rt_sigtimedwait 4
+ sys compat_sys_rt_sigqueueinfo 3
+ sys compat_sys_rt_sigsuspend 0
+ sys sys_32_pread 6 /* 4200 */
+ sys sys_32_pwrite 6
+ sys sys_chown 3
+ sys sys_getcwd 2
+ sys sys_capget 2
+ sys sys_capset 2 /* 4205 */
+ sys compat_sys_sigaltstack 0
+ sys compat_sys_sendfile 4
+ sys sys_ni_syscall 0
+ sys sys_ni_syscall 0
+ sys sys_mips_mmap2 6 /* 4210 */
+ sys sys_32_truncate64 4
+ sys sys_32_ftruncate64 4
+ sys sys_newstat 2
+ sys sys_newlstat 2
+ sys sys_newfstat 2 /* 4215 */
+ sys sys_pivot_root 2
+ sys sys_mincore 3
+ sys sys_madvise 3
+ sys sys_getdents64 3
+ sys compat_sys_fcntl64 3 /* 4220 */
+ sys sys_ni_syscall 0
+ sys sys_gettid 0
+ sys sys32_readahead 5
+ sys sys_setxattr 5
+ sys sys_lsetxattr 5 /* 4225 */
+ sys sys_fsetxattr 5
+ sys sys_getxattr 4
+ sys sys_lgetxattr 4
+ sys sys_fgetxattr 4
+ sys sys_listxattr 3 /* 4230 */
+ sys sys_llistxattr 3
+ sys sys_flistxattr 3
+ sys sys_removexattr 2
+ sys sys_lremovexattr 2
+ sys sys_fremovexattr 2 /* 4235 */
+ sys sys_tkill 2
+ sys sys_sendfile64 5
+ sys compat_sys_futex 6
+#ifdef CONFIG_MIPS_MT_FPAFF
+ /*
+ * For FPU affinity scheduling on MIPS MT processors, we need to
+ * intercept sys_sched_xxxaffinity() calls until we get a proper hook
+ * in kernel/sched.c. Considered only temporary we only support these
+ * hooks for the 32-bit kernel - there is no MIPS64 MT processor atm.
+ */
+ // FIXME These entry points do not exist
+ sys compat_mipsmt_sys_sched_setaffinity 3
+ sys compat_mipsmt_sys_sched_getaffinity 3
+#else
+ sys compat_sys_sched_setaffinity 3
+ sys compat_sys_sched_getaffinity 3 /* 4240 */
+#endif /* CONFIG_MIPS_MT_FPAFF */
+ sys compat_sys_io_setup 2
+ sys sys_io_destroy 1
+ sys compat_sys_io_getevents 5
+ sys compat_sys_io_submit 3
+ sys sys_io_cancel 3 /* 4245 */
+ sys sys_exit_group 1
+ sys compat_sys_lookup_dcookie 4
+ sys sys_epoll_create 1
+ sys sys_epoll_ctl 4
+ sys sys_epoll_wait 4 /* 4250 */
+ sys sys_remap_file_pages 5
+ sys sys_set_tid_address 1
+ sys sys_restart_syscall 0
+ sys sys32_fadvise64_64 7
+ sys compat_sys_statfs64 3 /* 4255 */
+ sys compat_sys_fstatfs64 2
+ sys compat_sys_timer_create 3
+ sys compat_sys_timer_settime 4
+ sys compat_sys_timer_gettime 2
+ sys sys_timer_getoverrun 1 /* 4260 */
+ sys sys_timer_delete 1
+ sys compat_sys_clock_settime 2
+ sys compat_sys_clock_gettime 2
+ sys compat_sys_clock_getres 2
+ sys compat_sys_clock_nanosleep 4 /* 4265 */
+ sys sys_tgkill 3
+ sys compat_sys_utimes 2
+ sys sys_ni_syscall 0 /* FIXME: scall32-o32 implements
+ this system call with the wrong number of args?! */
+ sys sys_ni_syscall 0 /* sys_get_mempolicy */
+ sys sys_ni_syscall 0 /* 4270 sys_set_mempolicy */
+ sys compat_sys_mq_open 4
+ sys sys_mq_unlink 1
+ sys compat_sys_mq_timedsend 5
+ sys compat_sys_mq_timedreceive 5
+ sys compat_sys_mq_notify 2 /* 4275 */
+ sys compat_sys_mq_getsetattr 3
+ sys sys_ni_syscall 0 /* sys_vserver */
+ sys compat_sys_waitid 5
+ sys sys_ni_syscall 0 /* available, was setaltroot */
+ sys sys_add_key 5 /* 4280 */
+ sys sys_request_key 4
+ sys sys_keyctl 5
+ sys sys_set_thread_area 1
+ sys sys_inotify_init 0
+ sys sys_inotify_add_watch 3 /* 4285 */
+ sys sys_inotify_rm_watch 2
+ sys sys_migrate_pages 4
+ sys compat_sys_openat 4
+ sys sys_mkdirat 3
+ sys sys_mknodat 4 /* 4290 */
+ sys sys_fchownat 5
+ sys compat_sys_futimesat 3
+ sys sys_newfstatat 4
+ sys sys_unlinkat 3
+ sys sys_renameat 4 /* 4295 */
+ sys sys_linkat 5
+ sys sys_symlinkat 3
+ sys sys_readlinkat 4
+ sys sys_fchmodat 3
+ sys sys_faccessat 3 /* 4300 */
+ sys compat_sys_pselect6 6
+ sys compat_sys_ppoll 5
+ sys sys_unshare 1
+ sys sys_splice 6
+ sys sys32_sync_file_range 7 /* 4305 */
+ sys sys_tee 4
+ sys compat_sys_vmsplice 4
+ sys compat_sys_move_pages 6
+ sys compat_sys_set_robust_list 2
+ sys compat_sys_get_robust_list 3 /* 4310 */
+ sys compat_sys_kexec_load 4
+ sys sys_getcpu 3
+ sys compat_sys_epoll_pwait 6
+ sys sys_ioprio_set 3
+ sys sys_ioprio_get 2 /* 4315 */
+ sys compat_sys_utimensat 4
+ sys compat_sys_signalfd 3
+ sys sys_ni_syscall 0 /* was timerfd */
+ sys sys_eventfd 1
+ sys sys32_fallocate 6 /* 4320 */
+ sys sys_timerfd_create 2
+ sys compat_sys_timerfd_gettime 2
+ sys compat_sys_timerfd_settime 4
+ sys compat_sys_signalfd4 4
+ sys sys_eventfd2 2 /* 4325 */
+ sys sys_epoll_create1 1
+ sys sys_dup3 3
+ sys sys_pipe2 2
+ sys sys_inotify_init1 1
+ sys compat_sys_preadv 6 /* 4330 */
+ sys compat_sys_pwritev 6
+ sys compat_sys_rt_tgsigqueueinfo 4
+ sys sys_perf_event_open 5
+ sys sys_accept4 4
+ sys compat_sys_recvmmsg 5 /* 4335 */
+ sys sys_fanotify_init 2
+ sys compat_sys_fanotify_mark 6
+ sys sys_prlimit64 4
+ sys sys_name_to_handle_at 5
+ sys compat_sys_open_by_handle_at 3 /* 4340 */
+ sys compat_sys_clock_adjtime 2
+ sys sys_syncfs 1
+ sys compat_sys_sendmmsg 4
+ sys sys_setns 2
+ sys compat_sys_process_vm_readv 6 /* 4345 */
+ sys compat_sys_process_vm_writev 6
+ sys sys_kcmp 5
+ sys sys_finit_module 3
+ /* Backporting seccomp, skip a few ... */
+ sys sys_ni_syscall 0 /* sys_sched_setattr */
+ sys sys_ni_syscall 0 /* sys_sched_getattr */ /* 4350 */
+ sys sys_ni_syscall 0 /* sys_renameat2 */
+ sys sys_seccomp 3
.size sys_call_table,.-sys_call_table
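
The rewritten dispatch works because each table entry grew from 8 to 16
bytes: a handler pointer plus the precomputed (\nargs << 2) - (5 << 2), i.e.
(nargs - 5) * 4. A negative value means at most four register arguments, so
"bgez t3, stackargs" falls through; otherwise t3 is the number of instruction
bytes to step back from label 5 in the load sequence. A conceptual C
rendering of one entry (a sketch; the real table is emitted by the sys macro):

    struct o32_syscall_entry {
            long (*handler)(void); /* PTR  \function                */
            long stack_arg_bytes;  /* LONG (\nargs << 2) - (5 << 2) */
    };

    /* nargs = 5 ->  0: land on label 5, arg 5 loaded in the delay slot
     * nargs = 6 ->  4: land on "lw a5", fall through to label 5
     * nargs = 8 -> 12: land on "lw a7", loading args 8, 7, 6 and 5
     * nargs <= 4 -> negative: stackargs is never entered            */

Note that the trick depends on .set noreorder: the lw of argument 5 sits in
the delay slot of the branch at label 5, so it executes on every path through
stackargs.
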
diff --git a/arch/mips/kernel/segment.c b/arch/mips/kernel/segment.c
new file mode 100644
index 0000000..c3ceb77
--- /dev/null
+++ b/arch/mips/kernel/segment.c
@@ -0,0 +1,103 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2011 MIPS Technologies, Inc.
+ */
+#include <linux/kernel.h>
+#include <linux/proc_fs.h>
+#include <asm/mipsregs.h>
+
+#ifdef CONFIG_PROC_FS
+static struct proc_dir_entry *segments;
+
+static void proc_build_segment_config(char *str, unsigned int cfg)
+{
+ unsigned int am;
+ int len = 0;
+ static const char * const am_str[] = {
+ "UK ", "MK ", "MSK ", "MUSK ", "MUSUK ", "USK ",
+ "*Reserved* ", "UUSK "};
+
+ /* Segment access mode. */
+ am = (cfg & MIPS_SEGCFG_AM) >> MIPS_SEGCFG_AM_SHIFT;
+ len += sprintf(str + len, "%s", am_str[am]);
+
+ /*
+ * Access modes MK, MSK and MUSK are mapped segments. Therefore
+ * there is no direct physical address mapping.
+ */
+ if ((am == 0) || (am > 3))
+ len += sprintf(str + len, " %03lx",
+ ((cfg & MIPS_SEGCFG_PA) >> MIPS_SEGCFG_PA_SHIFT));
+ else
+ len += sprintf(str + len, " UND");
+
+ /*
+ * Access modes MK, MSK and MUSK are mapped segments. Therefore
+ * there is no defined cache mode setting.
+ */
+ if ((am == 0) || (am > 3))
+ len += sprintf(str + len, " %01ld",
+ ((cfg & MIPS_SEGCFG_C) >> MIPS_SEGCFG_C_SHIFT));
+ else
+ len += sprintf(str + len, " U");
+
+ /* Exception configuration. */
+ len += sprintf(str + len, " %01ld\n",
+ ((cfg & MIPS_SEGCFG_EU) >> MIPS_SEGCFG_EU_SHIFT));
+}
+
+static int proc_read_segments(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ int len = 0;
+ unsigned int segcfg;
+ char str[42];
+
+ len += sprintf(page + len, "\nSegment Virtual Size Access Mode Physical Caching EU\n");
+
+ len += sprintf(page + len, "------- -------- ---- ----------- ---------- ------- ------\n");
+
+ segcfg = read_c0_segctl0();
+ proc_build_segment_config(str, segcfg);
+ len += sprintf(page + len, " 0 e0000000 512M %s", str);
+
+ segcfg >>= 16;
+ proc_build_segment_config(str, segcfg);
+ len += sprintf(page + len, " 1 c0000000 512M %s", str);
+
+ segcfg = read_c0_segctl1();
+ proc_build_segment_config(str, segcfg);
+ len += sprintf(page + len, " 2 a0000000 512M %s", str);
+
+ segcfg >>= 16;
+ proc_build_segment_config(str, segcfg);
+ len += sprintf(page + len, " 3 80000000 512M %s", str);
+
+ segcfg = read_c0_segctl2();
+ proc_build_segment_config(str, segcfg);
+ len += sprintf(page + len, " 4 40000000 1G %s", str);
+
+ segcfg >>= 16;
+ proc_build_segment_config(str, segcfg);
+ len += sprintf(page + len, " 5 00000000 1G %s\n", str);
+
+ page += len;
+ return len;
+}
+
+static int __init segments_info(void)
+{
+ if (cpu_has_segments) {
+ segments = create_proc_read_entry("segments", 0444, NULL,
+ proc_read_segments, NULL);
+ if (!segments)
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+__initcall(segments_info);
+#endif /* CONFIG_PROC_FS */
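
Each of the three CP0 SegCtl registers packs two 16-bit segment
configurations, which is why proc_read_segments() shifts segcfg right by 16
between paired segments. Decoding one 16-bit configuration with the same
mask/shift macros the file already uses (a sketch for illustration):

    struct seg_config {
            unsigned int am; /* access mode (UK, MK, MSK, ...)     */
            unsigned int pa; /* physical base, defined if unmapped */
            unsigned int c;  /* cache coherency attribute          */
            unsigned int eu; /* exception configuration            */
    };

    static void decode_segcfg(unsigned int cfg, struct seg_config *s)
    {
            s->am = (cfg & MIPS_SEGCFG_AM) >> MIPS_SEGCFG_AM_SHIFT;
            s->pa = (cfg & MIPS_SEGCFG_PA) >> MIPS_SEGCFG_PA_SHIFT;
            s->c  = (cfg & MIPS_SEGCFG_C)  >> MIPS_SEGCFG_C_SHIFT;
            s->eu = (cfg & MIPS_SEGCFG_EU) >> MIPS_SEGCFG_EU_SHIFT;
    }

For the mapped modes MK, MSK and MUSK the PA and C fields have no meaning,
which the /proc output renders as "UND" and "U".
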
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index c7f9051..11c3f78 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -167,6 +167,9 @@
case BOOT_MEM_RESERVED:
printk(KERN_CONT "(reserved)\n");
break;
+ case BOOT_MEM_INUSE:
+ printk(KERN_CONT "(in-use, reserved)\n");
+ break;
default:
printk(KERN_CONT "type %lu\n", boot_mem_map.map[i].type);
break;
@@ -292,6 +295,148 @@
#else /* !CONFIG_SGI_IP27 */
+static int maar_last = -2;
+static int nomaar_flag;
+static unsigned long maar_regs[MIPS_MAAR_MAX * 2];
+
+static int __init early_parse_nomaar(char *p)
+{
+ nomaar_flag = 1;
+ return 0;
+}
+early_param("nomaar", early_parse_nomaar);
+
+static void __init maar_reset(void)
+{
+ int maar = 0;
+
+ do {
+ write_c0_maarindex(maar);
+ back_to_back_c0_hazard();
+ if (read_c0_maarindex() != maar)
+ return;
+ write_c0_maar(0);
+ back_to_back_c0_hazard();
+ } while (++maar < MIPS_MAAR_MAX);
+}
+
+void __init maar_setup(void)
+{
+ int maar = 0;
+ phys_t low;
+ phys_t upper;
+
+ if (nomaar_flag || !cpu_has_maar)
+ return;
+
+ pr_info("MAAR setup:\n");
+ maar_reset();
+
+ for (maar = 0; maar < (maar_last + 2); maar++) {
+ write_c0_maarindex(maar);
+ back_to_back_c0_hazard();
+ if (read_c0_maarindex() != maar) {
+ pr_err("CPU has only %d MAARs, resetting...\n",maar - 1);
+ maar_reset();
+ return;
+ }
+ write_c0_maar(maar_regs[maar]);
+#if defined(CONFIG_CPU_MIPS32) && defined(CONFIG_64BIT_PHYS_ADDR)
+ write_c0_hi_maar(maar_regs[maar + MIPS_MAAR_MAX]);
+#endif
+ back_to_back_c0_hazard();
+ if (maar & 1) {
+ low = (((phys_t)maar_regs[maar]) << 4) & ~0xffff;
+ upper = (((phys_t)maar_regs[maar - 1]) << 4) & ~0xffff;
+#if defined(CONFIG_CPU_MIPS32) && defined(CONFIG_64BIT_PHYS_ADDR)
+ low += (((phys_t)maar_regs[maar + MIPS_MAAR_MAX]) << 36) & ~MIPS_MAAR_HI_V;
+ upper += (((phys_t)maar_regs[maar - 1 + MIPS_MAAR_MAX]) << 36) & ~MIPS_MAAR_HI_V;
+#endif
+ upper = (upper & ~0xffff) + 0xffff;
+			pr_info(" [%0#10lx-%0#10lx] %s\n", low, upper,
+				(maar_regs[maar - 1] & MIPS_MAAR_S) ? "speculative" : "");
+ }
+ }
+}
+
+static void __init maar_update(phys_t begin, phys_t end, int speculative)
+{
+ phys_t start;
+
+ /* rounding, let's be conservative if speculative */
+ if (speculative) {
+ if (begin & 0xffff)
+ start = (begin + 0x10000) & ~0xffff;
+ else
+ start = begin;
+ end = (end - 0x10000) & ~0xffff;
+ } else {
+ start = begin & ~0xffff;
+ end = (end - 1) & ~0xffff;
+ }
+ if (speculative && (end == start))
+ return;
+
+	maar_regs[maar_last + 1] = (start >> 4) | MIPS_MAAR_V | (speculative ? MIPS_MAAR_S : 0);
+#if defined(CONFIG_CPU_MIPS32) && defined(CONFIG_64BIT_PHYS_ADDR)
+ maar_regs[maar_last + 1 + MIPS_MAAR_MAX] = MIPS_MAAR_HI_V | (start >> 36);
+#endif
+	maar_regs[maar_last] = (end >> 4) | MIPS_MAAR_V | (speculative ? MIPS_MAAR_S : 0);
+#if defined(CONFIG_CPU_MIPS32) && defined(CONFIG_64BIT_PHYS_ADDR)
+ maar_regs[maar_last + MIPS_MAAR_MAX] = MIPS_MAAR_HI_V | (end >> 36);
+#endif
+ return;
+}
+
+void __init add_maar_region(phys_t start, phys_t end, int speculative)
+{
+ phys_t upper;
+ unsigned sbit;
+ int i;
+
+ if (nomaar_flag || !cpu_has_maar)
+ return;
+
+ if (maar_last < 0) {
+ maar_last = 0;
+ maar_update(start, end, speculative);
+ return;
+ }
+
+ /* try merge with previous region */
+ upper = maar_regs[maar_last];
+#if defined(CONFIG_CPU_MIPS32) && defined(CONFIG_64BIT_PHYS_ADDR)
+ upper |= (((phys_t)maar_regs[maar_last + MIPS_MAAR_MAX] << 32) & ~MIPS_MAAR_HI_V);
+#endif
+	sbit = (upper & MIPS_MAAR_S) ? MIPS_MAAR_S : 0;
+	speculative = speculative ? MIPS_MAAR_S : 0;
+ upper = ((upper << 4) + 0x10000) & ~0xffffUL;
+ if (((upper == (start & ~0xffffUL)) ||
+ (upper == ((start + 0xffffUL) & ~0xffffUL))) &&
+ (sbit == speculative)) {
+ if (speculative)
+ end = (end - 0x10000) & ~0xffff;
+ else
+ end = (end - 1) & ~0xffff;
+ maar_regs[maar_last] = (end >> 4) | MIPS_MAAR_V | sbit;
+#if defined(CONFIG_CPU_MIPS32) && defined(CONFIG_64BIT_PHYS_ADDR)
+ maar_regs[maar_last + MIPS_MAAR_MAX] = MIPS_MAAR_HI_V | (end >> 36);
+#endif
+ return;
+ }
+
+ maar_last += 2;
+ if (maar_last >= MIPS_MAAR_MAX) {
+ pr_err("Attempt to initialize more than %d MAARs\n", MIPS_MAAR_MAX);
+		for (i = 0; i < MIPS_MAAR_MAX; i++) {
+ maar_regs[i] = 0;
+ maar_regs[i + MIPS_MAAR_MAX] = 0;
+ }
+ return;
+ }
+ maar_update(start, end, speculative);
+}
+
static void __init bootmem_init(void)
{
unsigned long reserved_end;
@@ -368,7 +513,6 @@
bootmap_size = init_bootmem_node(NODE_DATA(0), mapstart,
min_low_pfn, max_low_pfn);
-
for (i = 0; i < boot_mem_map.nr_map; i++) {
unsigned long start, end;
@@ -391,9 +535,17 @@
if (end <= start)
continue;
#endif
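+		/* Cover all usable RAM-like regions with speculative MAAR pairs. */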
+ if ((!nomaar_flag) && cpu_has_maar &&
+ ((boot_mem_map.map[i].type == BOOT_MEM_RAM) ||
+ (boot_mem_map.map[i].type == BOOT_MEM_ROM_DATA) ||
+ (boot_mem_map.map[i].type == BOOT_MEM_INUSE) ||
+ (boot_mem_map.map[i].type == BOOT_MEM_INIT_RAM)))
+ add_maar_region(PFN_PHYS(start), PFN_PHYS(end), 1);
memblock_add_node(PFN_PHYS(start), PFN_PHYS(end - start), 0);
}
+ if (cpu_has_maar && !nomaar_flag)
+ maar_setup();
/*
* Register fully available low RAM pages with the bootmem allocator.
@@ -552,6 +704,52 @@
add_memory_region(mem, size, type);
}
+#ifdef CONFIG_KEXEC
+static inline unsigned long long get_total_mem(void)
+{
+ unsigned long long total;
+
+ total = max_pfn - min_low_pfn;
+ return total << PAGE_SHIFT;
+}
+
+static void __init mips_parse_crashkernel(void)
+{
+ unsigned long long total_mem;
+ unsigned long long crash_size, crash_base;
+ int ret;
+
+ total_mem = get_total_mem();
+ ret = parse_crashkernel(boot_command_line, total_mem,
+ &crash_size, &crash_base);
+ if (ret != 0 || crash_size <= 0)
+ return;
+
+ crashk_res.start = crash_base;
+ crashk_res.end = crash_base + crash_size - 1;
+}
+
+static void __init request_crashkernel(struct resource *res)
+{
+ int ret;
+
+ ret = request_resource(res, &crashk_res);
+ if (!ret)
+ pr_info("Reserving %ldMB of memory at %ldMB for crashkernel\n",
+ (unsigned long)((crashk_res.end -
+ crashk_res.start + 1) >> 20),
+ (unsigned long)(crashk_res.start >> 20));
+}
+#else /* !defined(CONFIG_KEXEC) */
+static void __init mips_parse_crashkernel(void)
+{
+}
+
+static void __init request_crashkernel(struct resource *res)
+{
+}
+#endif /* !defined(CONFIG_KEXEC) */
+
static void __init arch_mem_init(char **cmdline_p)
{
extern void plat_mem_setup(void);
@@ -608,6 +806,8 @@
BOOTMEM_DEFAULT);
}
#endif
+
+ mips_parse_crashkernel();
#ifdef CONFIG_KEXEC
if (crashk_res.start != crashk_res.end)
reserve_bootmem(crashk_res.start,
@@ -620,52 +820,6 @@
paging_init();
}
-#ifdef CONFIG_KEXEC
-static inline unsigned long long get_total_mem(void)
-{
- unsigned long long total;
-
- total = max_pfn - min_low_pfn;
- return total << PAGE_SHIFT;
-}
-
-static void __init mips_parse_crashkernel(void)
-{
- unsigned long long total_mem;
- unsigned long long crash_size, crash_base;
- int ret;
-
- total_mem = get_total_mem();
- ret = parse_crashkernel(boot_command_line, total_mem,
- &crash_size, &crash_base);
- if (ret != 0 || crash_size <= 0)
- return;
-
- crashk_res.start = crash_base;
- crashk_res.end = crash_base + crash_size - 1;
-}
-
-static void __init request_crashkernel(struct resource *res)
-{
- int ret;
-
- ret = request_resource(res, &crashk_res);
- if (!ret)
- pr_info("Reserving %ldMB of memory at %ldMB for crashkernel\n",
- (unsigned long)((crashk_res.end -
- crashk_res.start + 1) >> 20),
- (unsigned long)(crashk_res.start >> 20));
-}
-#else /* !defined(CONFIG_KEXEC) */
-static void __init mips_parse_crashkernel(void)
-{
-}
-
-static void __init request_crashkernel(struct resource *res)
-{
-}
-#endif /* !defined(CONFIG_KEXEC) */
-
static void __init resource_init(void)
{
int i;
@@ -678,11 +832,6 @@
data_resource.start = __pa_symbol(&_etext);
data_resource.end = __pa_symbol(&_edata) - 1;
- /*
- * Request address space for all standard RAM.
- */
- mips_parse_crashkernel();
-
for (i = 0; i < boot_mem_map.nr_map; i++) {
struct resource *res;
unsigned long start, end;
@@ -701,6 +850,9 @@
case BOOT_MEM_ROM_DATA:
res->name = "System RAM";
break;
+ case BOOT_MEM_INUSE:
+ res->name = "InUse memory";
+ break;
case BOOT_MEM_RESERVED:
default:
res->name = "reserved";
diff --git a/arch/mips/kernel/signal-common.c b/arch/mips/kernel/signal-common.c
new file mode 100644
index 0000000..51953fd
--- /dev/null
+++ b/arch/mips/kernel/signal-common.c
@@ -0,0 +1,237 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ * Copyright (C) 1994 - 2000 Ralf Baechle
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2014, Imagination Technologies Ltd.
+ */
+
+/*
+ * Copy the thread's saved context to/from a signal context, which is presumed
+ * to be on the user stack and is therefore accessed with the appropriate
+ * macros from uaccess.h.
+ */
+static int SUFFIX(copy_fp_to_sigcontext)(user_sigcontext_t __user *sc)
+{
+ int i;
+ int err = 0;
+ int inc = (!test_thread_local_flags(LTIF_FPU_FR)) ? 2 : 1;
+
+ for (i = 0; i < NUM_FPU_REGS; i += inc) {
+ err |=
+ __put_user(get_fpr64(¤t->thread.fpu.fpr[i], 0),
+ &sc->sc_fpregs[i]);
+ }
+ err |= __put_user(current->thread.fpu.fcr31, &sc->sc_fpc_csr);
+
+ return err;
+}
+
+static int SUFFIX(copy_fp_from_sigcontext)(user_sigcontext_t __user *sc)
+{
+ int i;
+ int err = 0;
+ int inc = (!test_thread_local_flags(LTIF_FPU_FR)) ? 2 : 1;
+ u64 fpr_val;
+
+ for (i = 0; i < NUM_FPU_REGS; i += inc) {
+ err |= __get_user(fpr_val, &sc->sc_fpregs[i]);
+ set_fpr64(¤t->thread.fpu.fpr[i], 0, fpr_val);
+ }
+ err |= __get_user(current->thread.fpu.fcr31, &sc->sc_fpc_csr);
+
+ return err;
+}
+
+/*
+ * Helper routines
+ */
+static int SUFFIX(protected_save_fp_context)(user_sigcontext_t __user *sc)
+{
+ int err;
+#ifndef CONFIG_EVA
+ while (1) {
+ lock_fpu_owner();
+ if (is_fpu_owner()) {
+ err = SUFFIX(save_fp_context)(sc);
+ unlock_fpu_owner();
+ } else {
+ unlock_fpu_owner();
+ err = SUFFIX(copy_fp_to_sigcontext)(sc);
+ }
+ if (likely(!err))
+ break;
+ /* touch the sigcontext and try again */
+ err = __put_user(0, &sc->sc_fpregs[0]) |
+ __put_user(0, &sc->sc_fpregs[31]) |
+ __put_user(0, &sc->sc_fpc_csr);
+ if (err)
+ break; /* really bad sigcontext */
+ }
+#else
+	/*
+	 * There are no EVA versions of the FPU load/store instructions, so
+	 * the FPU context cannot be saved directly to user memory.
+	 */
+ lose_fpu(1);
+ err = SUFFIX(save_fp_context)(sc); /* this might fail */
+#endif
+ return err;
+}
+
+static int SUFFIX(protected_restore_fp_context)(user_sigcontext_t __user *sc)
+{
+ int err, tmp __maybe_unused;
+#ifndef CONFIG_EVA
+ while (1) {
+ lock_fpu_owner();
+ if (is_fpu_owner()) {
+ err = SUFFIX(restore_fp_context)(sc);
+ unlock_fpu_owner();
+ } else {
+ unlock_fpu_owner();
+ err = SUFFIX(copy_fp_from_sigcontext)(sc);
+ }
+ if (likely(!err))
+ break;
+ /* touch the sigcontext and try again */
+ err = __get_user(tmp, &sc->sc_fpregs[0]) |
+ __get_user(tmp, &sc->sc_fpregs[31]) |
+ __get_user(tmp, &sc->sc_fpc_csr);
+ if (err)
+ break; /* really bad sigcontext */
+ }
+#else
+	/*
+	 * There are no EVA versions of the FPU load/store instructions, so
+	 * the FPU context cannot be restored directly from user memory.
+	 */
+ lose_fpu(0);
+ err = SUFFIX(restore_fp_context)(sc); /* this might fail */
+#endif
+ return err;
+}
+
+int SUFFIX(setup_sigcontext)(struct pt_regs *regs, user_sigcontext_t __user *sc)
+{
+ int err = 0;
+ int i;
+ unsigned int used_math;
+
+ err |= __put_user(regs->cp0_epc, &sc->sc_pc);
+
+ err |= __put_user(0, &sc->sc_regs[0]);
+ for (i = 1; i < 32; i++)
+ err |= __put_user(regs->regs[i], &sc->sc_regs[i]);
+
+#ifdef CONFIG_CPU_HAS_SMARTMIPS
+ err |= __put_user(regs->acx, &sc->sc_acx);
+#endif
+ err |= __put_user(regs->hi, &sc->sc_mdhi);
+ err |= __put_user(regs->lo, &sc->sc_mdlo);
+#ifndef CONFIG_CPU_MIPSR6
+ if (cpu_has_dsp) {
+ err |= __put_user(mfhi1(), &sc->sc_hi1);
+ err |= __put_user(mflo1(), &sc->sc_lo1);
+ err |= __put_user(mfhi2(), &sc->sc_hi2);
+ err |= __put_user(mflo2(), &sc->sc_lo2);
+ err |= __put_user(mfhi3(), &sc->sc_hi3);
+ err |= __put_user(mflo3(), &sc->sc_lo3);
+ err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp);
+ }
+#endif
+ used_math = used_math() ? USED_FP : 0;
+
+ if (used_math) {
+ if (test_thread_local_flags(LTIF_FPU_FR)) {
+ used_math |= USED_FR1;
+
+ if (test_thread_local_flags(LTIF_FPU_FRE))
+ used_math |= USED_HYBRID_FPRS;
+ }
+ }
+
+ if (used_math) {
+ /*
+ * Save FPU state to signal context. Signal handler
+ * will "inherit" current FPU state.
+ */
+ err |= SUFFIX(protected_save_fp_context)(sc);
+ }
+
+ if (!err) {
+ err = save_extcontext(sc_to_extcontext(sc));
+ if (err > 0) {
+ used_math |= USED_EXTCONTEXT;
+ err = 0;
+ }
+ }
+
+ err |= __put_user(used_math, &sc->sc_used_math);
+
+ return err;
+}
+
+static int SUFFIX(check_and_restore_fp_context)(user_sigcontext_t __user *sc)
+{
+ int err, sig;
+
+ err = sig = fpcsr_pending(&sc->sc_fpc_csr);
+ if (err > 0)
+ err = 0;
+ err |= SUFFIX(protected_restore_fp_context)(sc);
+ return err ?: sig;
+}
+
+int SUFFIX(restore_sigcontext)(struct pt_regs *regs, user_sigcontext_t __user *sc)
+{
+ unsigned int used_math;
+#ifndef CONFIG_CPU_MIPSR6
+ unsigned int treg;
+#endif
+ int err = 0;
+ int i;
+
+ /* Always make any pending restarted system calls return -EINTR */
+ current_thread_info()->restart_block.fn = do_no_restart_syscall;
+
+ err |= __get_user(regs->cp0_epc, &sc->sc_pc);
+
+#ifdef CONFIG_CPU_HAS_SMARTMIPS
+ err |= __get_user(regs->acx, &sc->sc_acx);
+#endif
+ err |= __get_user(regs->hi, &sc->sc_mdhi);
+ err |= __get_user(regs->lo, &sc->sc_mdlo);
+#ifndef CONFIG_CPU_MIPSR6
+ if (cpu_has_dsp) {
+ err |= __get_user(treg, &sc->sc_hi1); mthi1(treg);
+ err |= __get_user(treg, &sc->sc_lo1); mtlo1(treg);
+ err |= __get_user(treg, &sc->sc_hi2); mthi2(treg);
+ err |= __get_user(treg, &sc->sc_lo2); mtlo2(treg);
+ err |= __get_user(treg, &sc->sc_hi3); mthi3(treg);
+ err |= __get_user(treg, &sc->sc_lo3); mtlo3(treg);
+ err |= __get_user(treg, &sc->sc_dsp); wrdsp(treg, DSP_MASK);
+ }
+#endif
+ for (i = 1; i < 32; i++)
+ err |= __get_user(regs->regs[i], &sc->sc_regs[i]);
+
+ err |= __get_user(used_math, &sc->sc_used_math);
+ conditional_used_math(used_math & USED_FP);
+
+ if (used_math & USED_FP) {
+ /* restore fpu context if we have used it before */
+ if (!err)
+ err = SUFFIX(check_and_restore_fp_context)(sc);
+ } else {
+ /* signal handler may have used FPU. Give it up. */
+ lose_fpu(0);
+ }
+
+ if (used_math & USED_EXTCONTEXT)
+ err |= restore_extcontext(sc_to_extcontext(sc));
+
+ return err;
+}
diff --git a/arch/mips/kernel/signal-common.h b/arch/mips/kernel/signal-common.h
index 9c60d09..37205f8 100644
--- a/arch/mips/kernel/signal-common.h
+++ b/arch/mips/kernel/signal-common.h
@@ -27,6 +27,22 @@
/* Check and clear pending FPU exceptions in saved CSR */
extern int fpcsr_pending(unsigned int __user *fpcsr);
+static inline void __user *sc_to_extcontext(void __user *sc)
+{
+ struct ucontext __user *uc;
+
+ /*
+ * We can just pretend the sigcontext is always embedded in a struct
+ * ucontext here, because the offset from sigcontext to extended
+ * context is the same in the struct sigframe case.
+ */
+ uc = container_of(sc, struct ucontext, uc_mcontext);
+ return &uc->uc_extcontext;
+}
+
+extern int save_extcontext(void __user *buf);
+extern int restore_extcontext(void __user *buf);
+
/* Make sure we will not lose FPU ownership */
#ifdef CONFIG_PREEMPT
#define lock_fpu_owner() preempt_disable()
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index fd3ef2c..4e7ae8a 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -36,6 +36,7 @@
#include <asm/vdso.h>
#include <asm/dsp.h>
#include <asm/inst.h>
+#include <asm/msa.h>
#include "signal-common.h"
@@ -45,14 +46,17 @@
extern asmlinkage int _save_fp_context(struct sigcontext __user *sc);
extern asmlinkage int _restore_fp_context(struct sigcontext __user *sc);
-extern asmlinkage int fpu_emulator_save_context(struct sigcontext __user *sc);
-extern asmlinkage int fpu_emulator_restore_context(struct sigcontext __user *sc);
+extern asmlinkage int _save_msa_all_upper(void __user *buf);
+extern asmlinkage int _restore_msa_all_upper(void __user *buf);
struct sigframe {
u32 sf_ass[4]; /* argument save space for o32 */
u32 sf_pad[2]; /* Was: signal trampoline */
+
+ /* Matches struct ucontext from its uc_mcontext field onwards */
struct sigcontext sf_sc;
sigset_t sf_mask;
+ unsigned long long sf_extcontext[0];
};
struct rt_sigframe {
@@ -62,89 +66,177 @@
struct ucontext rs_uc;
};
-/*
- * Helper routines
- */
-static int protected_save_fp_context(struct sigcontext __user *sc)
+#define SUFFIX(n) n
+typedef struct sigcontext user_sigcontext_t;
+#include "signal-common.c"
+
+static size_t extcontext_max_size(void)
{
- int err;
- while (1) {
- lock_fpu_owner();
- own_fpu_inatomic(1);
- err = save_fp_context(sc); /* this might fail */
- unlock_fpu_owner();
- if (likely(!err))
- break;
- /* touch the sigcontext and try again */
- err = __put_user(0, &sc->sc_fpregs[0]) |
- __put_user(0, &sc->sc_fpregs[31]) |
- __put_user(0, &sc->sc_fpc_csr);
- if (err)
- break; /* really bad sigcontext */
- }
- return err;
+ size_t sz = 0;
+
+ /*
+ * The assumption here is that between this point & the point at which
+ * the extended context is saved the size of the context should only
+ * ever be able to shrink (if the task is preempted), but never grow.
+ * That is, what this function returns is an upper bound on the size of
+ * the extended context for the current task at the current time.
+ */
+
+ if (thread_msa_context_live())
+ sz += sizeof(struct msa_extcontext);
+
+ /* If any context is saved then we'll append the end marker */
+ if (sz)
+ sz += sizeof(((struct extcontext *)NULL)->magic);
+
+ return sz;
}
-static int protected_restore_fp_context(struct sigcontext __user *sc)
+static int save_msa_extcontext(void __user *buf)
{
- int err, tmp __maybe_unused;
- while (1) {
- lock_fpu_owner();
- own_fpu_inatomic(0);
- err = restore_fp_context(sc); /* this might fail */
- unlock_fpu_owner();
- if (likely(!err))
- break;
- /* touch the sigcontext and try again */
- err = __get_user(tmp, &sc->sc_fpregs[0]) |
- __get_user(tmp, &sc->sc_fpregs[31]) |
- __get_user(tmp, &sc->sc_fpc_csr);
- if (err)
- break; /* really bad sigcontext */
- }
- return err;
-}
+ struct msa_extcontext __user *msa = buf;
+ uint64_t val;
+ int i, err;
-int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
-{
- int err = 0;
- int i;
- unsigned int used_math;
+ if (!thread_msa_context_live())
+ return 0;
- err |= __put_user(regs->cp0_epc, &sc->sc_pc);
+ /*
+ * Ensure that we can't lose the live MSA context between checking
+ * for it & writing it to memory.
+ */
+ preempt_disable();
- err |= __put_user(0, &sc->sc_regs[0]);
- for (i = 1; i < 32; i++)
- err |= __put_user(regs->regs[i], &sc->sc_regs[i]);
-
-#ifdef CONFIG_CPU_HAS_SMARTMIPS
- err |= __put_user(regs->acx, &sc->sc_acx);
-#endif
- err |= __put_user(regs->hi, &sc->sc_mdhi);
- err |= __put_user(regs->lo, &sc->sc_mdlo);
- if (cpu_has_dsp) {
- err |= __put_user(mfhi1(), &sc->sc_hi1);
- err |= __put_user(mflo1(), &sc->sc_lo1);
- err |= __put_user(mfhi2(), &sc->sc_hi2);
- err |= __put_user(mflo2(), &sc->sc_lo2);
- err |= __put_user(mfhi3(), &sc->sc_hi3);
- err |= __put_user(mflo3(), &sc->sc_lo3);
- err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp);
- }
-
- used_math = !!used_math();
- err |= __put_user(used_math, &sc->sc_used_math);
-
- if (used_math) {
+ if (is_msa_enabled()) {
/*
- * Save FPU state to signal context. Signal handler
- * will "inherit" current FPU state.
+ * There are no EVA versions of the vector register load/store
+ * instructions, so MSA context has to be saved to kernel memory
+ * and then copied to user memory. The save to kernel memory
+ * should already have been done when handling scalar FP
+ * context.
*/
- err |= protected_save_fp_context(sc);
+ BUG_ON(config_enabled(CONFIG_EVA));
+
+ err = __put_user(read_msa_csr(), &msa->csr);
+ err |= _save_msa_all_upper(&msa->wr);
+
+ preempt_enable();
+ } else {
+ preempt_enable();
+
+ err = __put_user(current->thread.fpu.msacsr, &msa->csr);
+
+ for (i = 0; i < NUM_FPU_REGS; i++) {
+ val = get_fpr64(¤t->thread.fpu.fpr[i], 1);
+ err |= __put_user(val, &msa->wr[i]);
+ }
}
+
+ err |= __put_user(MSA_EXTCONTEXT_MAGIC, &msa->ext.magic);
+ err |= __put_user(sizeof(*msa), &msa->ext.size);
+
+ return err ? -EFAULT : sizeof(*msa);
+}
+
+static int restore_msa_extcontext(void __user *buf, unsigned int size)
+{
+ struct msa_extcontext __user *msa = buf;
+ unsigned long long val;
+ unsigned int csr;
+ int i, err;
+
+ if (size != sizeof(*msa))
+ return -EINVAL;
+
+ err = get_user(csr, &msa->csr);
+ if (err)
+ return err;
+
+ preempt_disable();
+
+ if (is_msa_enabled()) {
+ /*
+ * There are no EVA versions of the vector register load/store
+ * instructions, so MSA context has to be copied to kernel
+ * memory and later loaded to registers. The same is true of
+ * scalar FP context, so FPU & MSA should have already been
+ * disabled whilst handling scalar FP context.
+ */
+ BUG_ON(config_enabled(CONFIG_EVA));
+
+ write_msa_csr(csr);
+ err |= _restore_msa_all_upper(&msa->wr);
+ preempt_enable();
+ } else {
+ preempt_enable();
+
+ current->thread.fpu.msacsr = csr;
+
+ for (i = 0; i < NUM_FPU_REGS; i++) {
+ err |= __get_user(val, &msa->wr[i]);
+ set_fpr64(¤t->thread.fpu.fpr[i], 1, val);
+ }
+ }
+
return err;
}
+int save_extcontext(void __user *buf)
+{
+ int sz;
+
+ sz = save_msa_extcontext(buf);
+ if (sz < 0)
+ return sz;
+ buf += sz;
+
+ /* If no context was saved then trivially return */
+ if (!sz)
+ return 0;
+
+ /* Write the end marker */
+ if (__put_user(END_EXTCONTEXT_MAGIC, (u32 *)buf))
+ return -EFAULT;
+
+ sz += sizeof(((struct extcontext *)NULL)->magic);
+ return sz;
+}
+
+int restore_extcontext(void __user *buf)
+{
+ struct extcontext ext;
+ int err;
+
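+	/*
+	 * Walk the extended context records, each of which begins with a
+	 * magic/size header, until the end marker is reached.
+	 */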
+ while (1) {
+ err = __get_user(ext.magic, (unsigned int *)buf);
+ if (err)
+ return err;
+
+ if (ext.magic == END_EXTCONTEXT_MAGIC)
+ return 0;
+
+ err = __get_user(ext.size, (unsigned int *)(buf
+ + offsetof(struct extcontext, size)));
+ if (err)
+ return err;
+
+ switch (ext.magic) {
+ case MSA_EXTCONTEXT_MAGIC:
+ err = restore_msa_extcontext(buf, ext.size);
+ break;
+
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ if (err)
+ return err;
+
+ buf += ext.size;
+ }
+}
+
int fpcsr_pending(unsigned int __user *fpcsr)
{
int err, sig = 0;
@@ -164,68 +256,14 @@
return err ?: sig;
}
-static int
-check_and_restore_fp_context(struct sigcontext __user *sc)
-{
- int err, sig;
-
- err = sig = fpcsr_pending(&sc->sc_fpc_csr);
- if (err > 0)
- err = 0;
- err |= protected_restore_fp_context(sc);
- return err ?: sig;
-}
-
-int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
-{
- unsigned int used_math;
- unsigned long treg;
- int err = 0;
- int i;
-
- /* Always make any pending restarted system calls return -EINTR */
- current_thread_info()->restart_block.fn = do_no_restart_syscall;
-
- err |= __get_user(regs->cp0_epc, &sc->sc_pc);
-
-#ifdef CONFIG_CPU_HAS_SMARTMIPS
- err |= __get_user(regs->acx, &sc->sc_acx);
-#endif
- err |= __get_user(regs->hi, &sc->sc_mdhi);
- err |= __get_user(regs->lo, &sc->sc_mdlo);
- if (cpu_has_dsp) {
- err |= __get_user(treg, &sc->sc_hi1); mthi1(treg);
- err |= __get_user(treg, &sc->sc_lo1); mtlo1(treg);
- err |= __get_user(treg, &sc->sc_hi2); mthi2(treg);
- err |= __get_user(treg, &sc->sc_lo2); mtlo2(treg);
- err |= __get_user(treg, &sc->sc_hi3); mthi3(treg);
- err |= __get_user(treg, &sc->sc_lo3); mtlo3(treg);
- err |= __get_user(treg, &sc->sc_dsp); wrdsp(treg, DSP_MASK);
- }
-
- for (i = 1; i < 32; i++)
- err |= __get_user(regs->regs[i], &sc->sc_regs[i]);
-
- err |= __get_user(used_math, &sc->sc_used_math);
- conditional_used_math(used_math);
-
- if (used_math) {
- /* restore fpu context if we have used it before */
- if (!err)
- err = check_and_restore_fp_context(sc);
- } else {
- /* signal handler may have used FPU. Give it up. */
- lose_fpu(0);
- }
-
- return err;
-}
-
void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
size_t frame_size)
{
unsigned long sp;
+ /* Leave space for potential extended context */
+ frame_size += extcontext_max_size();
+
/* Default to using normal stack */
sp = regs->regs[29];
@@ -512,6 +550,10 @@
regs->regs[0] = 0; /* Don't deal with this again. */
}
+ /* adjust emulation stack if signal happens during emulation */
+ if (current_thread_info()->vdso_page)
+ vdso_epc_adjust(regs);
+
if (sig_uses_siginfo(ka))
ret = abi->setup_rt_frame(vdso + abi->rt_signal_return_offset,
ka, regs, sig, oldset, info);
@@ -584,23 +626,36 @@
}
#ifdef CONFIG_SMP
+#ifndef CONFIG_EVA
static int smp_save_fp_context(struct sigcontext __user *sc)
{
return raw_cpu_has_fpu
? _save_fp_context(sc)
- : fpu_emulator_save_context(sc);
+ : copy_fp_to_sigcontext(sc);
}
static int smp_restore_fp_context(struct sigcontext __user *sc)
{
return raw_cpu_has_fpu
? _restore_fp_context(sc)
- : fpu_emulator_restore_context(sc);
+ : copy_fp_from_sigcontext(sc);
}
#endif
+#endif /* CONFIG_EVA */
static int signal_setup(void)
{
+ /*
+ * The offset from sigcontext to extended context should be the same
+ * regardless of the type of signal, such that userland can always know
+ * where to look if it wishes to find the extended context structures.
+ */
+ BUILD_BUG_ON((offsetof(struct sigframe, sf_extcontext) -
+ offsetof(struct sigframe, sf_sc)) !=
+ (offsetof(struct rt_sigframe, rs_uc.uc_extcontext) -
+ offsetof(struct rt_sigframe, rs_uc.uc_mcontext)));
+
+#ifndef CONFIG_EVA
#ifdef CONFIG_SMP
/* For now just do the cpu_has_fpu check when the functions are invoked */
save_fp_context = smp_save_fp_context;
@@ -610,10 +665,14 @@
save_fp_context = _save_fp_context;
restore_fp_context = _restore_fp_context;
} else {
- save_fp_context = fpu_emulator_save_context;
- restore_fp_context = fpu_emulator_restore_context;
+ save_fp_context = copy_fp_to_sigcontext;
+ restore_fp_context = copy_fp_from_sigcontext;
}
-#endif
+#endif /* CONFIG_SMP */
+#else
+ save_fp_context = copy_fp_to_sigcontext;
+ restore_fp_context = copy_fp_from_sigcontext;
+#endif /* CONFIG_EVA */
return 0;
}
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
index 57de8b7..0c824a3 100644
--- a/arch/mips/kernel/signal32.c
+++ b/arch/mips/kernel/signal32.c
@@ -42,9 +42,6 @@
extern asmlinkage int _save_fp_context32(struct sigcontext32 __user *sc);
extern asmlinkage int _restore_fp_context32(struct sigcontext32 __user *sc);
-extern asmlinkage int fpu_emulator_save_context32(struct sigcontext32 __user *sc);
-extern asmlinkage int fpu_emulator_restore_context32(struct sigcontext32 __user *sc);
-
/*
 * Including <asm/unistd.h> would give us the 64-bit syscall numbers ...
*/
@@ -77,191 +74,9 @@
struct ucontext32 rs_uc;
};
-/*
- * sigcontext handlers
- */
-static int protected_save_fp_context32(struct sigcontext32 __user *sc)
-{
- int err;
- while (1) {
- lock_fpu_owner();
- own_fpu_inatomic(1);
- err = save_fp_context32(sc); /* this might fail */
- unlock_fpu_owner();
- if (likely(!err))
- break;
- /* touch the sigcontext and try again */
- err = __put_user(0, &sc->sc_fpregs[0]) |
- __put_user(0, &sc->sc_fpregs[31]) |
- __put_user(0, &sc->sc_fpc_csr);
- if (err)
- break; /* really bad sigcontext */
- }
- return err;
-}
-
-static int protected_restore_fp_context32(struct sigcontext32 __user *sc)
-{
- int err, tmp __maybe_unused;
- while (1) {
- lock_fpu_owner();
- own_fpu_inatomic(0);
- err = restore_fp_context32(sc); /* this might fail */
- unlock_fpu_owner();
- if (likely(!err))
- break;
- /* touch the sigcontext and try again */
- err = __get_user(tmp, &sc->sc_fpregs[0]) |
- __get_user(tmp, &sc->sc_fpregs[31]) |
- __get_user(tmp, &sc->sc_fpc_csr);
- if (err)
- break; /* really bad sigcontext */
- }
- return err;
-}
-
-static int setup_sigcontext32(struct pt_regs *regs,
- struct sigcontext32 __user *sc)
-{
- int err = 0;
- int i;
- u32 used_math;
-
- err |= __put_user(regs->cp0_epc, &sc->sc_pc);
-
- err |= __put_user(0, &sc->sc_regs[0]);
- for (i = 1; i < 32; i++)
- err |= __put_user(regs->regs[i], &sc->sc_regs[i]);
-
- err |= __put_user(regs->hi, &sc->sc_mdhi);
- err |= __put_user(regs->lo, &sc->sc_mdlo);
- if (cpu_has_dsp) {
- err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp);
- err |= __put_user(mfhi1(), &sc->sc_hi1);
- err |= __put_user(mflo1(), &sc->sc_lo1);
- err |= __put_user(mfhi2(), &sc->sc_hi2);
- err |= __put_user(mflo2(), &sc->sc_lo2);
- err |= __put_user(mfhi3(), &sc->sc_hi3);
- err |= __put_user(mflo3(), &sc->sc_lo3);
- }
-
- used_math = !!used_math();
- err |= __put_user(used_math, &sc->sc_used_math);
-
- if (used_math) {
- /*
- * Save FPU state to signal context. Signal handler
- * will "inherit" current FPU state.
- */
- err |= protected_save_fp_context32(sc);
- }
- return err;
-}
-
-static int
-check_and_restore_fp_context32(struct sigcontext32 __user *sc)
-{
- int err, sig;
-
- err = sig = fpcsr_pending(&sc->sc_fpc_csr);
- if (err > 0)
- err = 0;
- err |= protected_restore_fp_context32(sc);
- return err ?: sig;
-}
-
-static int restore_sigcontext32(struct pt_regs *regs,
- struct sigcontext32 __user *sc)
-{
- u32 used_math;
- int err = 0;
- s32 treg;
- int i;
-
- /* Always make any pending restarted system calls return -EINTR */
- current_thread_info()->restart_block.fn = do_no_restart_syscall;
-
- err |= __get_user(regs->cp0_epc, &sc->sc_pc);
- err |= __get_user(regs->hi, &sc->sc_mdhi);
- err |= __get_user(regs->lo, &sc->sc_mdlo);
- if (cpu_has_dsp) {
- err |= __get_user(treg, &sc->sc_hi1); mthi1(treg);
- err |= __get_user(treg, &sc->sc_lo1); mtlo1(treg);
- err |= __get_user(treg, &sc->sc_hi2); mthi2(treg);
- err |= __get_user(treg, &sc->sc_lo2); mtlo2(treg);
- err |= __get_user(treg, &sc->sc_hi3); mthi3(treg);
- err |= __get_user(treg, &sc->sc_lo3); mtlo3(treg);
- err |= __get_user(treg, &sc->sc_dsp); wrdsp(treg, DSP_MASK);
- }
-
- for (i = 1; i < 32; i++)
- err |= __get_user(regs->regs[i], &sc->sc_regs[i]);
-
- err |= __get_user(used_math, &sc->sc_used_math);
- conditional_used_math(used_math);
-
- if (used_math) {
- /* restore fpu context if we have used it before */
- if (!err)
- err = check_and_restore_fp_context32(sc);
- } else {
- /* signal handler may have used FPU. Give it up. */
- lose_fpu(0);
- }
-
- return err;
-}
-
-/*
- *
- */
-extern void __put_sigset_unknown_nsig(void);
-extern void __get_sigset_unknown_nsig(void);
-
-static inline int put_sigset(const sigset_t *kbuf, compat_sigset_t __user *ubuf)
-{
- int err = 0;
-
- if (!access_ok(VERIFY_WRITE, ubuf, sizeof(*ubuf)))
- return -EFAULT;
-
- switch (_NSIG_WORDS) {
- default:
- __put_sigset_unknown_nsig();
- case 2:
- err |= __put_user(kbuf->sig[1] >> 32, &ubuf->sig[3]);
- err |= __put_user(kbuf->sig[1] & 0xffffffff, &ubuf->sig[2]);
- case 1:
- err |= __put_user(kbuf->sig[0] >> 32, &ubuf->sig[1]);
- err |= __put_user(kbuf->sig[0] & 0xffffffff, &ubuf->sig[0]);
- }
-
- return err;
-}
-
-static inline int get_sigset(sigset_t *kbuf, const compat_sigset_t __user *ubuf)
-{
- int err = 0;
- unsigned long sig[4];
-
- if (!access_ok(VERIFY_READ, ubuf, sizeof(*ubuf)))
- return -EFAULT;
-
- switch (_NSIG_WORDS) {
- default:
- __get_sigset_unknown_nsig();
- case 2:
- err |= __get_user(sig[3], &ubuf->sig[3]);
- err |= __get_user(sig[2], &ubuf->sig[2]);
- kbuf->sig[1] = sig[2] | (sig[3] << 32);
- case 1:
- err |= __get_user(sig[1], &ubuf->sig[1]);
- err |= __get_user(sig[0], &ubuf->sig[0]);
- kbuf->sig[0] = sig[0] | (sig[1] << 32);
- }
-
- return err;
-}
+#define SUFFIX(n) n##32
+typedef struct sigcontext32 user_sigcontext_t;
+#include "signal-common.c"
/*
* Atomically swap in the new signal mask, and wait for a signal.
@@ -558,15 +373,42 @@
.restart = __NR_O32_restart_syscall
};
+#ifdef CONFIG_SMP
+static int smp_save_fp_context32(struct sigcontext32 __user *sc)
+{
+ return raw_cpu_has_fpu
+ ? _save_fp_context32(sc)
+ : copy_fp_to_sigcontext32(sc);
+}
+
+static int smp_restore_fp_context32(struct sigcontext32 __user *sc)
+{
+ return raw_cpu_has_fpu
+ ? _restore_fp_context32(sc)
+ : copy_fp_from_sigcontext32(sc);
+}
+#endif
+
static int signal32_init(void)
{
+#ifndef CONFIG_EVA
+#ifdef CONFIG_SMP
+ /* For now just do the cpu_has_fpu check when the functions are invoked */
+ save_fp_context32 = smp_save_fp_context32;
+ restore_fp_context32 = smp_restore_fp_context32;
+#else
if (cpu_has_fpu) {
save_fp_context32 = _save_fp_context32;
restore_fp_context32 = _restore_fp_context32;
} else {
- save_fp_context32 = fpu_emulator_save_context32;
- restore_fp_context32 = fpu_emulator_restore_context32;
+ save_fp_context32 = copy_fp_to_sigcontext32;
+ restore_fp_context32 = copy_fp_from_sigcontext32;
}
+#endif /* CONFIG_SMP */
+#else
+ save_fp_context32 = copy_fp_to_sigcontext32;
+ restore_fp_context32 = copy_fp_from_sigcontext32;
+#endif /* CONFIG_EVA */
return 0;
}
diff --git a/arch/mips/kernel/smp-cmp.c b/arch/mips/kernel/smp-cmp.c
index c2e5d74..4c2f9ed1 100644
--- a/arch/mips/kernel/smp-cmp.c
+++ b/arch/mips/kernel/smp-cmp.c
@@ -24,6 +24,7 @@
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/compiler.h>
+#include <linux/cpu.h>
#include <linux/atomic.h>
#include <asm/cacheflush.h>
@@ -38,6 +39,16 @@
#include <asm/mips_mt.h>
#include <asm/amon.h>
#include <asm/gic.h>
+#include <asm/gcmpregs.h>
+#include <asm/bootinfo.h>
+#include <asm/irq_cpu.h>
+
+/* Keep _gcmp_base in a separate cacheline for non-coherent reads */
+unsigned long _gcmp_base __cacheline_aligned;
+int gcmp_present __cacheline_aligned = -1;
+int gcmp3_present __cacheline_aligned;
+
+DEFINE_PER_CPU_ALIGNED(spinlock_t, mips_gcr_lock);
static void ipi_call_function(unsigned int cpu)
{
@@ -87,22 +98,143 @@
cmp_send_ipi_single(i, action);
}
+#ifdef CONFIG_EVA
+static unsigned long bev_location = -1;
+
+static int rd_bev_location(char *p)
+{
+ if (p && strlen(p)) {
+	if (p && strlen(p))
+		bev_location = memparse(p, &p);
+	else
+		bev_location = 0xbfc00000;
+}
+early_param("force-bev-location", rd_bev_location);
+
+static void BEV_overlay_segment_map_check(unsigned long excBase,
+ unsigned long excMask, unsigned long excSize)
+{
+ unsigned long addr;
+
+ if ((excBase == (IO_BASE + IO_SHIFT)) && (excSize == IO_SIZE))
+ return;
+
+ printk("WARNING: BEV overlay segment doesn't fit whole I/O reg space, NMI/EJTAG/sRESET may not work\n");
+
+ if ((MAP_BASE < (excBase + excSize)) && (excBase < VMALLOC_END))
+ panic("BEV Overlay segment overlaps VMALLOC area\n");
+#ifdef CONFIG_HIGHMEM
+ if ((PKMAP_BASE < (excBase + excSize)) &&
+ (excBase < (PKMAP_BASE + (PAGE_SIZE*(LAST_PKMAP-1)))))
+ panic("BEV Overlay segment overlaps HIGHMEM/PKMAP area\n");
+#endif
+ for (addr = excBase; addr < (excBase + excSize); addr += PAGE_SIZE) {
+		if (page_is_ram(__pa(addr) >> PAGE_SHIFT))
+			panic("BEV Overlay segment overlaps memory at %lx\n", addr);
+ }
+}
+
+void BEV_overlay_segment(void)
+{
+ unsigned long RExcBase;
+ unsigned long RExcExtBase;
+ unsigned long excBase;
+ unsigned long excMask;
+ unsigned long excSize;
+ unsigned long addr;
+ char *p;
+
+	printk("IO: BASE = 0x%lx, SHIFT = 0x%lx, SIZE = 0x%lx\n", IO_BASE, IO_SHIFT, IO_SIZE);
+ RExcBase = GCMPCLCB(RESETBASE);
+ RExcExtBase = GCMPCLCB(RESETBASEEXT);
+	printk("GCMP base addr = 0x%lx, CLB: ResetExcBase = 0x%lx, ResetExcExtBase = 0x%lx\n",
+	       _gcmp_base, RExcBase, RExcExtBase);
+	if (!(RExcExtBase & 0x1))
+ return;
+
+ if (bev_location == -1) {
+ if ((p = strstr(arcs_cmdline, "force-bev-location")))
+ rd_bev_location(p);
+ }
+ if (bev_location != -1) {
+ addr = fls((IO_BASE + IO_SHIFT) ^ bev_location);
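+		/*
+		 * Grow the power-of-two overlay size until the aligned segment
+		 * covers both the enforced BEV location and the whole I/O
+		 * register range.
+		 */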
+nextSize:
+ if (addr > 28)
+ panic("enforced BEV location is too far from I/O reg space\n");
+
+ excMask = (0xffffffffUL >> (32 - addr));
+ excBase = bev_location & ~excMask;
+ if (((IO_BASE + IO_SHIFT + IO_SIZE - 1) & ~excMask) != excBase) {
+ addr++;
+ goto nextSize;
+ }
+ excSize = ((excBase | excMask) + 1) - excBase;
+ printk("Setting BEV = 0x%lx, Overlay segment = 0x%lx, size = 0x%lx\n",
+ bev_location, excBase, excSize);
+
+ BEV_overlay_segment_map_check(excBase, excMask, excSize);
+
+ GCMPCLCB(RESETBASEEXT) = (GCMPCLCB(RESETBASEEXT) &
+ ~GCMP_CCB_RESETEXTBASE_BEV_MASK_MSK) |
+ (excMask & GCMP_CCB_RESETEXTBASE_BEV_MASK_MSK);
+ GCMPCLCB(RESETBASE) = (GCMPCLCB(RESETBASE) & ~GCMP_CCB_RESETBASE_BEV_MSK) |
+ bev_location;
+ RExcBase = GCMPCLCB(RESETBASE);
+ RExcExtBase = GCMPCLCB(RESETBASEEXT);
+
+ return;
+ }
+
+ excBase = RExcBase & GCMP_CCB_RESETBASE_BEV_MSK;
+ excMask = (RExcExtBase & GCMP_CCB_RESETEXTBASE_BEV_MASK_MSK) |
+ GCMP_CCB_RESETEXTBASE_BEV_MASK_LOWBITS;
+ excBase &= ~excMask;
+ excSize = ((excBase | excMask) + 1) - excBase;
+	printk("BEV Overlay segment = 0x%lx, size = 0x%lx\n", excBase, excSize);
+
+ BEV_overlay_segment_map_check(excBase, excMask, excSize);
+}
+#endif /* CONFIG_EVA */
+
static void cmp_init_secondary(void)
{
struct cpuinfo_mips *c = ¤t_cpu_data;
+ unsigned int gnr;
- /* Assume GIC is present */
- change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 | STATUSF_IP6 |
- STATUSF_IP7);
+ if (!cpu_has_veic) {
+ set_c0_status(mips_smp_c0_status_mask);
+ back_to_back_c0_hazard();
+ printk("CPU%d: status register %08x\n", smp_processor_id(), read_c0_status());
+ }
- /* Enable per-cpu interrupts: platform specific */
-
- c->core = (read_c0_ebase() >> 1) & 0x1ff;
+ if (cpu_has_vcmt) {
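+		/* With virtualized CMT, core and VP IDs come from the GlobalNumber register. */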
+ gnr = read_c0_gnr();
+ c->core = (gnr & MIPS_GNR_CORE) >> MIPS_GNR_CORE_SHIFT;
+ c->vpe_id = (gnr & MIPS_GNR_VPID) >> MIPS_GNR_VPID_SHIFT;
+ } else if (gcmp3_present) {
+ c->core = GCMPCLCB(ID);
+ } else {
+		c->core = (read_c0_ebase() & 0x3ff) >> (fls(smp_num_siblings) - 1);
#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
- c->vpe_id = (read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) & TCBIND_CURVPE;
+ if (cpu_has_mipsmt)
+ c->vpe_id = (read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) &
+ TCBIND_CURVPE;
#endif
#ifdef CONFIG_MIPS_MT_SMTC
- c->tc_id = (read_c0_tcbind() & TCBIND_CURTC) >> TCBIND_CURTC_SHIFT;
+ c->tc_id = (read_c0_tcbind() & TCBIND_CURTC) >> TCBIND_CURTC_SHIFT;
+#endif
+ }
+ if (gic_present) {
+		c->g_vpe = GIC_REG(VPE_LOCAL, GIC_VPE_ID);
+ vpe_gic_setup();
+ }
+#ifdef CONFIG_CPU_MIPSR6
+ pr_info("BEVVA = %lx\n", read_c0_bevva());
+#endif
+
+#ifdef CONFIG_EVA
+ if (gcmp_present)
+ BEV_overlay_segment();
#endif
}
@@ -145,7 +277,7 @@
#if 0
/* Needed? */
- flush_icache_range((unsigned long)gp,
+ local_flush_icache_range((unsigned long)gp,
(unsigned long)(gp + sizeof(struct thread_info)));
#endif
@@ -177,10 +309,24 @@
}
if (cpu_has_mipsmt) {
- unsigned int nvpe, mvpconf0 = read_c0_mvpconf0();
+ unsigned int nvpe = 1;
+#ifdef CONFIG_MIPS_MT_SMP
+ unsigned int mvpconf0 = read_c0_mvpconf0();
+
+ nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
+#elif defined(CONFIG_MIPS_MT_SMTC)
+ unsigned int mvpconf0 = read_c0_mvpconf0();
nvpe = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
+#endif
smp_num_siblings = nvpe;
+ } else if (cpu_has_vcmt) {
+ smp_num_siblings =
+ (GCMPGCB(SYSCONF2) & GCMP_GCB_SYSCONF2_VPWIDTH_MASK) >>
+ GCMP_GCB_SYSCONF2_VPWIDTH_SHF;
+ if ((!smp_num_siblings) ||
+ (smp_num_siblings == GCMP_GCB_SYSCONF2_VPWIDTH_MASK))
+ smp_num_siblings = 4;
}
pr_info("Detected %i available secondary CPU(s)\n", ncpu);
}
@@ -207,3 +353,520 @@
.smp_setup = cmp_smp_setup,
.prepare_cpus = cmp_prepare_cpus,
};
+
+/*
+ * GCMP needs to be detected before any SMP initialisation
+ */
+
+int __init gcmp_probe(unsigned long addr, unsigned long size)
+{
+ unsigned long confaddr = 0;
+
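+	/*
+	 * Probe order: trust CMGCRBase when Config3.CMGCR is set, optionally
+	 * relocate the GCR block to 'addr', and fall back to probing 'addr'
+	 * directly if neither attempt sticks.
+	 */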
+ if (gcmp_present >= 0)
+ return gcmp_present;
+
+ if ((cpu_has_mips_r2 || cpu_has_mips_r6) &&
+ (read_c0_config3() & MIPS_CONF3_CMGCR)) {
+ /* try CMGCRBase */
+ confaddr = read_c0_cmgcrbase() << 4;
+ _gcmp_base = (unsigned long) ioremap_nocache(confaddr, size);
+		gcmp_present = ((GCMPGCBaddr(GCMPB) & GCMP_GCB_GCMPB_GCMPBASE_MSK) == confaddr) ? 1 : 0;
+ if (gcmp_present) {
+ /* reassign it to 'addr' */
+ if (addr != confaddr) {
+ unsigned long tmp = (GCMPGCBaddr(GCMPB) & ~GCMP_GCB_GCMPB_GCMPBASE_MSK) | addr;
+
+ GCMPGCBaddrWrite(GCMPB, tmp);
+ }
+			_gcmp_base = (unsigned long) ioremap_nocache(addr, size);
+			gcmp_present = ((GCMPGCBaddr(GCMPB) & GCMP_GCB_GCMPB_GCMPBASE_MSK) == confaddr) ? 1 : 0;
+ confaddr = addr;
+ if (!gcmp_present) {
+ /* reassignment failed, try CMGCRBase again */
+ confaddr = read_c0_cmgcrbase() << 4;
+ _gcmp_base = (unsigned long) ioremap_nocache(confaddr, size);
+				gcmp_present =
+					((GCMPGCBaddr(GCMPB) & GCMP_GCB_GCMPB_GCMPBASE_MSK) == confaddr) ? 1 : 0;
+ }
+ }
+ }
+ if (addr && (gcmp_present <= 0)) {
+ /* try addr */
+ _gcmp_base = (unsigned long) ioremap_nocache(addr, size);
+		gcmp_present = ((GCMPGCBaddr(GCMPB) & GCMP_GCB_GCMPB_GCMPBASE_MSK) == addr) ? 1 : 0;
+ confaddr = addr;
+ }
+
+ if (gcmp_present > 0) {
+ if (((GCMPGCB(GCMPREV) & GCMP_GCB_GCMPREV_MAJOR_MSK) >>
+ GCMP_GCB_GCMPREV_MAJOR_SHF) >= 8) {
+
+ cpu_data[0].options2 |= MIPS_CPU_CM3_INCLUSIVE_CACHES;
+ gcmp3_present = 1;
+			printk("GCMP3 available, GCRBase = %lx\n", _gcmp_base);
+ } else {
+ if (((GCMPGCB(GCMPREV) & GCMP_GCB_GCMPREV_MAJOR_MSK) >>
+ GCMP_GCB_GCMPREV_MAJOR_SHF) >= 6)
+ cpu_data[0].options |= MIPS_CPU_CM2;
+ printk("GCMP available\n");
+ if (cpu_has_cm2 && (size > 0x8000)) {
+ GCMPGCB(GCML2S) = (confaddr + 0x8000) | GCMP_GCB_GCML2S_EN_MSK;
+ cpu_data[0].options |= MIPS_CPU_CM2_L2SYNC;
+ printk("L2-only SYNC available\n");
+ }
+ if (cpu_has_cm2) {
+ unsigned int l2p;
+
+ l2p = GCMPGCB(GCML2P);
+ if (l2p & GCMP_GCB_GCML2P_NPFT) {
+ GCMPGCB(GCML2P) = (l2p & ~GCMP_GCB_GCML2P_PAGE_MASK) |
+ PAGE_MASK | GCMP_GCB_GCML2P_PFTEN;
+ GCMPGCB(GCML2PB) |= GCMP_GCB_GCML2PB_CODE_PFTEN;
+ }
+ }
+ }
+
+ return gcmp_present;
+ }
+
+ gcmp_present = 0;
+ return gcmp_present;
+}
+
+/* Return the number of IOCU's present */
+int __init gcmp_niocu(void)
+{
+ return (gcmp_present > 0) ?
+ (GCMPGCB(GC) & GCMP_GCB_GC_NUMIOCU_MSK) >> GCMP_GCB_GC_NUMIOCU_SHF :
+ 0;
+}
+
+/* Set GCMP region attributes */
+void __init gcmp_setregion(int region, unsigned long base,
+ unsigned long mask, int type)
+{
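+	/* The attribute 'type' is OR'd into the low bits of the mask register. */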
+ GCMPGCBn(CMxBASE, region) = base;
+ GCMPGCBn(CMxMASK, region) = mask | type;
+}
+
+#ifdef CONFIG_SYSFS
+static ssize_t show_gcr_global(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int n = 0;
+
+ n = snprintf(buf, PAGE_SIZE,
+ "Global Config Register\t\t\t%08x\n"
+ "GCR Base Register\t\t\t%08x\n"
+ "Global CM Control Register\t\t%08x\n"
+ "Global CM Control2 Register\t\t%08x\n"
+ "Global CSR Access Privilege Register\t%08x\n"
+ "GCR Revision Register\t\t\t%08x\n"
+ "Global CM Error Mask Register\t\t%08x\n"
+ "Global CM Error Cause Register\t\t%08x\n"
+ "Global CM Error Address Register\t%08x\n"
+ "Global CM Error Multiple Register\t%08x\n"
+ "GCR Custom Base Register\t\t%08x\n"
+ "GCR Custom Status Register\t\t%x\n"
+ "Global L2 only Sync Register\t\t%08x\n"
+ "GIC Base Address Register\t\t%08x\n"
+ "CPC Base Address Register\t\t%08x\n"
+ "Region0 Base Address Register\t\t%08x,\tMask\t%08x\n"
+ "Region1 Base Address Register\t\t%08x,\tMask\t%08x\n"
+ "Region2 Base Address Register\t\t%08x,\tMask\t%08x\n"
+ "Region3 Base Address Register\t\t%08x,\tMask\t%08x\n"
+ "GIC Status Register\t\t\t%x\n"
+ "Cache Revision Register\t\t\t%08x\n"
+ "CPC Status Register\t\t\t%x\n"
+ "Attribute Region0 Base Address Register\t%08x,\tMask\t%08x\n"
+ "Attribute Region1 Base Address Register\t%08x,\tMask\t%08x\n"
+ "IOCU Revision Register\t\t\t%08x\n"
+ "Attribute Region2 Base Address Register\t%08x,\tMask\t%08x\n"
+ "Attribute Region3 Base Address Register\t%08x,\tMask\t%08x\n"
+ "L2 Prefetch Control Register\t\t%08x\n"
+ "L2 Prefetch Control B Register\t\t%08x\n"
+ ,
+ GCMPGCB(GC),
+ GCMPGCB(GCMPB),
+ GCMPGCB(GCMC),
+ GCMPGCB(GCMC2),
+ GCMPGCB(GCSRAP),
+ GCMPGCB(GCMPREV),
+ GCMPGCB(GCMEM),
+ GCMPGCB(GCMEC),
+ GCMPGCB(GCMEA),
+ GCMPGCB(GCMEO),
+ GCMPGCB(GCMCUS),
+ GCMPGCB(GCMCST),
+ GCMPGCB(GCML2S),
+ GCMPGCB(GICBA),
+ GCMPGCB(CPCBA),
+ GCMPGCBn(CMxBASE, 0), GCMPGCBn(CMxMASK, 0),
+ GCMPGCBn(CMxBASE, 1), GCMPGCBn(CMxMASK, 1),
+ GCMPGCBn(CMxBASE, 2), GCMPGCBn(CMxMASK, 2),
+ GCMPGCBn(CMxBASE, 3), GCMPGCBn(CMxMASK, 3),
+ GCMPGCB(GICST),
+ GCMPGCB(GCSHREV),
+ GCMPGCB(CPCST),
+ GCMPGCB(GAOR0BA), GCMPGCB(GAOR0MASK),
+ GCMPGCB(GAOR1BA), GCMPGCB(GAOR1MASK),
+ GCMPGCB(IOCUREV),
+ GCMPGCB(GAOR2BA), GCMPGCB(GAOR2MASK),
+ GCMPGCB(GAOR3BA), GCMPGCB(GAOR3MASK),
+ GCMPGCB(GCML2P),
+ GCMPGCB(GCML2PB)
+ );
+
+ return n;
+}
+
+static ssize_t show_gcr3_global(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int n = 0;
+
+ n = snprintf(buf, PAGE_SIZE,
+ "Global Config Register\t\t\t\t%08x\n"
+ "GCR Base Register\t\t\t%08x%08x\n"
+ "Global CM Control Register\t\t\t%08x\n"
+ "Global CM3 Alternate Control Register\t%08x%08x\n"
+ "GCR Revision Register\t\t\t\t%08x\n"
+ "Global CM3 Error Control Register\t\t%08x\n"
+ "Global CM Error Mask Register\t\t%08x%08x\n"
+ "Global CM Error Cause Register\t\t%08x%08x\n"
+ "Global CM Error Address Register\t%016x\n"
+ "Global CM Error Multiple Register\t\t%08x\n"
+ "GCR Custom Base Register\t\t%16x\n"
+ "GCR Custom Status Register\t\t\t%08x\n"
+ "GIC Base Address Register\t\t%016x\n"
+ "CPC Base Address Register\t\t%016x\n"
+ "GIC Status Register\t\t\t\t%08x\n"
+ "Cache Revision Register\t\t\t\t%08x\n"
+ "CPC Status Register\t\t\t\t%08x\n"
+ "CM3 IOCU Base Address of IOMMUs\t\t%16x\n"
+ "CM3 IOMMU Status Register\t\t\t%08x\n"
+ "CM3 Global CSR Access Privilege Register\t%08x\n"
+ "CM3 L2 Config Register\t\t\t\t%08x\n"
+ "CM3 Sys Config Register\t\t\t\t%08x\n"
+ "IOCU Revision Register\t\t\t\t%08x\n"
+ "CM3 L2 RAM Config Register\t\t%08x%08x\n"
+ "CM3 Scratch0 Register\t\t\t%08x%08x\n"
+ "CM3 Scratch1 Register\t\t\t%08x%08x\n"
+ "L2 Prefetch Control Register\t\t\t%08x\n"
+ "L2 Prefetch Control B Register\t\t\t%08x\n"
+ "CM3 L2 Prefetch Tuning Register\t\t%08x%08x\n"
+ "CM3 L2 Prefetch Tuning Register A Tier 0:\t%08x\n"
+ "CM3 L2 Prefetch Tuning Register B Tier 0:\t%08x\n"
+ "CM3 L2 Prefetch Tuning Register A Tier 1:\t%08x\n"
+ "CM3 L2 Prefetch Tuning Register B Tier 1:\t%08x\n"
+ "CM3 L2 Prefetch Tuning Register A Tier 2:\t%08x\n"
+ "CM3 L2 Prefetch Tuning Register B Tier 2:\t%08x\n"
+ "CM3 L2 Prefetch Tuning Register A Tier 3:\t%08x\n"
+ "CM3 L2 Prefetch Tuning Register B Tier 3:\t%08x\n"
+ "CM3 L2 Prefetch Tuning Register A Tier 4:\t%08x\n"
+ "CM3 L2 Prefetch Tuning Register B Tier 4:\t%08x\n"
+ "CM3 L2 Tag RAM Cache Op Address Reg\t%016x\n"
+ "CM3 L2 Tag RAM Cache Op State Register\t%08x%08x\n"
+ "CM3 L2 Data RAM Cache Op Register\t%08x%08x\n"
+ "CM3 L2 Tag and Data ECC Cache Op Reg\t%08x%08x\n"
+ "CM3 BEV Base\t\t\t\t%08x%08x\n"
+ ,
+ GCMPGCB(GC),
+ GCMPGCBhi(GCMPB), GCMPGCBlo(GCMPB),
+ GCMPGCB(GCMC),
+ GCMPGCBhi(GCMC2), GCMPGCBlo(GCMC2),
+ GCMPGCB(GCMPREV),
+ GCMPGCB(GCMECTL),
+ GCMPGCBhi(GCMEM), GCMPGCBlo(GCMEM),
+ GCMPGCBhi(GCMEC), GCMPGCBlo(GCMEC),
+ GCMPGCB(GCMEA),
+ GCMPGCB(GCMEO),
+ GCMPGCB(GCMCUS),
+ GCMPGCB(GCMCST),
+ GCMPGCB(GICBA),
+ GCMPGCB(CPCBA),
+ GCMPGCB(GICST),
+ GCMPGCB(GCSHREV),
+ GCMPGCB(CPCST),
+ GCMPGCB(IOCBASE),
+ GCMPGCB(IOST),
+ GCMPGCB(G3CSRAP),
+ GCMPGCB(L2CONFIG),
+ GCMPGCB(SYSCONF),
+ GCMPGCB(IOCUREV),
+ GCMPGCBhi(L2RAMCONF),GCMPGCBlo(L2RAMCONF),
+ GCMPGCBhi(SCRATCH0),GCMPGCBlo(SCRATCH0),
+ GCMPGCBhi(SCRATCH1),GCMPGCBlo(SCRATCH1),
+ GCMPGCB(GCML2P),
+ GCMPGCB(GCML2PB),
+ GCMPGCBhi(L2PREF), GCMPGCBlo(L2PREF),
+ GCMPGCB(L2PREFAT0),
+ GCMPGCB(L2PREFBT0),
+ GCMPGCB(L2PREFAT1),
+ GCMPGCB(L2PREFBT1),
+ GCMPGCB(L2PREFAT2),
+ GCMPGCB(L2PREFBT2),
+ GCMPGCB(L2PREFAT3),
+ GCMPGCB(L2PREFBT3),
+ GCMPGCB(L2PREFAT4),
+ GCMPGCB(L2PREFBT4),
+ GCMPGCB(L2TRCADDR),
+ GCMPGCBhi(L2TRCST), GCMPGCBlo(L2TRCST),
+ GCMPGCBhi(L2DRCOP), GCMPGCBlo(L2DRCOP),
+ GCMPGCBhi(L2TDECCOP), GCMPGCBlo(L2TDECCOP),
+ GCMPGCBhi(BEVBASE), GCMPGCBlo(BEVBASE)
+ );
+
+ return n;
+}
+
+static ssize_t show_gcr_local(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long irq_flags;
+ int corenum;
+ int n = 0;
+ unsigned int cohctl, cfg, other;
+ unsigned int rbasehi, rbaselo, id;
+ unsigned int rbaseext, resetr;
+
+ preempt_disable();
+ corenum = current_cpu_data.core;
+	spin_lock_irqsave(&per_cpu(mips_gcr_lock, corenum), irq_flags);
+
+	GCMPCLCB(OTHER) = (dev->id) << 16;
+
+ resetr = GCMPCOCB(RESETR);
+ cohctl = GCMPCOCB(COHCTL);
+ cfg = GCMPCOCB(CFG);
+ other = GCMPCOCB(OTHER);
+ rbasehi = GCMPCOCBhi(RESETBASE);
+ rbaselo = GCMPCOCBlo(RESETBASE);
+ id = GCMPCOCB(ID);
+ rbaseext = GCMPCOCB(RESETBASEEXT);
+
+	spin_unlock_irqrestore(&per_cpu(mips_gcr_lock, corenum), irq_flags);
+ preempt_enable();
+
+ n += snprintf(buf+n, PAGE_SIZE-n,
+ "Local Reset Release Register\t\t%08x\n"
+ "Local Coherence Control Register\t%08x\n"
+ "Local Config Register\t\t\t%08x\n"
+ "Other Addressing Register\t\t%08x\n"
+ "Local Reset Exception Base Register\t%08x%08x\n"
+ "Local Identification Register\t\t%08x\n"
+ "Local Reset Exception Extended Base\t%08x\n"
+ ,
+ resetr,
+ cohctl,
+ cfg,
+ other,
+ rbasehi, rbaselo,
+ id,
+ rbaseext
+ );
+
+ return n;
+}
+
+static ssize_t show_gcr3_local(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ unsigned long irq_flags;
+ int n = 0;
+ unsigned int cohenb, cfg, id;
+ unsigned int other;
+ unsigned int rbasehi, rbaselo;
+ unsigned int rbaseext;
+
+ local_irq_save(irq_flags);
+
+ GCMPCLCB(OTHER) = dev->id;
+
+ cohenb = GCMPCOCB(COHENB);
+ cfg = GCMPCOCB(CFG);
+ other = GCMPCOCB(OTHER);
+ rbasehi = GCMPCOCBhi(RESETBASE);
+ rbaselo = GCMPCOCBlo(RESETBASE);
+ id = GCMPCOCB(ID);
+ rbaseext = GCMPCOCB(RESETBASEEXT);
+
+ local_irq_restore(irq_flags);
+
+ n += snprintf(buf+n, PAGE_SIZE-n,
+ "CM3 Coherence Enable Register\t\t\t%08x\n"
+ "CM3 Core Config Register\t\t\t%08x\n"
+ "CM3 Other Addressing Register\t\t\t%08x\n"
+ "Reset Exception Base Register\t\t%08x%08x\n"
+ "Core Local Identification Register\t\t%08x\n"
+ "Reset Exception Extended Base\t\t\t%08x\n"
+ ,
+ cohenb,
+ cfg,
+ other,
+ rbasehi, rbaselo,
+ id,
+ rbaseext
+ );
+
+ return n;
+}
+
+static DEVICE_ATTR(gcr3_global, 0444, show_gcr3_global, NULL);
+static DEVICE_ATTR(gcr3_local, 0444, show_gcr3_local, NULL);
+static DEVICE_ATTR(gcr_global, 0444, show_gcr_global, NULL);
+static DEVICE_ATTR(gcr_local, 0444, show_gcr_local, NULL);
+
+static struct bus_type gcmp_subsys = {
+ .name = "gcmp",
+ .dev_name = "gcmp",
+};
+
+static __cpuinit int gcmp3_add_core(int cpu)
+{
+ unsigned long irq_flags;
+ struct device *dev;
+ int err;
+ int vpe, vpeN;
+ char name[16];
+
+ local_irq_save(irq_flags);
+ GCMPCLCB(OTHER) = GCR3_OTHER(cpu, 0);
+ vpeN = ((GCMPCOCB(CFG) & GCMP_CCB_CFG_NUMVPE_MSK) >> GCMP_CCB_CFG_NUMVPE_SHF) + 1;
+ local_irq_restore(irq_flags);
+
+	for (vpe = 0; vpe < vpeN; vpe++) {
+ dev = kzalloc(sizeof *dev, GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ dev->id = GCR3_OTHER(cpu, vpe);
+ dev->bus = &gcmp_subsys;
+		snprintf(name, sizeof name, "core%d_vc%d", cpu, vpe);
+ dev->init_name = name;
+
+ err = device_register(dev);
+ if (err)
+ return err;
+
+ err = device_create_file(dev, &dev_attr_gcr3_local);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static __cpuinit int gcmp_add_core(int cpu)
+{
+ struct device *dev;
+ int err;
+ char name[16];
+
+ dev = kzalloc(sizeof *dev, GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ dev->id = cpu;
+ dev->bus = &gcmp_subsys;
+	snprintf(name, sizeof name, "core%d", cpu);
+ dev->init_name = name;
+
+ err = device_register(dev);
+ if (err)
+ return err;
+
+ err = device_create_file(dev, &dev_attr_gcr_local);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static __cpuinit int gcmp_add_iocu(int cpu,int totcpu)
+{
+ struct device *dev;
+ int err;
+ char name[16];
+
+ dev = kzalloc(sizeof *dev, GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+	/* Ask Tom Berg @ IMGTec about a more generic formula. LY22 */
+ if (gcmp3_present) {
+ totcpu = 16;
+ dev->id = GCR3_OTHER((cpu + totcpu), 0);
+ } else {
+		if (totcpu <= 4)
+ totcpu = 4;
+ else
+ totcpu = 6;
+ dev->id = cpu + totcpu;
+ }
+
+ dev->bus = &gcmp_subsys;
+	snprintf(name, sizeof name, "iocu%d", cpu);
+ dev->init_name = name;
+
+ err = device_register(dev);
+ if (err)
+ return err;
+
+ err = device_create_file(dev, &dev_attr_gcr_local);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int __init init_gcmp_sysfs(void)
+{
+ int rc;
+ int cpuN, iocuN;
+ int cpu;
+
+ if (gcmp_present <= 0)
+ return 0;
+
+ rc = subsys_system_register(&gcmp_subsys, NULL);
+ if (rc)
+ return rc;
+
+ cpuN = ((GCMPGCB(GC) & GCMP_GCB_GC_NUMCORES_MSK) >> GCMP_GCB_GC_NUMCORES_SHF) + 1;
+
+ if (gcmp3_present) {
+ rc = device_create_file(gcmp_subsys.dev_root, &dev_attr_gcr3_global);
+ if (rc)
+ return rc;
+
+		for (cpu = 0; cpu < cpuN; cpu++) {
+ rc = gcmp3_add_core(cpu);
+ if (rc)
+ return rc;
+ }
+ } else {
+ rc = device_create_file(gcmp_subsys.dev_root, &dev_attr_gcr_global);
+ if (rc)
+ return rc;
+
+		for (cpu = 0; cpu < cpuN; cpu++) {
+ rc = gcmp_add_core(cpu);
+ if (rc)
+ return rc;
+ }
+ }
+
+ iocuN = ((GCMPGCB(GC) & GCMP_GCB_GC_NUMIOCU_MSK) >> GCMP_GCB_GC_NUMIOCU_SHF);
+	for (cpu = 0; cpu < iocuN; cpu++) {
+		rc = gcmp_add_iocu(cpu, cpuN);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
+device_initcall_sync(init_gcmp_sysfs);
+
+#endif /* CONFIG_SYSFS */
diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c
index 3e5164c..985e263 100644
--- a/arch/mips/kernel/smp-mt.c
+++ b/arch/mips/kernel/smp-mt.c
@@ -35,6 +35,7 @@
#include <asm/mipsmtregs.h>
#include <asm/mips_mt.h>
#include <asm/gic.h>
+#include <asm/irq_cpu.h>
static void __init smvp_copy_vpe_config(void)
{
@@ -71,6 +72,7 @@
/* Record this as available CPU */
set_cpu_possible(tc, true);
+ set_cpu_present(tc, true);
__cpu_number_map[tc] = ++ncpu;
__cpu_logical_map[ncpu] = tc;
}
@@ -112,12 +114,35 @@
write_tc_c0_tchalt(TCHALT_H);
}
+static void mp_send_ipi_single(int cpu, unsigned int action)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ switch (action) {
+ case SMP_CALL_FUNCTION:
+ gic_send_ipi(plat_ipi_call_int_xlate(cpu));
+ break;
+
+ case SMP_RESCHEDULE_YOURSELF:
+ gic_send_ipi(plat_ipi_resched_int_xlate(cpu));
+ break;
+ }
+
+ local_irq_restore(flags);
+}
+
static void vsmp_send_ipi_single(int cpu, unsigned int action)
{
int i;
unsigned long flags;
int vpflags;
+ if (gic_present) {
+ mp_send_ipi_single(cpu, action);
+ return;
+ }
local_irq_save(flags);
vpflags = dvpe(); /* can't access the other CPU's registers whilst MVPE enabled */
@@ -151,19 +176,24 @@
static void __cpuinit vsmp_init_secondary(void)
{
-#ifdef CONFIG_IRQ_GIC
- /* This is Malta specific: IPI,performance and timer interrupts */
- if (gic_present)
- change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 |
- STATUSF_IP6 | STATUSF_IP7);
- else
+ struct cpuinfo_mips *c = ¤t_cpu_data;
+
+ if (!cpu_has_veic) {
+ set_c0_status(mips_smp_c0_status_mask);
+ back_to_back_c0_hazard();
+ printk("CPU%d: status register %08x\n", smp_processor_id(), read_c0_status());
+ }
+	c->core = (read_c0_ebase() & 0x3ff) >> (fls(smp_num_siblings) - 1);
+#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
+ if (cpu_has_mipsmt)
+ c->vpe_id = (read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) & TCBIND_CURVPE;
#endif
- change_c0_status(ST0_IM, STATUSF_IP0 | STATUSF_IP1 |
- STATUSF_IP6 | STATUSF_IP7);
}
static void __cpuinit vsmp_smp_finish(void)
{
+ pr_debug("SMPMT: CPU%d: vsmp_smp_finish\n", smp_processor_id());
+
/* CDFIXME: remove this? */
write_c0_compare(read_c0_count() + (8* mips_hpt_frequency/HZ));
@@ -178,6 +208,7 @@
static void vsmp_cpus_done(void)
{
+ pr_debug("SMPMT: CPU%d: vsmp_cpus_done\n", smp_processor_id());
}
/*
@@ -191,6 +222,8 @@
static void __cpuinit vsmp_boot_secondary(int cpu, struct task_struct *idle)
{
struct thread_info *gp = task_thread_info(idle);
+ pr_debug("SMPMT: CPU%d: vsmp_boot_secondary cpu %d\n",
+ smp_processor_id(), cpu);
dvpe();
set_c0_mvpcontrol(MVPCONTROL_VPC);
@@ -213,8 +246,8 @@
/* global pointer */
write_tc_gpr_gp((unsigned long)gp);
- flush_icache_range((unsigned long)gp,
- (unsigned long)(gp + sizeof(struct thread_info)));
+ local_flush_icache_range((unsigned long)gp,
+ (unsigned long)(gp + sizeof(struct thread_info)));
/* finally out of configuration and into chaos */
clear_c0_mvpcontrol(MVPCONTROL_VPC);
@@ -232,6 +265,7 @@
unsigned int mvpconf0, ntc, tc, ncpu = 0;
unsigned int nvpe;
+ pr_debug("SMPMT: CPU%d: vsmp_smp_setup\n", smp_processor_id());
#ifdef CONFIG_MIPS_MT_FPAFF
/* If we have an FPU, enroll ourselves in the FPU-full mask */
if (cpu_has_fpu)
@@ -272,6 +306,8 @@
static void __init vsmp_prepare_cpus(unsigned int max_cpus)
{
+ pr_debug("SMPMT: CPU%d: vsmp_prepare_cpus %d\n",
+ smp_processor_id(), max_cpus);
mips_mt_set_cpuoptions();
}
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 6e7862a..6de21fd 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -60,12 +60,19 @@
EXPORT_SYMBOL(smp_num_siblings);
/* representing the TCs (or siblings in Intel speak) of each logical CPU */
-cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
-EXPORT_SYMBOL(cpu_sibling_map);
+DEFINE_PER_CPU_SHARED_ALIGNED(struct cpumask, cpu_sibling_map);
+EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;
+extern void maar_setup(void);
+
+/* CPU siblings in MIPS:
+ *
+ * SMVP kernel - VPEs on a common core are siblings
+ * SMTC kernel - TCs on a common core are siblings
+ */
static inline void set_cpu_sibling_map(int cpu)
{
int i;
@@ -75,12 +82,12 @@
if (smp_num_siblings > 1) {
for_each_cpu_mask(i, cpu_sibling_setup_map) {
if (cpu_data[cpu].core == cpu_data[i].core) {
- cpu_set(i, cpu_sibling_map[cpu]);
- cpu_set(cpu, cpu_sibling_map[i]);
+ cpu_set(i, per_cpu(cpu_sibling_map, cpu));
+ cpu_set(cpu, per_cpu(cpu_sibling_map, i));
}
}
} else
- cpu_set(cpu, cpu_sibling_map[cpu]);
+ cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
}
struct plat_smp_ops *mp_ops;
@@ -110,6 +117,7 @@
#endif /* CONFIG_MIPS_MT_SMTC */
cpu_probe();
cpu_report();
+ maar_setup();
per_cpu_trap_init(false);
mips_clockevent_init();
mp_ops->init_secondary();
diff --git a/arch/mips/kernel/spram.c b/arch/mips/kernel/spram.c
index 6af08d8..a021201 100644
--- a/arch/mips/kernel/spram.c
+++ b/arch/mips/kernel/spram.c
@@ -204,8 +204,11 @@
switch (c->cputype) {
case CPU_24K:
case CPU_34K:
- case CPU_74K:
case CPU_1004K:
+ case CPU_74K:
+ case CPU_PROAPTIV:
+ case CPU_INTERAPTIV:
+ case CPU_SAMURAI:
config0 = read_c0_config();
/* FIXME: addresses are Malta specific */
if (config0 & (1<<24)) {
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
index b79d13f..091388f 100644
--- a/arch/mips/kernel/syscall.c
+++ b/arch/mips/kernel/syscall.c
@@ -26,6 +26,7 @@
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/elf.h>
+#include <linux/prctl.h>
#include <asm/asm.h>
#include <asm/branch.h>
@@ -38,7 +39,10 @@
#include <asm/sysmips.h>
#include <asm/uaccess.h>
#include <asm/switch_to.h>
+#include <asm/fpu.h>
+extern unsigned int system_has_fpu;
+extern unsigned int global_fpu_id;
/*
* For historic reasons the pipe(2) syscall on MIPS has an unusual calling
* convention. It returns results in registers $v0 / $v1 which means there
@@ -135,8 +139,12 @@
: "memory");
} else if (cpu_has_llsc) {
__asm__ __volatile__ (
- " .set mips3 \n"
- " li %[err], 0 \n"
+#ifdef CONFIG_CPU_MIPSR6
+ " .set mips64r6 \n"
+#else
+ " .set mips3 \n"
+#endif
+ " li %[err], 0 \n"
"1: ll %[old], (%[addr]) \n"
" move %[tmp], %[new] \n"
"2: sc %[tmp], (%[addr]) \n"
@@ -197,6 +205,126 @@
unreachable();
}
+asmlinkage void mips_lose_fpu(void)
+{
+ preempt_disable();
+ clear_thread_flag(TIF_FPU_LOSE_REQUEST);
+ lose_fpu_inatomic(1);
+ preempt_enable_no_resched();
+}
+
+void mips_switch_fpu_mode(void *info)
+{
+ struct mm_struct *mm = info;
+
+ if ((current->mm == mm) && (is_fpu_owner() || is_msa_enabled()))
+ set_thread_flag(TIF_FPU_LOSE_REQUEST);
+}
+
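The cross-CPU handshake these two helpers implement, together with the smp_call_function() in mips_fpu_prctl() below, can be summarized as follows (flow sketch only):

	/* FP mode change in a multi-threaded process (sketch):
	 *
	 *   caller CPU                       other CPUs
	 *   ----------                       ----------
	 *   update mm->context.thread_flags
	 *   smp_call_function()  --------->  mips_switch_fpu_mode():
	 *                                      set TIF_FPU_LOSE_REQUEST
	 *   lose_fpu(1)                      later, mips_lose_fpu() drops
	 *                                    the FPU so the next FP use
	 *                                    faults in the new FR/FRE mode
	 */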
+int mips_fpu_prctl(unsigned long type, unsigned long param)
+{
+ register unsigned long val;
+ register unsigned long mask;
+ register unsigned long *addr;
+
+ switch (type) {
+ case PR_SET_FP_MODE:
+ if (param & ~(PR_FP_MODE_FR|PR_FP_MODE_FRE))
+ return -EINVAL;
+
+#ifndef CONFIG_MIPS_INCOMPATIBLE_ARCH_EMULATION
+ if (system_has_fpu) {
+ if ((param & PR_FP_MODE_FRE) && !cpu_has_fre)
+ return -EOPNOTSUPP;
+ if ((param & PR_FP_MODE_FR) && !(global_fpu_id & MIPS_FPIR_F64))
+ return -EOPNOTSUPP;
+ if (!(param & PR_FP_MODE_FR)) {
+ unsigned int res;
+ unsigned int status;
+
+ /* test FPU with FR0 capability */
+ local_irq_disable();
+ status = change_c0_status(ST0_FR|ST0_CU1, ST0_CU1);
+ enable_fpu_hazard();
+ res = read_c0_status();
+ write_c0_status(status);
+ disable_fpu_hazard();
+ local_irq_enable();
+ if (res & ST0_FR)
+ return -EOPNOTSUPP;
+ }
+ }
+#endif
+ val = (param & PR_FP_MODE_FR)? LTIF_FPU_FR : 0;
+ val |= (param & PR_FP_MODE_FRE)? LTIF_FPU_FRE : 0;
+ mask = ~(LTIF_FPU_FR|LTIF_FPU_FRE);
+ preempt_disable();
+ addr = &(current->mm->context.thread_flags);
+ __asm__ __volatile__(
+ ".set push \n"
+ ".set noreorder \n"
+ ".set noat \n"
+#ifdef CONFIG_64BIT
+ "1: lld $1, 0(%0) \n"
+ " and $1, $1, %1 \n"
+ " or $1, $1, %2 \n"
+ " scd $1, 0(%0) \n"
+#else
+ "1: ll $1, 0(%0) \n"
+ " and $1, $1, %1 \n"
+ " or $1, $1, %2 \n"
+ " sc $1, 0(%0) \n"
+#endif
+ " beqz $1, 1b \n"
+ " nop \n"
+ ".set pop \n"
+ :
+ : "r"(addr), "r"(mask), "r"(val)
+ : "memory");
+ smp_llsc_mb();
+ /* send a "barrier" for FPU mode - force other CPUs to lose FPU */
+ if (atomic_read(&current->mm->mm_users) != 1)
+ smp_call_function(mips_switch_fpu_mode, (void *)(current->mm), 1);
+ lose_fpu(1);
+ preempt_enable();
+ break;
+
+ case PR_GET_FP_MODE:
+ if (unlikely(param & 3))
+ return -EINVAL;
+
+ if (unlikely(!access_ok(VERIFY_WRITE, param, 4)))
+ return -EINVAL;
+
+ val = (current_thread_info()->local_flags & LTIF_FPU_FR)? PR_FP_MODE_FR : 0;
+ val |= (current_thread_info()->local_flags & LTIF_FPU_FRE)? PR_FP_MODE_FRE : 0;
+ if (put_user(val, (unsigned long *)param))
+ return -EINVAL;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+long mips_get_process_fp_mode(struct task_struct *task)
+{
+ unsigned long val;
+
+ val = (((struct thread_info *)task_stack_page(task))->local_flags & LTIF_FPU_FR)? PR_FP_MODE_FR : 0;
+ val |= (((struct thread_info *)task_stack_page(task))->local_flags & LTIF_FPU_FRE)? PR_FP_MODE_FRE : 0;
+
+ return val;
+}
+
+long mips_set_process_fp_mode(struct task_struct *task,
+ unsigned long value)
+{
+ return mips_fpu_prctl(PR_SET_FP_MODE, value);
+}
+
SYSCALL_DEFINE3(sysmips, long, cmd, long, arg1, long, arg2)
{
switch (cmd) {
@@ -221,6 +349,9 @@
case FLUSH_CACHE:
__flush_cache_all();
return 0;
+
+ case MIPS_FPU_PRCTL:
+ return mips_fpu_prctl(arg1, arg2);
}
return -EINVAL;
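A hypothetical user-space caller of the new sub-command could look like this (sketch only; it assumes MIPS_FPU_PRCTL is exported through the patched <asm/sysmips.h> and the PR_FP_MODE_* constants through <linux/prctl.h>):

	#include <stdio.h>
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <asm/sysmips.h>	/* MIPS_FPU_PRCTL (added by this patch) */
	#include <linux/prctl.h>	/* PR_SET_FP_MODE, PR_FP_MODE_FR */

	int main(void)
	{
		/* Request FR=1 (64-bit FPU registers) for this process. */
		long ret = syscall(SYS_sysmips, MIPS_FPU_PRCTL,
				   PR_SET_FP_MODE, PR_FP_MODE_FR);
		if (ret) {
			perror("sysmips(MIPS_FPU_PRCTL, PR_SET_FP_MODE)");
			return 1;
		}
		return 0;
	}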
diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c
index 9d686bf..364d26a 100644
--- a/arch/mips/kernel/time.c
+++ b/arch/mips/kernel/time.c
@@ -121,6 +121,14 @@
{
plat_time_init();
- if (!mips_clockevent_init() || !cpu_has_mfc0_count_bug())
+ /*
+ * The use of the R4k timer as a clock event takes precedence;
+ * if reading the Count register might interfere with the timer
+ * interrupt, then we don't use the timer as a clock source.
+ * We may still use the timer as a clock source though if the
+ * timer interrupt isn't reliable; the interference doesn't
+ * matter then, because we don't use the interrupt.
+ */
+ if (mips_clockevent_init() != 0 || !cpu_has_mfc0_count_bug())
init_mips_clocksource();
}
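Spelled out as a table (illustration only):

	/* clockevent init   mfc0 Count bug   register clocksource?
	 *   succeeded            yes          no  - Count reads could
	 *                                          disturb the timer irq
	 *   succeeded            no           yes
	 *   failed               either       yes - the timer irq is not
	 *                                          used, so interference
	 *                                          does not matter
	 */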
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index a75ae40..37d5bd5 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -45,6 +45,7 @@
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/module.h>
+#include <asm/msa.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
@@ -75,7 +76,10 @@
extern asmlinkage void handle_cpu(void);
extern asmlinkage void handle_ov(void);
extern asmlinkage void handle_tr(void);
+extern asmlinkage void handle_msa_fpe(void);
extern asmlinkage void handle_fpe(void);
+extern asmlinkage void handle_ftlb(void);
+extern asmlinkage void handle_msa(void);
extern asmlinkage void handle_mdmx(void);
extern asmlinkage void handle_watch(void);
extern asmlinkage void handle_mt(void);
@@ -264,7 +268,7 @@
printk("Status: %08x ", (uint32_t) regs->cp0_status);
- if (current_cpu_data.isa_level == MIPS_CPU_ISA_I) {
+ if (raw_current_cpu_data.isa_level == MIPS_CPU_ISA_I) {
if (regs->cp0_status & ST0_KUO)
printk("KUo ");
if (regs->cp0_status & ST0_IEO)
@@ -328,6 +332,7 @@
void show_registers(struct pt_regs *regs)
{
const int field = 2 * sizeof(unsigned long);
+ mm_segment_t old_fs = get_fs();
__show_regs(regs);
print_modules();
@@ -342,9 +347,12 @@
printk("*HwTLS: %0*lx\n", field, tls);
}
+ if (!user_mode(regs))
+ set_fs(KERNEL_DS);
show_stacktrace(current, regs);
show_code((unsigned int __user *) regs->cp0_epc);
printk("\n");
+ set_fs(old_fs);
}
static int regs_to_trapnr(struct pt_regs *regs)
@@ -686,6 +694,13 @@
int process_fpemu_return(int sig, void __user *fault_addr)
{
+ /*
+ * We can't allow the emulated instruction to leave any of the cause
+ * bits set in FCSR. If they were then the kernel would take an FP
+ * exception when restoring FP context.
+ */
+ current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
+
if (sig == SIGSEGV || sig == SIGBUS) {
struct siginfo si = {0};
si.si_addr = fault_addr;
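For reference, FPU_CSR_ALL_X covers the six FCSR cause bits (layout sketch):

	/* FCSR (CP1 control register 31), cause field:
	 *
	 *   bit 17  E  unimplemented operation
	 *   bit 16  V  invalid operation
	 *   bit 15  Z  divide by zero
	 *   bit 14  O  overflow
	 *   bit 13  U  underflow
	 *   bit 12  I  inexact
	 *
	 * Clearing all six guarantees that restoring this FP context
	 * cannot immediately re-raise an FP exception in kernel mode.
	 */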
@@ -724,6 +739,8 @@
int sig;
void __user *fault_addr = NULL;
+ if (!used_math())
+ init_fpu();
/*
* Unimplemented operation exception. If we've got the full
* software emulator on-board, let's use it...
@@ -741,18 +758,12 @@
sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
&fault_addr);
- /*
- * We can't allow the emulated instruction to leave any of
- * the cause bit set in $fcr31.
- */
- current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
+ /* If something went wrong, signal */
+ process_fpemu_return(sig, fault_addr);
/* Restore the hardware register state */
own_fpu(1); /* Using the FPU again. */
- /* If something went wrong, signal */
- process_fpemu_return(sig, fault_addr);
-
return;
} else if (fcr31 & FPU_CSR_INV_X)
info.si_code = FPE_FLTINV;
@@ -772,7 +783,7 @@
force_sig_info(SIGFPE, &info, current);
}
-static void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
+void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
const char *str)
{
siginfo_t info;
@@ -837,6 +848,13 @@
unsigned int opcode, bcode;
unsigned long epc;
u16 instr[2];
+#ifdef CONFIG_EVA
+ mm_segment_t seg;
+
+ seg = get_fs();
+ if (!user_mode(regs))
+ set_fs(KERNEL_DS);
+#endif
if (get_isa16_mode(regs->cp0_epc)) {
/* Calculate EPC. */
@@ -852,6 +870,9 @@
goto out_sigsegv;
bcode = (instr[0] >> 6) & 0x3f;
do_trap_or_bp(regs, bcode, "Break");
+#ifdef CONFIG_EVA
+ set_fs(seg);
+#endif
return;
}
} else {
@@ -875,23 +896,35 @@
*/
switch (bcode) {
case BRK_KPROBE_BP:
- if (notify_die(DIE_BREAK, "debug", regs, bcode, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP)
+ if (notify_die(DIE_BREAK, "debug", regs, bcode, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP) {
+#ifdef CONFIG_EVA
+ set_fs(seg);
+#endif
return;
- else
+ } else
break;
case BRK_KPROBE_SSTEPBP:
- if (notify_die(DIE_SSTEPBP, "single_step", regs, bcode, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP)
+ if (notify_die(DIE_SSTEPBP, "single_step", regs, bcode, regs_to_trapnr(regs), SIGTRAP) == NOTIFY_STOP) {
+#ifdef CONFIG_EVA
+ set_fs(seg);
+#endif
return;
- else
+ } else
break;
default:
break;
}
do_trap_or_bp(regs, bcode, "Break");
+#ifdef CONFIG_EVA
+ set_fs(seg);
+#endif
return;
out_sigsegv:
+#ifdef CONFIG_EVA
+ set_fs(seg);
+#endif
force_sig(SIGSEGV, current);
}
@@ -900,6 +933,13 @@
u32 opcode, tcode = 0;
u16 instr[2];
unsigned long epc = msk_isa16_mode(exception_epc(regs));
+#ifdef CONFIG_EVA
+ mm_segment_t seg;
+
+ seg = get_fs();
+ if (!user_mode(regs))
+ set_fs(KERNEL_DS);
+#endif
if (get_isa16_mode(regs->cp0_epc)) {
if (__get_user(instr[0], (u16 __user *)(epc + 0)) ||
@@ -918,9 +958,15 @@
}
do_trap_or_bp(regs, tcode, "Trap");
+#ifdef CONFIG_EVA
+ set_fs(seg);
+#endif
return;
out_sigsegv:
+#ifdef CONFIG_EVA
+ set_fs(seg);
+#endif
force_sig(SIGSEGV, current);
}
@@ -932,6 +978,24 @@
unsigned int opcode = 0;
int status = -1;
+#ifdef CONFIG_MIPS_INCOMPATIBLE_ARCH_EMULATION
+ if (mipsr2_emulation && likely(user_mode(regs))) {
+ if (likely(get_user(opcode, epc) >= 0)) {
+ status = mipsr2_decoder(regs, opcode);
+ switch (status) {
+ case SIGEMT:
+ case 0:
+ return;
+ case SIGILL:
+ break;
+ default:
+ process_fpemu_return(status, (void __user *)current->thread.cp0_baduaddr);
+ return;
+ }
+ }
+ }
+#endif
+
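The decoder's return value acts as a small dispatch protocol, as consumed by the switch above (summary sketch):

	/* mipsr2_decoder() return codes, as handled above:
	 *
	 *   0       instruction emulated; resume the task
	 *   SIGEMT  likewise treated as fully handled here
	 *   SIGILL  not an emulatable pre-R6 instruction; fall through
	 *           to the regular reserved-instruction handling below
	 *   other   a fault raised by the emulated access; delivered
	 *           through process_fpemu_return()
	 */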
if (notify_die(DIE_RI, "RI Fault", regs, 0, regs_to_trapnr(regs), SIGILL)
== NOTIFY_STOP)
return;
@@ -1038,6 +1102,131 @@
return NOTIFY_OK;
}
+static int enable_restore_fp_context(int msa)
+{
+ int err, was_fpu_owner, prior_msa;
+
+ if (!used_math()) {
+ /* First time FP context user. */
+ if (msa && !raw_cpu_has_fpu) {
+ force_sig(SIGFPE, current);
+ return -1;
+ }
+ preempt_disable();
+ err = init_fpu();
+ if (msa && !err) {
+ enable_msa();
+ _init_msa_upper();
+ set_thread_flag(TIF_USEDMSA);
+ set_thread_flag(TIF_MSA_CTX_LIVE);
+ }
+ preempt_enable();
+ if (err && raw_cpu_has_fpu)
+#ifdef CONFIG_MIPS_INCOMPATIBLE_ARCH_EMULATION
+ if (!mipsr2_emulation)
+#endif
+ {
+ force_sig(SIGFPE, current);
+ return -1;
+ }
+ return err;
+ }
+
+ /*
+ * This task has formerly used the FP context.
+ *
+ * If this thread has no live MSA vector context then we can simply
+ * restore the scalar FP context. If it has live MSA vector context
+ * (that is, it has or may have used MSA since last performing a
+ * function call) then we'll need to restore the vector context. This
+ * applies even if we're currently only executing a scalar FP
+ * instruction. This is because if we were to later execute an MSA
+ * instruction then we'd either have to:
+ *
+ * - Restore the vector context & clobber any registers modified by
+ * scalar FP instructions between now & then.
+ *
+ * or
+ *
+ * - Not restore the vector context & lose the most significant bits
+ * of all vector registers.
+ *
+ * Neither of those options is acceptable. We cannot restore the least
+ * significant bits of the registers now & only restore the most
+ * significant bits later because the most significant bits of any
+ * vector registers whose aliased FP register is modified now will have
+ * been zeroed. We'd have no way to know that when restoring the vector
+ * context & thus may load an outdated value for the most significant
+ * bits of a vector register.
+ *
+ * Note LY22: It is possible to restore a partial MSA context via "insert".
+ * This can be used to restore the upper parts of the MSA registers,
+ * but those upper parts must exist and have been saved.
+ */
+ if (!msa && !thread_msa_context_live())
+ return own_fpu(1);
+
+ if (!raw_cpu_has_fpu) {
+ force_sig(SIGFPE, current);
+ return -1;
+ }
+ /*
+ * This task is using or has previously used MSA. Thus we require
+ * that Status.FR == 1.
+ */
+ preempt_disable();
+ was_fpu_owner = is_fpu_owner();
+ err = own_fpu_inatomic(0);
+ if (err)
+ goto out;
+
+ enable_msa();
+ write_msa_csr(current->thread.fpu.msacsr);
+ set_thread_flag(TIF_USEDMSA);
+
+ /*
+ * If this is the first time that the task is using MSA and it has
+ * previously used scalar FP in this time slice then we already have
+ * FP context which we shouldn't clobber. We do however need to clear
+ * the upper 64b of each vector register so that this task has no
+ * opportunity to see data left behind by another.
+ */
+ prior_msa = test_and_set_thread_flag(TIF_MSA_CTX_LIVE);
+ if (!prior_msa && was_fpu_owner) {
+ _init_msa_upper();
+
+ goto out;
+ }
+
+ if (!prior_msa) {
+ /*
+ * Restore the least significant 64b of each vector register
+ * from the existing scalar FP context.
+ */
+ _restore_fp(current);
+
+ /*
+ * The task has not formerly used MSA, so clear the upper 64b
+ * of each vector register such that it cannot see data left
+ * behind by another task.
+ */
+ _init_msa_upper();
+ } else {
+ /* prior_msa: We need to restore the vector context. */
+ if (!was_fpu_owner) {
+ restore_msa(current);
+ write_32bit_cp1_register(CP1_STATUS, current->thread.fpu.fcr31);
+ } else {
+ _restore_msa_uppers_from_thread(&current->thread.fpu.fpr[0]);
+ }
+ }
+
+out:
+ preempt_enable();
+
+ return 0;
+}
+
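An illustration of the register aliasing that the long comment above guards against (sketch only):

	/* With Status.FR=1, scalar FPR $f0 aliases the low half of MSA
	 * vector register $w0:
	 *
	 *   127              64 63                0
	 *  +-------------------+------------------+
	 *  |   upper 64 bits   |  scalar FPR $f0  |   <- $w0
	 *  +-------------------+------------------+
	 *
	 * A scalar FP write to $f0 clobbers bits 127..64 of $w0, which is
	 * why live MSA context must be restored whole, never piecemeal.
	 */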
asmlinkage void do_cpu(struct pt_regs *regs)
{
unsigned int __user *epc;
@@ -1114,20 +1303,17 @@
/* Fall through. */
case 1:
- if (used_math()) /* Using the FPU again. */
- own_fpu(1);
- else { /* First time FPU user. */
- init_fpu();
- set_used_math();
- }
+ status = enable_restore_fp_context(0);
+ if (status < 0)
+ return;
- if (!raw_cpu_has_fpu) {
+ if ((!raw_cpu_has_fpu) || status) {
int sig;
void __user *fault_addr = NULL;
sig = fpu_emulator_cop1Handler(regs,
&current->thread.fpu,
0, &fault_addr);
- if (!process_fpemu_return(sig, fault_addr))
+ if ((!process_fpemu_return(sig, fault_addr)) && !status)
mt_ase_fp_affinity();
}
@@ -1141,6 +1327,30 @@
force_sig(SIGILL, current);
}
+asmlinkage void do_msa_fpe(struct pt_regs *regs)
+{
+ die_if_kernel("do_msa_fpe invoked from kernel context!", regs);
+ force_sig(SIGFPE, current);
+}
+
+asmlinkage void do_msa(struct pt_regs *regs)
+{
+ int err;
+
+ if ((!cpu_has_msa) || !test_thread_local_flags(LTIF_FPU_FR)) {
+ force_sig(SIGILL, current);
+ goto out;
+ }
+
+ die_if_kernel("do_msa invoked from kernel context!", regs);
+
+ err = enable_restore_fp_context(1);
+ if (err)
+ force_sig(SIGILL, current);
+out:
+ ;
+}
+
asmlinkage void do_mdmx(struct pt_regs *regs)
{
force_sig(SIGILL, current);
@@ -1176,14 +1386,34 @@
}
}
+#ifdef CONFIG_CPU_MIPSR6
+char *mcheck_code[32] = { "non R6 multiple hit in TLB: Status.TS = 1",
+ "multiple hit in TLB",
+ "multiple hit in TLB, speculative access",
+ "page size mismatch, unsupported FTLB page mask",
+ "index doesn't match EntryHI.VPN2 position in FTLB",
+ "HW PageTableWalker: Valid bits mismatch in PTE pair on directory level",
+ "HW PageTableWalker: Dual page mode is not implemented"
+ };
+#endif
+
asmlinkage void do_mcheck(struct pt_regs *regs)
{
const int field = 2 * sizeof(unsigned long);
int multi_match = regs->cp0_status & ST0_TS;
+#ifdef CONFIG_CPU_MIPSR6
+ int code = 0;
+#endif
show_regs(regs);
+#ifdef CONFIG_CPU_MIPSR6
+ if (multi_match || (code = read_c0_pagegrain() & PG_MCCAUSE)) {
+ printk("PageGrain: %0x\n", read_c0_pagegrain());
+ printk("BadVAddr: %0*lx\n", field, read_c0_badvaddr());
+#else
if (multi_match) {
+#endif
printk("Index : %0x\n", read_c0_index());
printk("Pagemask: %0x\n", read_c0_pagemask());
printk("EntryHi : %0*lx\n", field, read_c0_entryhi());
@@ -1195,6 +1425,9 @@
show_code((unsigned int __user *) regs->cp0_epc);
+#ifdef CONFIG_CPU_MIPSR6
+ panic("Caught Machine Check exception - %s",mcheck_code[code]);
+#else
/*
* Some chips may have other causes of machine check (e.g. SB1
* graduation timer)
@@ -1202,6 +1435,7 @@
panic("Caught Machine Check exception - %scaused by multiple "
"matching entries in the TLB.",
(multi_match) ? "" : "not ");
+#endif
}
asmlinkage void do_mt(struct pt_regs *regs)
@@ -1286,6 +1520,9 @@
case CPU_34K:
case CPU_74K:
case CPU_1004K:
+ case CPU_PROAPTIV:
+ case CPU_INTERAPTIV:
+ case CPU_SAMURAI:
{
#define ERRCTL_PE 0x80000000
#define ERRCTL_L2P 0x00800000
@@ -1367,22 +1604,26 @@
unsigned int reg_val;
/* For the moment, report the problem and hang. */
- printk("Cache error exception:\n");
+ printk("Cache error exception, cp0_ecc=0x%08x:\n",read_c0_ecc());
printk("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
reg_val = read_c0_cacheerr();
printk("c0_cacheerr == %08x\n", reg_val);
- printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
- reg_val & (1<<30) ? "secondary" : "primary",
- reg_val & (1<<31) ? "data" : "insn");
- printk("Error bits: %s%s%s%s%s%s%s\n",
+ if ((reg_val & 0xc0000000) == 0xc0000000)
+ printk("Decoded c0_cacheerr: FTLB parity error\n");
+ else
+ printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
+ reg_val & (1<<30) ? "secondary" : "primary",
+ reg_val & (1<<31) ? "data" : "insn");
+ printk("Error bits: %s%s%s%s%s%s%s%s\n",
reg_val & (1<<29) ? "ED " : "",
reg_val & (1<<28) ? "ET " : "",
+ reg_val & (1<<27) ? "ES " : "",
reg_val & (1<<26) ? "EE " : "",
reg_val & (1<<25) ? "EB " : "",
- reg_val & (1<<24) ? "EI " : "",
- reg_val & (1<<23) ? "E1 " : "",
- reg_val & (1<<22) ? "E0 " : "");
+ reg_val & (1<<24) ? "EI/EF " : "",
+ reg_val & (1<<23) ? "E1/SP " : "",
+ reg_val & (1<<22) ? "E0/EW " : "");
printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1));
#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
@@ -1396,6 +1637,45 @@
panic("Can't handle the cache error!");
}
+asmlinkage void do_ftlb(void)
+{
+ const int field = 2 * sizeof(unsigned long);
+ unsigned int reg_val;
+
+ /* For the moment, report the problem and hang. */
+ printk("FTLB error exception, cp0_ecc=0x%08x:\n",read_c0_ecc());
+ printk("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
+ reg_val = read_c0_cacheerr();
+ printk("c0_cacheerr == %08x\n", reg_val);
+
+ if ((reg_val & 0xc0000000) == 0xc0000000)
+ printk("Decoded c0_cacheerr: FTLB parity error\n");
+ else
+ printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
+ reg_val & (1<<30) ? "secondary" : "primary",
+ reg_val & (1<<31) ? "data" : "insn");
+ printk("Error bits: %s%s%s%s%s%s%s%s\n",
+ reg_val & (1<<29) ? "ED " : "",
+ reg_val & (1<<28) ? "ET " : "",
+ reg_val & (1<<27) ? "ES " : "",
+ reg_val & (1<<26) ? "EE " : "",
+ reg_val & (1<<25) ? "EB " : "",
+ reg_val & (1<<24) ? "EI/EF " : "",
+ reg_val & (1<<23) ? "E1/SP " : "",
+ reg_val & (1<<22) ? "E0/EW " : "");
+ printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1));
+
+#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
+ if (reg_val & (1<<22))
+ printk("DErrAddr0: 0x%0*lx\n", field, read_c0_derraddr0());
+
+ if (reg_val & (1<<23))
+ printk("DErrAddr1: 0x%0*lx\n", field, read_c0_derraddr1());
+#endif
+
+ panic("Can't handle the FTLB parity error!");
+}
+
/*
* SDBBP EJTAG debug exception handler.
* We skip the instruction and return to the next instruction.
@@ -1447,10 +1727,16 @@
void __noreturn nmi_exception_handler(struct pt_regs *regs)
{
+ unsigned long epc;
+ char str[100];
+
raw_notifier_call_chain(&nmi_chain, 0, regs);
bust_spinlocks(1);
- printk("NMI taken!!!!\n");
- die("NMI", regs);
+ epc = regs->cp0_epc;
+ snprintf(str, sizeof(str), "CPU%d NMI taken, CP0_EPC=%lx (before replacement by CP0_ERROREPC)\n", smp_processor_id(), regs->cp0_epc);
+ regs->cp0_epc = read_c0_errorepc();
+ die(str, regs);
+ regs->cp0_epc = epc;
}
#define VECTORSPACING 0x100 /* for EI/VI mode */
@@ -1513,7 +1799,6 @@
unsigned char *b;
BUG_ON(!cpu_has_veic && !cpu_has_vint);
- BUG_ON((n < 0) && (n > 9));
if (addr == NULL) {
handler = (unsigned long) do_default_vi;
@@ -1687,10 +1972,10 @@
if (cpu_has_dsp)
status_set |= ST0_MX;
- change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
+ change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
status_set);
- if (cpu_has_mips_r2)
+ if (cpu_has_mips_r2 || cpu_has_mips_r6)
hwrena |= 0x0000000f;
if (!noulri && cpu_has_userlocal)
@@ -1705,6 +1990,10 @@
if (cpu_has_veic || cpu_has_vint) {
unsigned long sr = set_c0_status(ST0_BEV);
+#if defined(CONFIG_EVA) || defined(CONFIG_CPU_MIPS64_R6)
+ write_c0_ebase(ebase|MIPS_EBASE_WG);
+ back_to_back_c0_hazard();
+#endif
write_c0_ebase(ebase);
write_c0_status(sr);
/* Setting vector spacing enables EI/VI mode */
@@ -1725,7 +2014,7 @@
* o read IntCtl.IPTI to determine the timer interrupt
* o read IntCtl.IPPCI to determine the performance counter interrupt
*/
- if (cpu_has_mips_r2) {
+ if (cpu_has_mips_r2 || cpu_has_mips_r6) {
cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP;
cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7;
cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7;
@@ -1770,7 +2059,7 @@
}
/* Install CPU exception handler */
-void __cpuinit set_handler(unsigned long offset, void *addr, unsigned long size)
+void set_handler(unsigned long offset, void *addr, unsigned long size)
{
#ifdef CONFIG_CPU_MICROMIPS
memcpy((void *)(ebase + offset), ((unsigned char *)addr - 1), size);
@@ -1808,6 +2097,8 @@
__setup("rdhwr_noopt", set_rdhwr_noopt);
+extern void tlb_do_page_fault_0(void);
+
void __init trap_init(void)
{
extern char except_vec3_generic;
@@ -1829,11 +2120,11 @@
} else {
#ifdef CONFIG_KVM_GUEST
#define KVM_GUEST_KSEG0 0x40000000
- ebase = KVM_GUEST_KSEG0;
+ ebase = KVM_GUEST_KSEG0;
#else
- ebase = CKSEG0;
+ ebase = CKSEG0;
#endif
- if (cpu_has_mips_r2)
+ if (cpu_has_mips_r2 || cpu_has_mips_r6)
ebase += (read_c0_ebase() & 0x3ffff000);
}
@@ -1912,6 +2203,7 @@
set_except_vector(11, handle_cpu);
set_except_vector(12, handle_ov);
set_except_vector(13, handle_tr);
+ set_except_vector(14, handle_msa_fpe);
if (current_cpu_type() == CPU_R6000 ||
current_cpu_type() == CPU_R6000A) {
@@ -1934,6 +2226,14 @@
if (cpu_has_fpu && !cpu_has_nofpuex)
set_except_vector(15, handle_fpe);
+ set_except_vector(16, handle_ftlb);
+
+ if (cpu_has_rixi && cpu_has_rixi_except) {
+ set_except_vector(19, tlb_do_page_fault_0);
+ set_except_vector(20, tlb_do_page_fault_0);
+ }
+
+ set_except_vector(21, handle_msa);
set_except_vector(22, handle_mdmx);
if (cpu_has_mcheck)
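For reference, the ExcCode assignments wired up by this hunk, per the MIPS32/MIPS64 architecture definitions (summary only):

	/* ExcCode  handler               meaning
	 *   14     handle_msa_fpe        MSA floating-point exception
	 *   16     handle_ftlb           FTLB parity error (impl. dep.)
	 *   19     tlb_do_page_fault_0   TLBRI - read-inhibit miss
	 *   20     tlb_do_page_fault_0   TLBXI - execute-inhibit miss
	 *   21     handle_msa            MSA disabled/unusable
	 *   22     handle_mdmx           MDMX unusable
	 */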
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
index 203d885..a1daf5b 100644
--- a/arch/mips/kernel/unaligned.c
+++ b/arch/mips/kernel/unaligned.c
@@ -105,6 +105,747 @@
#define unaligned_action UNALIGNED_ACTION_QUIET
#endif
extern void show_registers(struct pt_regs *regs);
+asmlinkage void do_cpu(struct pt_regs *regs);
+
+#ifdef CONFIG_CPU_HAS_MSA
+void msa_to_wd(unsigned int wd, union fpureg *from);
+void msa_from_wd(unsigned int wd, union fpureg *to);
+#endif
+
+#ifdef CONFIG_EVA
+/* EVA variant */
+
+#ifdef __BIG_ENDIAN
+#define LoadHW(addr, value, res) \
+ __asm__ __volatile__ (".set\tnoat\n" \
+ "1:\tlbe\t%0, 0(%2)\n" \
+ "2:\tlbue\t$1, 1(%2)\n\t" \
+ "sll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "li\t%1, 0\n" \
+ "3:\t.set\tat\n\t" \
+ ".insn\n\t" \
+ ".section\t.fixup,\"ax\"\n\t" \
+ "4:\tli\t%1, %3\n\t" \
+ "j\t3b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 4b\n\t" \
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+ : "r" (addr), "i" (-EFAULT));
+
+#define LoadW(addr, value, res) \
+ __asm__ __volatile__ ( \
+ "1:\tlwle\t%0, (%2)\n" \
+ "2:\tlwre\t%0, 3(%2)\n\t" \
+ "li\t%1, 0\n" \
+ "3:\n\t" \
+ ".insn\n\t" \
+ ".section\t.fixup,\"ax\"\n\t" \
+ "4:\tli\t%1, %3\n\t" \
+ "j\t3b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 4b\n\t" \
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+ : "r" (addr), "i" (-EFAULT));
+
+#define LoadHWU(addr, value, res) \
+ __asm__ __volatile__ ( \
+ ".set\tnoat\n" \
+ "1:\tlbue\t%0, 0(%2)\n" \
+ "2:\tlbue\t$1, 1(%2)\n\t" \
+ "sll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "li\t%1, 0\n" \
+ "3:\n\t" \
+ ".insn\n\t" \
+ ".set\tat\n\t" \
+ ".section\t.fixup,\"ax\"\n\t" \
+ "4:\tli\t%1, %3\n\t" \
+ "j\t3b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 4b\n\t" \
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+ : "r" (addr), "i" (-EFAULT));
+
+#define LoadWU(addr, value, res) \
+ __asm__ __volatile__ ( \
+ "1:\tlwle\t%0, (%2)\n" \
+ "2:\tlwre\t%0, 3(%2)\n\t" \
+ "dsll\t%0, %0, 32\n\t" \
+ "dsrl\t%0, %0, 32\n\t" \
+ "li\t%1, 0\n" \
+ "3:\n\t" \
+ ".insn\n\t" \
+ "\t.section\t.fixup,\"ax\"\n\t" \
+ "4:\tli\t%1, %3\n\t" \
+ "j\t3b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 4b\n\t" \
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+ : "r" (addr), "i" (-EFAULT));
+
+#define StoreHW(addr, value, res) \
+ __asm__ __volatile__ ( \
+ ".set\tnoat\n" \
+ "1:\tsbe\t%1, 1(%2)\n\t" \
+ "srl\t$1, %1, 0x8\n" \
+ "2:\tsbe\t$1, 0(%2)\n\t" \
+ ".set\tat\n\t" \
+ "li\t%0, 0\n" \
+ "3:\n\t" \
+ ".insn\n\t" \
+ ".section\t.fixup,\"ax\"\n\t" \
+ "4:\tli\t%0, %3\n\t" \
+ "j\t3b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 4b\n\t" \
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=r" (res) \
+ : "r" (value), "r" (addr), "i" (-EFAULT) \
+ : "memory");
+
+#define StoreW(addr, value, res) \
+ __asm__ __volatile__ ( \
+ "1:\tswle\t%1,(%2)\n" \
+ "2:\tswre\t%1, 3(%2)\n\t" \
+ "li\t%0, 0\n" \
+ "3:\n\t" \
+ ".insn\n\t" \
+ ".section\t.fixup,\"ax\"\n\t" \
+ "4:\tli\t%0, %3\n\t" \
+ "j\t3b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 4b\n\t" \
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=r" (res) \
+ : "r" (value), "r" (addr), "i" (-EFAULT) \
+ : "memory");
+#endif
+
+#ifdef __LITTLE_ENDIAN
+#define LoadHW(addr, value, res) \
+ __asm__ __volatile__ (".set\tnoat\n" \
+ "1:\tlbe\t%0, 1(%2)\n" \
+ "2:\tlbue\t$1, 0(%2)\n\t" \
+ "sll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "li\t%1, 0\n" \
+ "3:\t.set\tat\n\t" \
+ ".insn\n\t" \
+ ".section\t.fixup,\"ax\"\n\t" \
+ "4:\tli\t%1, %3\n\t" \
+ "j\t3b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 4b\n\t" \
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+ : "r" (addr), "i" (-EFAULT));
+
+#define LoadW(addr, value, res) \
+ __asm__ __volatile__ ( \
+ "1:\tlwle\t%0, 3(%2)\n" \
+ "2:\tlwre\t%0, (%2)\n\t" \
+ "li\t%1, 0\n" \
+ "3:\n\t" \
+ ".insn\n\t" \
+ ".section\t.fixup,\"ax\"\n\t" \
+ "4:\tli\t%1, %3\n\t" \
+ "j\t3b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 4b\n\t" \
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+ : "r" (addr), "i" (-EFAULT));
+
+#define LoadHWU(addr, value, res) \
+ __asm__ __volatile__ ( \
+ ".set\tnoat\n" \
+ "1:\tlbue\t%0, 1(%2)\n" \
+ "2:\tlbue\t$1, 0(%2)\n\t" \
+ "sll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "li\t%1, 0\n" \
+ "3:\n\t" \
+ ".insn\n\t" \
+ ".set\tat\n\t" \
+ ".section\t.fixup,\"ax\"\n\t" \
+ "4:\tli\t%1, %3\n\t" \
+ "j\t3b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 4b\n\t" \
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+ : "r" (addr), "i" (-EFAULT));
+
+#define LoadWU(addr, value, res) \
+ __asm__ __volatile__ ( \
+ "1:\tlwle\t%0, 3(%2)\n" \
+ "2:\tlwre\t%0, (%2)\n\t" \
+ "dsll\t%0, %0, 32\n\t" \
+ "dsrl\t%0, %0, 32\n\t" \
+ "li\t%1, 0\n" \
+ "3:\n\t" \
+ ".insn\n\t" \
+ "\t.section\t.fixup,\"ax\"\n\t" \
+ "4:\tli\t%1, %3\n\t" \
+ "j\t3b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 4b\n\t" \
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+ : "r" (addr), "i" (-EFAULT));
+
+#define StoreHW(addr, value, res) \
+ __asm__ __volatile__ ( \
+ ".set\tnoat\n" \
+ "1:\tsbe\t%1, 0(%2)\n\t" \
+ "srl\t$1,%1, 0x8\n" \
+ "2:\tsbe\t$1, 1(%2)\n\t" \
+ ".set\tat\n\t" \
+ "li\t%0, 0\n" \
+ "3:\n\t" \
+ ".insn\n\t" \
+ ".section\t.fixup,\"ax\"\n\t" \
+ "4:\tli\t%0, %3\n\t" \
+ "j\t3b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 4b\n\t" \
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=r" (res) \
+ : "r" (value), "r" (addr), "i" (-EFAULT) \
+ : "memory");
+
+#define StoreW(addr, value, res) \
+ __asm__ __volatile__ ( \
+ "1:\tswle\t%1, 3(%2)\n" \
+ "2:\tswre\t%1, (%2)\n\t" \
+ "li\t%0, 0\n" \
+ "3:\n\t" \
+ ".insn\n\t" \
+ ".section\t.fixup,\"ax\"\n\t" \
+ "4:\tli\t%0, %3\n\t" \
+ "j\t3b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 4b\n\t" \
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=r" (res) \
+ : "r" (value), "r" (addr), "i" (-EFAULT) \
+ : "memory");
+#endif
+
+#elif defined(CONFIG_CPU_MIPSR6)
+/* non-EVA R6 variant */
+
+#ifdef __BIG_ENDIAN
+#define LoadHW(addr, value, res) \
+ __asm__ __volatile__ (".set\tnoat\n" \
+ "1:\tlb\t%0, 0(%2)\n" \
+ "2:\tlbu\t$1, 1(%2)\n\t" \
+ "sll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "li\t%1, 0\n" \
+ "3:\t.set\tat\n\t" \
+ ".insn\n\t" \
+ ".section\t.fixup,\"ax\"\n\t" \
+ "4:\tli\t%1, %3\n\t" \
+ "j\t3b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 4b\n\t" \
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+ : "r" (addr), "i" (-EFAULT));
+
+#define LoadW(addr, value, res) \
+ __asm__ __volatile__ ( \
+ ".set\tnoat\n" \
+ "1:\tlb\t%0, 0(%2)\n" \
+ "2:\tlbu\t$1, 1(%2)\n\t" \
+ "sll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "3:\tlbu\t$1, 2(%2)\n\t" \
+ "sll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "4:\tlbu\t$1, 3(%2)\n\t" \
+ "sll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "li\t%1, 0\n" \
+ ".set\tat\n" \
+ "10:\n\t" \
+ ".insn\n\t" \
+ ".section\t.fixup,\"ax\"\n\t" \
+ "11:\tli\t%1, %3\n\t" \
+ "j\t10b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 11b\n\t" \
+ STR(PTR)"\t2b, 11b\n\t" \
+ STR(PTR)"\t3b, 11b\n\t" \
+ STR(PTR)"\t4b, 11b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+ : "r" (addr), "i" (-EFAULT));
+
+#define LoadHWU(addr, value, res) \
+ __asm__ __volatile__ ( \
+ ".set\tnoat\n" \
+ "1:\tlbu\t%0, 0(%2)\n" \
+ "2:\tlbu\t$1, 1(%2)\n\t" \
+ "sll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "li\t%1, 0\n" \
+ "3:\n\t" \
+ ".insn\n\t" \
+ ".set\tat\n\t" \
+ ".section\t.fixup,\"ax\"\n\t" \
+ "4:\tli\t%1, %3\n\t" \
+ "j\t3b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 4b\n\t" \
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+ : "r" (addr), "i" (-EFAULT));
+
+#define LoadWU(addr, value, res) \
+ __asm__ __volatile__ ( \
+ ".set\tnoat\n" \
+ "1:\tlbu\t%0, 0(%2)\n" \
+ "2:\tlbu\t$1, 1(%2)\n\t" \
+ "dsll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "3:\tlbu\t$1, 2(%2)\n\t" \
+ "dsll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "4:\tlbu\t$1, 3(%2)\n\t" \
+ "dsll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "li\t%1, 0\n" \
+ ".set\tat\n" \
+ "10:\n\t" \
+ ".insn\n\t" \
+ ".section\t.fixup,\"ax\"\n\t" \
+ "11:\tli\t%1, %3\n\t" \
+ "j\t10b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 11b\n\t" \
+ STR(PTR)"\t2b, 11b\n\t" \
+ STR(PTR)"\t3b, 11b\n\t" \
+ STR(PTR)"\t4b, 11b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+ : "r" (addr), "i" (-EFAULT));
+
+#define LoadDW(addr, value, res) \
+ __asm__ __volatile__ ( \
+ ".set\tnoat\n" \
+ "1:\tlb\t%0, 0(%2)\n" \
+ "2:\tlbu\t$1, 1(%2)\n\t" \
+ "dsll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "3:\tlbu\t$1, 2(%2)\n\t" \
+ "dsll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "4:\tlbu\t$1, 3(%2)\n\t" \
+ "dsll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "5:\tlbu\t$1, 4(%2)\n\t" \
+ "dsll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "6:\tlbu\t$1, 5(%2)\n\t" \
+ "dsll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "7:\tlbu\t$1, 6(%2)\n\t" \
+ "dsll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "8:\tlbu\t$1, 7(%2)\n\t" \
+ "dsll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "li\t%1, 0\n" \
+ ".set\tat\n" \
+ "10:\n\t" \
+ ".insn\n\t" \
+ ".section\t.fixup,\"ax\"\n\t" \
+ "11:\tli\t%1, %3\n\t" \
+ "j\t10b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 11b\n\t" \
+ STR(PTR)"\t2b, 11b\n\t" \
+ STR(PTR)"\t3b, 11b\n\t" \
+ STR(PTR)"\t4b, 11b\n\t" \
+ STR(PTR)"\t5b, 11b\n\t" \
+ STR(PTR)"\t6b, 11b\n\t" \
+ STR(PTR)"\t7b, 11b\n\t" \
+ STR(PTR)"\t8b, 11b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+ : "r" (addr), "i" (-EFAULT));
+
+#define StoreHW(addr, value, res) \
+ __asm__ __volatile__ ( \
+ ".set\tnoat\n" \
+ "1:\tsb\t%1, 1(%2)\n\t" \
+ "srl\t$1, %1, 0x8\n" \
+ "2:\tsb\t$1, 0(%2)\n\t" \
+ ".set\tat\n\t" \
+ "li\t%0, 0\n" \
+ "3:\n\t" \
+ ".insn\n\t" \
+ ".section\t.fixup,\"ax\"\n\t" \
+ "4:\tli\t%0, %3\n\t" \
+ "j\t3b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 4b\n\t" \
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=r" (res) \
+ : "r" (value), "r" (addr), "i" (-EFAULT) \
+ : "memory");
+
+#define StoreW(addr, value, res) \
+ __asm__ __volatile__ ( \
+ ".set\tnoat\n" \
+ "1:\tsb\t%1, 3(%2)\n\t" \
+ "srl\t$1, %1, 0x8\n" \
+ "2:\tsb\t$1, 2(%2)\n\t" \
+ "srl\t$1, $1, 0x8\n" \
+ "3:\tsb\t$1, 1(%2)\n\t" \
+ "srl\t$1, $1, 0x8\n" \
+ "4:\tsb\t$1, 0(%2)\n\t" \
+ ".set\tat\n\t" \
+ "li\t%0, 0\n" \
+ "10:\n\t" \
+ ".insn\n\t" \
+ ".section\t.fixup,\"ax\"\n\t" \
+ "11:\tli\t%0, %3\n\t" \
+ "j\t10b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 11b\n\t" \
+ STR(PTR)"\t2b, 11b\n\t" \
+ STR(PTR)"\t3b, 11b\n\t" \
+ STR(PTR)"\t4b, 11b\n\t" \
+ ".previous" \
+ : "=r" (res) \
+ : "r" (value), "r" (addr), "i" (-EFAULT) \
+ : "memory");
+
+#define StoreDW(addr, value, res) \
+ __asm__ __volatile__ ( \
+ ".set\tnoat\n" \
+ "1:\tsb\t%1, 7(%2)\n\t" \
+ "dsrl\t$1, %1, 0x8\n" \
+ "2:\tsb\t$1, 6(%2)\n\t" \
+ "dsrl\t$1, $1, 0x8\n" \
+ "3:\tsb\t$1, 5(%2)\n\t" \
+ "dsrl\t$1, $1, 0x8\n" \
+ "4:\tsb\t$1, 4(%2)\n\t" \
+ "dsrl\t$1, $1, 0x8\n" \
+ "2:\tsb\t$1, 3(%2)\n\t" \
+ "dsrl\t$1, $1, 0x8\n" \
+ "3:\tsb\t$1, 2(%2)\n\t" \
+ "dsrl\t$1, $1, 0x8\n" \
+ "4:\tsb\t$1, 1(%2)\n\t" \
+ "dsrl\t$1, $1, 0x8\n" \
+ "4:\tsb\t$1, 0(%2)\n\t" \
+ ".set\tat\n\t" \
+ "li\t%0, 0\n" \
+ "10:\n\t" \
+ ".insn\n\t" \
+ ".section\t.fixup,\"ax\"\n\t" \
+ "11:\tli\t%0, %3\n\t" \
+ "j\t10b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 11b\n\t" \
+ STR(PTR)"\t2b, 11b\n\t" \
+ STR(PTR)"\t3b, 11b\n\t" \
+ STR(PTR)"\t4b, 11b\n\t" \
+ STR(PTR)"\t5b, 11b\n\t" \
+ STR(PTR)"\t6b, 11b\n\t" \
+ STR(PTR)"\t7b, 11b\n\t" \
+ STR(PTR)"\t8b, 11b\n\t" \
+ ".previous" \
+ : "=r" (res) \
+ : "r" (value), "r" (addr), "i" (-EFAULT) \
+ : "memory");
+#endif
+
+#ifdef __LITTLE_ENDIAN
+#define LoadHW(addr, value, res) \
+ __asm__ __volatile__ (".set\tnoat\n" \
+ "1:\tlb\t%0, 1(%2)\n" \
+ "2:\tlbu\t$1, 0(%2)\n\t" \
+ "sll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "li\t%1, 0\n" \
+ "3:\t.set\tat\n\t" \
+ ".insn\n\t" \
+ ".section\t.fixup,\"ax\"\n\t" \
+ "4:\tli\t%1, %3\n\t" \
+ "j\t3b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 4b\n\t" \
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+ : "r" (addr), "i" (-EFAULT));
+
+#define LoadW(addr, value, res) \
+ __asm__ __volatile__ ( \
+ ".set\tnoat\n" \
+ "1:\tlb\t%0, 3(%2)\n" \
+ "2:\tlbu\t$1, 2(%2)\n\t" \
+ "sll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "3:\tlbu\t$1, 1(%2)\n\t" \
+ "sll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "4:\tlbu\t$1, 0(%2)\n\t" \
+ "sll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "li\t%1, 0\n" \
+ ".set\tat\n" \
+ "10:\n\t" \
+ ".insn\n\t" \
+ ".section\t.fixup,\"ax\"\n\t" \
+ "11:\tli\t%1, %3\n\t" \
+ "j\t10b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 11b\n\t" \
+ STR(PTR)"\t2b, 11b\n\t" \
+ STR(PTR)"\t3b, 11b\n\t" \
+ STR(PTR)"\t4b, 11b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+ : "r" (addr), "i" (-EFAULT));
+
+#define LoadHWU(addr, value, res) \
+ __asm__ __volatile__ ( \
+ ".set\tnoat\n" \
+ "1:\tlbu\t%0, 1(%2)\n" \
+ "2:\tlbu\t$1, 0(%2)\n\t" \
+ "sll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "li\t%1, 0\n" \
+ "3:\n\t" \
+ ".insn\n\t" \
+ ".set\tat\n\t" \
+ ".section\t.fixup,\"ax\"\n\t" \
+ "4:\tli\t%1, %3\n\t" \
+ "j\t3b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 4b\n\t" \
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+ : "r" (addr), "i" (-EFAULT));
+
+#define LoadWU(addr, value, res) \
+ __asm__ __volatile__ ( \
+ ".set\tnoat\n" \
+ "1:\tlbu\t%0, 3(%2)\n" \
+ "2:\tlbu\t$1, 2(%2)\n\t" \
+ "dsll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "3:\tlbu\t$1, 1(%2)\n\t" \
+ "dsll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "4:\tlbu\t$1, 0(%2)\n\t" \
+ "dsll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "li\t%1, 0\n" \
+ ".set\tat\n" \
+ "10:\n\t" \
+ ".insn\n\t" \
+ ".section\t.fixup,\"ax\"\n\t" \
+ "11:\tli\t%1, %3\n\t" \
+ "j\t10b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 11b\n\t" \
+ STR(PTR)"\t2b, 11b\n\t" \
+ STR(PTR)"\t3b, 11b\n\t" \
+ STR(PTR)"\t4b, 11b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+ : "r" (addr), "i" (-EFAULT));
+
+#define LoadDW(addr, value, res) \
+ __asm__ __volatile__ ( \
+ ".set\tnoat\n" \
+ "1:\tlbu\t%0, 7(%2)\n" \
+ "2:\tlbu\t$1, 6(%2)\n\t" \
+ "dsll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "3:\tlbu\t$1, 5(%2)\n\t" \
+ "dsll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "4:\tlbu\t$1, 4(%2)\n\t" \
+ "dsll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "5:\tlbu\t$1, 3(%2)\n\t" \
+ "dsll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "6:\tlbu\t$1, 2(%2)\n\t" \
+ "dsll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "7:\tlbu\t$1, 1(%2)\n\t" \
+ "dsll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "8:\tlbu\t$1, 0(%2)\n\t" \
+ "dsll\t%0, 0x8\n\t" \
+ "or\t%0, $1\n\t" \
+ "li\t%1, 0\n" \
+ ".set\tat\n" \
+ "10:\n\t" \
+ ".insn\n\t" \
+ ".section\t.fixup,\"ax\"\n\t" \
+ "11:\tli\t%1, %3\n\t" \
+ "j\t10b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 11b\n\t" \
+ STR(PTR)"\t2b, 11b\n\t" \
+ STR(PTR)"\t3b, 11b\n\t" \
+ STR(PTR)"\t4b, 11b\n\t" \
+ STR(PTR)"\t5b, 11b\n\t" \
+ STR(PTR)"\t6b, 11b\n\t" \
+ STR(PTR)"\t7b, 11b\n\t" \
+ STR(PTR)"\t8b, 11b\n\t" \
+ ".previous" \
+ : "=&r" (value), "=r" (res) \
+ : "r" (addr), "i" (-EFAULT));
+
+#define StoreHW(addr, value, res) \
+ __asm__ __volatile__ ( \
+ ".set\tnoat\n" \
+ "1:\tsb\t%1, 0(%2)\n\t" \
+ "srl\t$1,%1, 0x8\n" \
+ "2:\tsb\t$1, 1(%2)\n\t" \
+ ".set\tat\n\t" \
+ "li\t%0, 0\n" \
+ "3:\n\t" \
+ ".insn\n\t" \
+ ".section\t.fixup,\"ax\"\n\t" \
+ "4:\tli\t%0, %3\n\t" \
+ "j\t3b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 4b\n\t" \
+ STR(PTR)"\t2b, 4b\n\t" \
+ ".previous" \
+ : "=r" (res) \
+ : "r" (value), "r" (addr), "i" (-EFAULT) \
+ : "memory");
+
+#define StoreW(addr, value, res) \
+ __asm__ __volatile__ ( \
+ ".set\tnoat\n" \
+ "1:\tsb\t%1, 0(%2)\n\t" \
+ "srl\t$1,%1, 0x8\n" \
+ "2:\tsb\t$1, 1(%2)\n\t" \
+ "srl\t$1,$1, 0x8\n" \
+ "3:\tsb\t$1, 2(%2)\n\t" \
+ "srl\t$1,$1, 0x8\n" \
+ "4:\tsb\t$1, 3(%2)\n\t" \
+ ".set\tat\n\t" \
+ "li\t%0, 0\n" \
+ "10:\n\t" \
+ ".insn\n\t" \
+ ".section\t.fixup,\"ax\"\n\t" \
+ "11:\tli\t%0, %3\n\t" \
+ "j\t10b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 11b\n\t" \
+ STR(PTR)"\t2b, 11b\n\t" \
+ STR(PTR)"\t3b, 11b\n\t" \
+ STR(PTR)"\t4b, 11b\n\t" \
+ ".previous" \
+ : "=r" (res) \
+ : "r" (value), "r" (addr), "i" (-EFAULT) \
+ : "memory");
+
+#define StoreDW(addr, value, res) \
+ __asm__ __volatile__ ( \
+ ".set\tnoat\n" \
+ "1:\tsb\t%1, 0(%2)\n\t" \
+ "dsrl\t$1,%1, 0x8\n" \
+ "2:\tsb\t$1, 1(%2)\n\t" \
+ "dsrl\t$1,$1, 0x8\n" \
+ "3:\tsb\t$1, 2(%2)\n\t" \
+ "dsrl\t$1,$1, 0x8\n" \
+ "4:\tsb\t$1, 3(%2)\n\t" \
+ "dsrl\t$1,$1, 0x8\n" \
+ "5:\tsb\t$1, 4(%2)\n\t" \
+ "dsrl\t$1,$1, 0x8\n" \
+ "6:\tsb\t$1, 5(%2)\n\t" \
+ "dsrl\t$1,$1, 0x8\n" \
+ "7:\tsb\t$1, 6(%2)\n\t" \
+ "dsrl\t$1,$1, 0x8\n" \
+ "8:\tsb\t$1, 7(%2)\n\t" \
+ ".set\tat\n\t" \
+ "li\t%0, 0\n" \
+ "10:\n\t" \
+ ".insn\n\t" \
+ ".section\t.fixup,\"ax\"\n\t" \
+ "11:\tli\t%0, %3\n\t" \
+ "j\t10b\n\t" \
+ ".previous\n\t" \
+ ".section\t__ex_table,\"a\"\n\t" \
+ STR(PTR)"\t1b, 11b\n\t" \
+ STR(PTR)"\t2b, 11b\n\t" \
+ STR(PTR)"\t3b, 11b\n\t" \
+ STR(PTR)"\t4b, 11b\n\t" \
+ STR(PTR)"\t5b, 11b\n\t" \
+ STR(PTR)"\t6b, 11b\n\t" \
+ STR(PTR)"\t7b, 11b\n\t" \
+ STR(PTR)"\t8b, 11b\n\t" \
+ ".previous" \
+ : "=r" (res) \
+ : "r" (value), "r" (addr), "i" (-EFAULT) \
+ : "memory");
+#endif
+
+#else
+/* non-EVA variant */
#ifdef __BIG_ENDIAN
#define LoadHW(addr, value, res) \
@@ -224,7 +965,8 @@
STR(PTR)"\t2b, 4b\n\t" \
".previous" \
: "=r" (res) \
- : "r" (value), "r" (addr), "i" (-EFAULT));
+ : "r" (value), "r" (addr), "i" (-EFAULT) \
+ : "memory");
#define StoreW(addr, value, res) \
__asm__ __volatile__ ( \
@@ -242,7 +984,8 @@
STR(PTR)"\t2b, 4b\n\t" \
".previous" \
: "=r" (res) \
- : "r" (value), "r" (addr), "i" (-EFAULT));
+ : "r" (value), "r" (addr), "i" (-EFAULT) \
+ : "memory");
#define StoreDW(addr, value, res) \
__asm__ __volatile__ ( \
@@ -260,7 +1003,8 @@
STR(PTR)"\t2b, 4b\n\t" \
".previous" \
: "=r" (res) \
- : "r" (value), "r" (addr), "i" (-EFAULT));
+ : "r" (value), "r" (addr), "i" (-EFAULT) \
+ : "memory");
#endif
#ifdef __LITTLE_ENDIAN
@@ -381,7 +1125,8 @@
STR(PTR)"\t2b, 4b\n\t" \
".previous" \
: "=r" (res) \
- : "r" (value), "r" (addr), "i" (-EFAULT));
+ : "r" (value), "r" (addr), "i" (-EFAULT) \
+ : "memory");
#define StoreW(addr, value, res) \
__asm__ __volatile__ ( \
@@ -399,7 +1144,8 @@
STR(PTR)"\t2b, 4b\n\t" \
".previous" \
: "=r" (res) \
- : "r" (value), "r" (addr), "i" (-EFAULT));
+ : "r" (value), "r" (addr), "i" (-EFAULT) \
+ : "memory");
#define StoreDW(addr, value, res) \
__asm__ __volatile__ ( \
@@ -417,7 +1163,396 @@
STR(PTR)"\t2b, 4b\n\t" \
".previous" \
: "=r" (res) \
- : "r" (value), "r" (addr), "i" (-EFAULT));
+ : "r" (value), "r" (addr), "i" (-EFAULT) \
+ : "memory");
+#endif
+
+#endif
+
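All of the variants above share one calling convention; a minimal sketch of how the interpreter later in this file drives them (illustration, mirroring emulate_load_store_insn()):

	/* Emulate an unaligned 32-bit load from 'addr' into GPR rt.
	 * 'res' becomes 0 on success or -EFAULT via the .fixup path. */
	unsigned long value;
	unsigned int res;

	LoadW(addr, value, res);
	if (res)
		goto fault;	/* user memory vanished: raise a signal */
	compute_return_epc(regs);
	regs->regs[insn.i_format.rt] = value;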
+#ifdef CONFIG_64BIT
+static inline void dmtc1(unsigned long val, unsigned reg)
+{
+ switch (reg) {
+ case 0: __asm__ __volatile__ (".set push\n.set hardfloat\ndmtc1\t%0,$0\n.set pop"::"r"(val)); break;
+ case 1: __asm__ __volatile__ (".set push\n.set hardfloat\ndmtc1\t%0,$1\n.set pop"::"r"(val)); break;
+ case 2: __asm__ __volatile__ (".set push\n.set hardfloat\ndmtc1\t%0,$2\n.set pop"::"r"(val)); break;
+ case 3: __asm__ __volatile__ (".set push\n.set hardfloat\ndmtc1\t%0,$3\n.set pop"::"r"(val)); break;
+ case 4: __asm__ __volatile__ (".set push\n.set hardfloat\ndmtc1\t%0,$4\n.set pop"::"r"(val)); break;
+ case 5: __asm__ __volatile__ (".set push\n.set hardfloat\ndmtc1\t%0,$5\n.set pop"::"r"(val)); break;
+ case 6: __asm__ __volatile__ (".set push\n.set hardfloat\ndmtc1\t%0,$6\n.set pop"::"r"(val)); break;
+ case 7: __asm__ __volatile__ (".set push\n.set hardfloat\ndmtc1\t%0,$7\n.set pop"::"r"(val)); break;
+ case 8: __asm__ __volatile__ (".set push\n.set hardfloat\ndmtc1\t%0,$8\n.set pop"::"r"(val)); break;
+ case 9: __asm__ __volatile__ (".set push\n.set hardfloat\ndmtc1\t%0,$9\n.set pop"::"r"(val)); break;
+ case 10: __asm__ __volatile__ (".set push\n.set hardfloat\ndmtc1\t%0,$10\n.set pop"::"r"(val)); break;
+ case 11: __asm__ __volatile__ (".set push\n.set hardfloat\ndmtc1\t%0,$11\n.set pop"::"r"(val)); break;
+ case 12: __asm__ __volatile__ (".set push\n.set hardfloat\ndmtc1\t%0,$12\n.set pop"::"r"(val)); break;
+ case 13: __asm__ __volatile__ (".set push\n.set hardfloat\ndmtc1\t%0,$13\n.set pop"::"r"(val)); break;
+ case 14: __asm__ __volatile__ (".set push\n.set hardfloat\ndmtc1\t%0,$14\n.set pop"::"r"(val)); break;
+ case 15: __asm__ __volatile__ (".set push\n.set hardfloat\ndmtc1\t%0,$15\n.set pop"::"r"(val)); break;
+ case 16: __asm__ __volatile__ (".set push\n.set hardfloat\ndmtc1\t%0,$16\n.set pop"::"r"(val)); break;
+ case 17: __asm__ __volatile__ (".set push\n.set hardfloat\ndmtc1\t%0,$17\n.set pop"::"r"(val)); break;
+ case 18: __asm__ __volatile__ (".set push\n.set hardfloat\ndmtc1\t%0,$18\n.set pop"::"r"(val)); break;
+ case 19: __asm__ __volatile__ (".set push\n.set hardfloat\ndmtc1\t%0,$19\n.set pop"::"r"(val)); break;
+ case 20: __asm__ __volatile__ (".set push\n.set hardfloat\ndmtc1\t%0,$20\n.set pop"::"r"(val)); break;
+ case 21: __asm__ __volatile__ (".set push\n.set hardfloat\ndmtc1\t%0,$21\n.set pop"::"r"(val)); break;
+ case 22: __asm__ __volatile__ (".set push\n.set hardfloat\ndmtc1\t%0,$22\n.set pop"::"r"(val)); break;
+ case 23: __asm__ __volatile__ (".set push\n.set hardfloat\ndmtc1\t%0,$23\n.set pop"::"r"(val)); break;
+ case 24: __asm__ __volatile__ (".set push\n.set hardfloat\ndmtc1\t%0,$24\n.set pop"::"r"(val)); break;
+ case 25: __asm__ __volatile__ (".set push\n.set hardfloat\ndmtc1\t%0,$25\n.set pop"::"r"(val)); break;
+ case 26: __asm__ __volatile__ (".set push\n.set hardfloat\ndmtc1\t%0,$26\n.set pop"::"r"(val)); break;
+ case 27: __asm__ __volatile__ (".set push\n.set hardfloat\ndmtc1\t%0,$27\n.set pop"::"r"(val)); break;
+ case 28: __asm__ __volatile__ (".set push\n.set hardfloat\ndmtc1\t%0,$28\n.set pop"::"r"(val)); break;
+ case 29: __asm__ __volatile__ (".set push\n.set hardfloat\ndmtc1\t%0,$29\n.set pop"::"r"(val)); break;
+ case 30: __asm__ __volatile__ (".set push\n.set hardfloat\ndmtc1\t%0,$30\n.set pop"::"r"(val)); break;
+ case 31: __asm__ __volatile__ (".set push\n.set hardfloat\ndmtc1\t%0,$31\n.set pop"::"r"(val)); break;
+ }
+}
+
+static inline unsigned long dmfc1(unsigned reg)
+{
+ unsigned long uninitialized_var(val);
+
+ switch (reg) {
+ case 0: __asm__ __volatile__ (".set push\n.set hardfloat\ndmfc1\t%0,$0\n.set pop":"=r"(val)); break;
+ case 1: __asm__ __volatile__ (".set push\n.set hardfloat\ndmfc1\t%0,$1\n.set pop":"=r"(val)); break;
+ case 2: __asm__ __volatile__ (".set push\n.set hardfloat\ndmfc1\t%0,$2\n.set pop":"=r"(val)); break;
+ case 3: __asm__ __volatile__ (".set push\n.set hardfloat\ndmfc1\t%0,$3\n.set pop":"=r"(val)); break;
+ case 4: __asm__ __volatile__ (".set push\n.set hardfloat\ndmfc1\t%0,$4\n.set pop":"=r"(val)); break;
+ case 5: __asm__ __volatile__ (".set push\n.set hardfloat\ndmfc1\t%0,$5\n.set pop":"=r"(val)); break;
+ case 6: __asm__ __volatile__ (".set push\n.set hardfloat\ndmfc1\t%0,$6\n.set pop":"=r"(val)); break;
+ case 7: __asm__ __volatile__ (".set push\n.set hardfloat\ndmfc1\t%0,$7\n.set pop":"=r"(val)); break;
+ case 8: __asm__ __volatile__ (".set push\n.set hardfloat\ndmfc1\t%0,$8\n.set pop":"=r"(val)); break;
+ case 9: __asm__ __volatile__ (".set push\n.set hardfloat\ndmfc1\t%0,$9\n.set pop":"=r"(val)); break;
+ case 10: __asm__ __volatile__ (".set push\n.set hardfloat\ndmfc1\t%0,$10\n.set pop":"=r"(val)); break;
+ case 11: __asm__ __volatile__ (".set push\n.set hardfloat\ndmfc1\t%0,$11\n.set pop":"=r"(val)); break;
+ case 12: __asm__ __volatile__ (".set push\n.set hardfloat\ndmfc1\t%0,$12\n.set pop":"=r"(val)); break;
+ case 13: __asm__ __volatile__ (".set push\n.set hardfloat\ndmfc1\t%0,$13\n.set pop":"=r"(val)); break;
+ case 14: __asm__ __volatile__ (".set push\n.set hardfloat\ndmfc1\t%0,$14\n.set pop":"=r"(val)); break;
+ case 15: __asm__ __volatile__ (".set push\n.set hardfloat\ndmfc1\t%0,$15\n.set pop":"=r"(val)); break;
+ case 16: __asm__ __volatile__ (".set push\n.set hardfloat\ndmfc1\t%0,$16\n.set pop":"=r"(val)); break;
+ case 17: __asm__ __volatile__ (".set push\n.set hardfloat\ndmfc1\t%0,$17\n.set pop":"=r"(val)); break;
+ case 18: __asm__ __volatile__ (".set push\n.set hardfloat\ndmfc1\t%0,$18\n.set pop":"=r"(val)); break;
+ case 19: __asm__ __volatile__ (".set push\n.set hardfloat\ndmfc1\t%0,$19\n.set pop":"=r"(val)); break;
+ case 20: __asm__ __volatile__ (".set push\n.set hardfloat\ndmfc1\t%0,$20\n.set pop":"=r"(val)); break;
+ case 21: __asm__ __volatile__ (".set push\n.set hardfloat\ndmfc1\t%0,$21\n.set pop":"=r"(val)); break;
+ case 22: __asm__ __volatile__ (".set push\n.set hardfloat\ndmfc1\t%0,$22\n.set pop":"=r"(val)); break;
+ case 23: __asm__ __volatile__ (".set push\n.set hardfloat\ndmfc1\t%0,$23\n.set pop":"=r"(val)); break;
+ case 24: __asm__ __volatile__ (".set push\n.set hardfloat\ndmfc1\t%0,$24\n.set pop":"=r"(val)); break;
+ case 25: __asm__ __volatile__ (".set push\n.set hardfloat\ndmfc1\t%0,$25\n.set pop":"=r"(val)); break;
+ case 26: __asm__ __volatile__ (".set push\n.set hardfloat\ndmfc1\t%0,$26\n.set pop":"=r"(val)); break;
+ case 27: __asm__ __volatile__ (".set push\n.set hardfloat\ndmfc1\t%0,$27\n.set pop":"=r"(val)); break;
+ case 28: __asm__ __volatile__ (".set push\n.set hardfloat\ndmfc1\t%0,$28\n.set pop":"=r"(val)); break;
+ case 29: __asm__ __volatile__ (".set push\n.set hardfloat\ndmfc1\t%0,$29\n.set pop":"=r"(val)); break;
+ case 30: __asm__ __volatile__ (".set push\n.set hardfloat\ndmfc1\t%0,$30\n.set pop":"=r"(val)); break;
+ case 31: __asm__ __volatile__ (".set push\n.set hardfloat\ndmfc1\t%0,$31\n.set pop":"=r"(val)); break;
+ }
+
+ return val;
+}
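The switch-per-register shape is forced by the ISA: the FPR number is an immediate field in the dmtc1/dmfc1 encoding, so it cannot come from a C variable. A hypothetical use when emulating an unaligned ldc1/sdc1 pair (sketch):

	/* ldc1: assemble the doubleword, then move it into FPR 'rt'. */
	LoadDW(addr, value, res);
	if (res)
		goto fault;
	dmtc1(value, insn.i_format.rt);

	/* sdc1: pull the doubleword out of FPR 'rt', then store it. */
	value = dmfc1(insn.i_format.rt);
	StoreDW(addr, value, res);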
+#else /* !CONFIG_64BIT */
+
+#ifndef CONFIG_CPU_MIPSR1
+static inline void mtc1_mthc1(unsigned long val, unsigned long val2, unsigned reg)
+{
+ switch (reg) {
+#ifdef __BIG_ENDIAN
+ case 0: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$0\n\tmthc1\t%1,$0\n.set pop"::"r"(val2),"r"(val)); break;
+ case 1: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$1\n\tmthc1\t%1,$1\n.set pop"::"r"(val2),"r"(val)); break;
+ case 2: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$2\n\tmthc1\t%1,$2\n.set pop"::"r"(val2),"r"(val)); break;
+ case 3: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$3\n\tmthc1\t%1,$3\n.set pop"::"r"(val2),"r"(val)); break;
+ case 4: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$4\n\tmthc1\t%1,$4\n.set pop"::"r"(val2),"r"(val)); break;
+ case 5: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$5\n\tmthc1\t%1,$5\n.set pop"::"r"(val2),"r"(val)); break;
+ case 6: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$6\n\tmthc1\t%1,$6\n.set pop"::"r"(val2),"r"(val)); break;
+ case 7: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$7\n\tmthc1\t%1,$7\n.set pop"::"r"(val2),"r"(val)); break;
+ case 8: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$8\n\tmthc1\t%1,$8\n.set pop"::"r"(val2),"r"(val)); break;
+ case 9: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$9\n\tmthc1\t%1,$9\n.set pop"::"r"(val2),"r"(val)); break;
+ case 10: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$10\n\tmthc1\t%1,$10\n.set pop"::"r"(val2),"r"(val)); break;
+ case 11: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$11\n\tmthc1\t%1,$11\n.set pop"::"r"(val2),"r"(val)); break;
+ case 12: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$12\n\tmthc1\t%1,$12\n.set pop"::"r"(val2),"r"(val)); break;
+ case 13: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$13\n\tmthc1\t%1,$13\n.set pop"::"r"(val2),"r"(val)); break;
+ case 14: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$14\n\tmthc1\t%1,$14\n.set pop"::"r"(val2),"r"(val)); break;
+ case 15: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$15\n\tmthc1\t%1,$15\n.set pop"::"r"(val2),"r"(val)); break;
+ case 16: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$16\n\tmthc1\t%1,$16\n.set pop"::"r"(val2),"r"(val)); break;
+ case 17: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$17\n\tmthc1\t%1,$17\n.set pop"::"r"(val2),"r"(val)); break;
+ case 18: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$18\n\tmthc1\t%1,$18\n.set pop"::"r"(val2),"r"(val)); break;
+ case 19: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$19\n\tmthc1\t%1,$19\n.set pop"::"r"(val2),"r"(val)); break;
+ case 20: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$20\n\tmthc1\t%1,$20\n.set pop"::"r"(val2),"r"(val)); break;
+ case 21: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$21\n\tmthc1\t%1,$21\n.set pop"::"r"(val2),"r"(val)); break;
+ case 22: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$22\n\tmthc1\t%1,$22\n.set pop"::"r"(val2),"r"(val)); break;
+ case 23: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$23\n\tmthc1\t%1,$23\n.set pop"::"r"(val2),"r"(val)); break;
+ case 24: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$24\n\tmthc1\t%1,$24\n.set pop"::"r"(val2),"r"(val)); break;
+ case 25: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$25\n\tmthc1\t%1,$25\n.set pop"::"r"(val2),"r"(val)); break;
+ case 26: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$26\n\tmthc1\t%1,$26\n.set pop"::"r"(val2),"r"(val)); break;
+ case 27: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$27\n\tmthc1\t%1,$27\n.set pop"::"r"(val2),"r"(val)); break;
+ case 28: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$28\n\tmthc1\t%1,$28\n.set pop"::"r"(val2),"r"(val)); break;
+ case 29: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$29\n\tmthc1\t%1,$29\n.set pop"::"r"(val2),"r"(val)); break;
+ case 30: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$30\n\tmthc1\t%1,$30\n.set pop"::"r"(val2),"r"(val)); break;
+ case 31: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$31\n\tmthc1\t%1,$31\n.set pop"::"r"(val2),"r"(val)); break;
+ }
+#endif
+#ifdef __LITTLE_ENDIAN
+ case 0: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$0\n\tmthc1\t%1,$0\n.set pop"::"r"(val),"r"(val2)); break;
+ case 1: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$1\n\tmthc1\t%1,$1\n.set pop"::"r"(val),"r"(val2)); break;
+ case 2: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$2\n\tmthc1\t%1,$2\n.set pop"::"r"(val),"r"(val2)); break;
+ case 3: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$3\n\tmthc1\t%1,$3\n.set pop"::"r"(val),"r"(val2)); break;
+ case 4: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$4\n\tmthc1\t%1,$4\n.set pop"::"r"(val),"r"(val2)); break;
+ case 5: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$5\n\tmthc1\t%1,$5\n.set pop"::"r"(val),"r"(val2)); break;
+ case 6: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$6\n\tmthc1\t%1,$6\n.set pop"::"r"(val),"r"(val2)); break;
+ case 7: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$7\n\tmthc1\t%1,$7\n.set pop"::"r"(val),"r"(val2)); break;
+ case 8: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$8\n\tmthc1\t%1,$8\n.set pop"::"r"(val),"r"(val2)); break;
+ case 9: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$9\n\tmthc1\t%1,$9\n.set pop"::"r"(val),"r"(val2)); break;
+ case 10: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$10\n\tmthc1\t%1,$10\n.set pop"::"r"(val),"r"(val2)); break;
+ case 11: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$11\n\tmthc1\t%1,$11\n.set pop"::"r"(val),"r"(val2)); break;
+ case 12: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$12\n\tmthc1\t%1,$12\n.set pop"::"r"(val),"r"(val2)); break;
+ case 13: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$13\n\tmthc1\t%1,$13\n.set pop"::"r"(val),"r"(val2)); break;
+ case 14: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$14\n\tmthc1\t%1,$14\n.set pop"::"r"(val),"r"(val2)); break;
+ case 15: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$15\n\tmthc1\t%1,$15\n.set pop"::"r"(val),"r"(val2)); break;
+ case 16: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$16\n\tmthc1\t%1,$16\n.set pop"::"r"(val),"r"(val2)); break;
+ case 17: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$17\n\tmthc1\t%1,$17\n.set pop"::"r"(val),"r"(val2)); break;
+ case 18: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$18\n\tmthc1\t%1,$18\n.set pop"::"r"(val),"r"(val2)); break;
+ case 19: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$19\n\tmthc1\t%1,$19\n.set pop"::"r"(val),"r"(val2)); break;
+ case 20: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$20\n\tmthc1\t%1,$20\n.set pop"::"r"(val),"r"(val2)); break;
+ case 21: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$21\n\tmthc1\t%1,$21\n.set pop"::"r"(val),"r"(val2)); break;
+ case 22: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$22\n\tmthc1\t%1,$22\n.set pop"::"r"(val),"r"(val2)); break;
+ case 23: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$23\n\tmthc1\t%1,$23\n.set pop"::"r"(val),"r"(val2)); break;
+ case 24: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$24\n\tmthc1\t%1,$24\n.set pop"::"r"(val),"r"(val2)); break;
+ case 25: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$25\n\tmthc1\t%1,$25\n.set pop"::"r"(val),"r"(val2)); break;
+ case 26: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$26\n\tmthc1\t%1,$26\n.set pop"::"r"(val),"r"(val2)); break;
+ case 27: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$27\n\tmthc1\t%1,$27\n.set pop"::"r"(val),"r"(val2)); break;
+ case 28: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$28\n\tmthc1\t%1,$28\n.set pop"::"r"(val),"r"(val2)); break;
+ case 29: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$29\n\tmthc1\t%1,$29\n.set pop"::"r"(val),"r"(val2)); break;
+ case 30: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$30\n\tmthc1\t%1,$30\n.set pop"::"r"(val),"r"(val2)); break;
+ case 31: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$31\n\tmthc1\t%1,$31\n.set pop"::"r"(val),"r"(val2)); break;
+ }
+#endif
+}
+
+static inline void mfc1_mfhc1(unsigned long *val, unsigned long *val2, unsigned reg)
+{
+ unsigned long uninitialized_var(lval), uninitialized_var(lval2);
+
+ switch (reg) {
+#ifdef __BIG_ENDIAN
+ case 0: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$0\n\tmfhc1\t%1,$0\n.set pop":"=r"(lval2),"=r"(lval)); break;
+ case 1: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$1\n\tmfhc1\t%1,$1\n.set pop":"=r"(lval2),"=r"(lval)); break;
+ case 2: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$2\n\tmfhc1\t%1,$2\n.set pop":"=r"(lval2),"=r"(lval)); break;
+ case 3: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$3\n\tmfhc1\t%1,$3\n.set pop":"=r"(lval2),"=r"(lval)); break;
+ case 4: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$4\n\tmfhc1\t%1,$4\n.set pop":"=r"(lval2),"=r"(lval)); break;
+ case 5: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$5\n\tmfhc1\t%1,$5\n.set pop":"=r"(lval2),"=r"(lval)); break;
+ case 6: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$6\n\tmfhc1\t%1,$6\n.set pop":"=r"(lval2),"=r"(lval)); break;
+ case 7: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$7\n\tmfhc1\t%1,$7\n.set pop":"=r"(lval2),"=r"(lval)); break;
+ case 8: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$8\n\tmfhc1\t%1,$8\n.set pop":"=r"(lval2),"=r"(lval)); break;
+ case 9: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$9\n\tmfhc1\t%1,$9\n.set pop":"=r"(lval2),"=r"(lval)); break;
+ case 10: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$10\n\tmfhc1\t%1,$10\n.set pop":"=r"(lval2),"=r"(lval)); break;
+ case 11: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$11\n\tmfhc1\t%1,$11\n.set pop":"=r"(lval2),"=r"(lval)); break;
+ case 12: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$12\n\tmfhc1\t%1,$12\n.set pop":"=r"(lval2),"=r"(lval)); break;
+ case 13: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$13\n\tmfhc1\t%1,$13\n.set pop":"=r"(lval2),"=r"(lval)); break;
+ case 14: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$14\n\tmfhc1\t%1,$14\n.set pop":"=r"(lval2),"=r"(lval)); break;
+ case 15: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$15\n\tmfhc1\t%1,$15\n.set pop":"=r"(lval2),"=r"(lval)); break;
+ case 16: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$16\n\tmfhc1\t%1,$16\n.set pop":"=r"(lval2),"=r"(lval)); break;
+ case 17: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$17\n\tmfhc1\t%1,$17\n.set pop":"=r"(lval2),"=r"(lval)); break;
+ case 18: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$18\n\tmfhc1\t%1,$18\n.set pop":"=r"(lval2),"=r"(lval)); break;
+ case 19: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$19\n\tmfhc1\t%1,$19\n.set pop":"=r"(lval2),"=r"(lval)); break;
+ case 20: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$20\n\tmfhc1\t%1,$20\n.set pop":"=r"(lval2),"=r"(lval)); break;
+ case 21: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$21\n\tmfhc1\t%1,$21\n.set pop":"=r"(lval2),"=r"(lval)); break;
+ case 22: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$22\n\tmfhc1\t%1,$22\n.set pop":"=r"(lval2),"=r"(lval)); break;
+ case 23: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$23\n\tmfhc1\t%1,$23\n.set pop":"=r"(lval2),"=r"(lval)); break;
+ case 24: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$24\n\tmfhc1\t%1,$24\n.set pop":"=r"(lval2),"=r"(lval)); break;
+ case 25: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$25\n\tmfhc1\t%1,$25\n.set pop":"=r"(lval2),"=r"(lval)); break;
+ case 26: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$26\n\tmfhc1\t%1,$26\n.set pop":"=r"(lval2),"=r"(lval)); break;
+ case 27: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$27\n\tmfhc1\t%1,$27\n.set pop":"=r"(lval2),"=r"(lval)); break;
+ case 28: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$28\n\tmfhc1\t%1,$28\n.set pop":"=r"(lval2),"=r"(lval)); break;
+ case 29: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$29\n\tmfhc1\t%1,$29\n.set pop":"=r"(lval2),"=r"(lval)); break;
+ case 30: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$30\n\tmfhc1\t%1,$30\n.set pop":"=r"(lval2),"=r"(lval)); break;
+ case 31: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$31\n\tmfhc1\t%1,$31\n.set pop":"=r"(lval2),"=r"(lval)); break;
+#endif
+#ifdef __LITTLE_ENDIAN
+ case 0: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$0\n\tmfhc1\t%1,$0\n.set pop":"=r"(lval),"=r"(lval2)); break;
+ case 1: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$1\n\tmfhc1\t%1,$1\n.set pop":"=r"(lval),"=r"(lval2)); break;
+ case 2: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$2\n\tmfhc1\t%1,$2\n.set pop":"=r"(lval),"=r"(lval2)); break;
+ case 3: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$3\n\tmfhc1\t%1,$3\n.set pop":"=r"(lval),"=r"(lval2)); break;
+ case 4: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$4\n\tmfhc1\t%1,$4\n.set pop":"=r"(lval),"=r"(lval2)); break;
+ case 5: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$5\n\tmfhc1\t%1,$5\n.set pop":"=r"(lval),"=r"(lval2)); break;
+ case 6: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$6\n\tmfhc1\t%1,$6\n.set pop":"=r"(lval),"=r"(lval2)); break;
+ case 7: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$7\n\tmfhc1\t%1,$7\n.set pop":"=r"(lval),"=r"(lval2)); break;
+ case 8: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$8\n\tmfhc1\t%1,$8\n.set pop":"=r"(lval),"=r"(lval2)); break;
+ case 9: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$9\n\tmfhc1\t%1,$9\n.set pop":"=r"(lval),"=r"(lval2)); break;
+ case 10: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$10\n\tmfhc1\t%1,$10\n.set pop":"=r"(lval),"=r"(lval2)); break;
+ case 11: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$11\n\tmfhc1\t%1,$11\n.set pop":"=r"(lval),"=r"(lval2)); break;
+ case 12: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$12\n\tmfhc1\t%1,$12\n.set pop":"=r"(lval),"=r"(lval2)); break;
+ case 13: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$13\n\tmfhc1\t%1,$13\n.set pop":"=r"(lval),"=r"(lval2)); break;
+ case 14: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$14\n\tmfhc1\t%1,$14\n.set pop":"=r"(lval),"=r"(lval2)); break;
+ case 15: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$15\n\tmfhc1\t%1,$15\n.set pop":"=r"(lval),"=r"(lval2)); break;
+ case 16: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$16\n\tmfhc1\t%1,$16\n.set pop":"=r"(lval),"=r"(lval2)); break;
+ case 17: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$17\n\tmfhc1\t%1,$17\n.set pop":"=r"(lval),"=r"(lval2)); break;
+ case 18: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$18\n\tmfhc1\t%1,$18\n.set pop":"=r"(lval),"=r"(lval2)); break;
+ case 19: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$19\n\tmfhc1\t%1,$19\n.set pop":"=r"(lval),"=r"(lval2)); break;
+ case 20: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$20\n\tmfhc1\t%1,$20\n.set pop":"=r"(lval),"=r"(lval2)); break;
+ case 21: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$21\n\tmfhc1\t%1,$21\n.set pop":"=r"(lval),"=r"(lval2)); break;
+ case 22: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$22\n\tmfhc1\t%1,$22\n.set pop":"=r"(lval),"=r"(lval2)); break;
+ case 23: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$23\n\tmfhc1\t%1,$23\n.set pop":"=r"(lval),"=r"(lval2)); break;
+ case 24: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$24\n\tmfhc1\t%1,$24\n.set pop":"=r"(lval),"=r"(lval2)); break;
+ case 25: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$25\n\tmfhc1\t%1,$25\n.set pop":"=r"(lval),"=r"(lval2)); break;
+ case 26: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$26\n\tmfhc1\t%1,$26\n.set pop":"=r"(lval),"=r"(lval2)); break;
+ case 27: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$27\n\tmfhc1\t%1,$27\n.set pop":"=r"(lval),"=r"(lval2)); break;
+ case 28: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$28\n\tmfhc1\t%1,$28\n.set pop":"=r"(lval),"=r"(lval2)); break;
+ case 29: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$29\n\tmfhc1\t%1,$29\n.set pop":"=r"(lval),"=r"(lval2)); break;
+ case 30: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$30\n\tmfhc1\t%1,$30\n.set pop":"=r"(lval),"=r"(lval2)); break;
+ case 31: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$31\n\tmfhc1\t%1,$31\n.set pop":"=r"(lval),"=r"(lval2)); break;
+#endif
+ }
+ *val = lval;
+ *val2 = lval2;
+}
+#endif /* CONFIG_CPU_MIPSR1 */
+#endif /* CONFIG_64BIT */
+
+static inline void mtc1_pair(unsigned long val, unsigned long val2, unsigned reg)
+{
+ switch (reg & ~0x1) {
+#ifdef __BIG_ENDIAN
+ case 0: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$0\n\tmtc1\t%1,$1\n.set pop"::"r"(val2),"r"(val)); break;
+ case 2: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$2\n\tmtc1\t%1,$3\n.set pop"::"r"(val2),"r"(val)); break;
+ case 4: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$4\n\tmtc1\t%1,$5\n.set pop"::"r"(val2),"r"(val)); break;
+ case 6: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$6\n\tmtc1\t%1,$7\n.set pop"::"r"(val2),"r"(val)); break;
+ case 8: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$8\n\tmtc1\t%1,$9\n.set pop"::"r"(val2),"r"(val)); break;
+ case 10: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$10\n\tmtc1\t%1,$11\n.set pop"::"r"(val2),"r"(val)); break;
+ case 12: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$12\n\tmtc1\t%1,$13\n.set pop"::"r"(val2),"r"(val)); break;
+ case 14: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$14\n\tmtc1\t%1,$15\n.set pop"::"r"(val2),"r"(val)); break;
+ case 16: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$16\n\tmtc1\t%1,$17\n.set pop"::"r"(val2),"r"(val)); break;
+ case 18: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$18\n\tmtc1\t%1,$19\n.set pop"::"r"(val2),"r"(val)); break;
+ case 20: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$20\n\tmtc1\t%1,$21\n.set pop"::"r"(val2),"r"(val)); break;
+ case 22: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$22\n\tmtc1\t%1,$23\n.set pop"::"r"(val2),"r"(val)); break;
+ case 24: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$24\n\tmtc1\t%1,$25\n.set pop"::"r"(val2),"r"(val)); break;
+ case 26: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$26\n\tmtc1\t%1,$27\n.set pop"::"r"(val2),"r"(val)); break;
+ case 28: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$28\n\tmtc1\t%1,$29\n.set pop"::"r"(val2),"r"(val)); break;
+ case 30: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$30\n\tmtc1\t%1,$31\n.set pop"::"r"(val2),"r"(val)); break;
+#endif
+#ifdef __LITTLE_ENDIAN
+ case 0: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$0\n\tmtc1\t%1,$1\n.set pop"::"r"(val),"r"(val2)); break;
+ case 2: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$2\n\tmtc1\t%1,$3\n.set pop"::"r"(val),"r"(val2)); break;
+ case 4: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$4\n\tmtc1\t%1,$5\n.set pop"::"r"(val),"r"(val2)); break;
+ case 6: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$6\n\tmtc1\t%1,$7\n.set pop"::"r"(val),"r"(val2)); break;
+ case 8: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$8\n\tmtc1\t%1,$9\n.set pop"::"r"(val),"r"(val2)); break;
+ case 10: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$10\n\tmtc1\t%1,$11\n.set pop"::"r"(val),"r"(val2)); break;
+ case 12: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$12\n\tmtc1\t%1,$13\n.set pop"::"r"(val),"r"(val2)); break;
+ case 14: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$14\n\tmtc1\t%1,$15\n.set pop"::"r"(val),"r"(val2)); break;
+ case 16: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$16\n\tmtc1\t%1,$17\n.set pop"::"r"(val),"r"(val2)); break;
+ case 18: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$18\n\tmtc1\t%1,$19\n.set pop"::"r"(val),"r"(val2)); break;
+ case 20: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$20\n\tmtc1\t%1,$21\n.set pop"::"r"(val),"r"(val2)); break;
+ case 22: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$22\n\tmtc1\t%1,$23\n.set pop"::"r"(val),"r"(val2)); break;
+ case 24: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$24\n\tmtc1\t%1,$25\n.set pop"::"r"(val),"r"(val2)); break;
+ case 26: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$26\n\tmtc1\t%1,$27\n.set pop"::"r"(val),"r"(val2)); break;
+ case 28: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$28\n\tmtc1\t%1,$29\n.set pop"::"r"(val),"r"(val2)); break;
+ case 30: __asm__ __volatile__ (".set push\n.set hardfloat\nmtc1\t%0,$30\n\tmtc1\t%1,$31\n.set pop"::"r"(val),"r"(val2)); break;
+#endif
+ }
+}
+
+static inline void mfc1_pair(unsigned long *val, unsigned long *val2, unsigned reg)
+{
+ unsigned long uninitialized_var(lval), uninitialized_var(lval2);
+
+ switch (reg & ~0x1) {
+#ifdef __BIG_ENDIAN
+ case 0: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$0\n\tmfc1\t%1,$1\n.set pop":"=r"(lval2),"=r"(lval)); break;
+ case 2: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$2\n\tmfc1\t%1,$3\n.set pop":"=r"(lval2),"=r"(lval)); break;
+ case 4: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$4\n\tmfc1\t%1,$5\n.set pop":"=r"(lval2),"=r"(lval)); break;
+ case 6: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$6\n\tmfc1\t%1,$7\n.set pop":"=r"(lval2),"=r"(lval)); break;
+ case 8: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$8\n\tmfc1\t%1,$9\n.set pop":"=r"(lval2),"=r"(lval)); break;
+ case 10: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$10\n\tmfc1\t%1,$11\n.set pop":"=r"(lval2),"=r"(lval)); break;
+ case 12: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$12\n\tmfc1\t%1,$13\n.set pop":"=r"(lval2),"=r"(lval)); break;
+ case 14: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$14\n\tmfc1\t%1,$15\n.set pop":"=r"(lval2),"=r"(lval)); break;
+ case 16: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$16\n\tmfc1\t%1,$17\n.set pop":"=r"(lval2),"=r"(lval)); break;
+ case 18: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$18\n\tmfc1\t%1,$19\n.set pop":"=r"(lval2),"=r"(lval)); break;
+ case 20: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$20\n\tmfc1\t%1,$21\n.set pop":"=r"(lval2),"=r"(lval)); break;
+ case 22: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$22\n\tmfc1\t%1,$23\n.set pop":"=r"(lval2),"=r"(lval)); break;
+ case 24: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$24\n\tmfc1\t%1,$25\n.set pop":"=r"(lval2),"=r"(lval)); break;
+ case 26: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$26\n\tmfc1\t%1,$27\n.set pop":"=r"(lval2),"=r"(lval)); break;
+ case 28: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$28\n\tmfc1\t%1,$29\n.set pop":"=r"(lval2),"=r"(lval)); break;
+ case 30: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$30\n\tmfc1\t%1,$31\n.set pop":"=r"(lval2),"=r"(lval)); break;
+#endif
+#ifdef __LITTLE_ENDIAN
+ case 0: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$0\n\tmfc1\t%1,$1\n.set pop":"=r"(lval),"=r"(lval2)); break;
+ case 2: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$2\n\tmfc1\t%1,$3\n.set pop":"=r"(lval),"=r"(lval2)); break;
+ case 4: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$4\n\tmfc1\t%1,$5\n.set pop":"=r"(lval),"=r"(lval2)); break;
+ case 6: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$6\n\tmfc1\t%1,$7\n.set pop":"=r"(lval),"=r"(lval2)); break;
+ case 8: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$8\n\tmfc1\t%1,$9\n.set pop":"=r"(lval),"=r"(lval2)); break;
+ case 10: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$10\n\tmfc1\t%1,$11\n.set pop":"=r"(lval),"=r"(lval2)); break;
+ case 12: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$12\n\tmfc1\t%1,$13\n.set pop":"=r"(lval),"=r"(lval2)); break;
+ case 14: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$14\n\tmfc1\t%1,$15\n.set pop":"=r"(lval),"=r"(lval2)); break;
+ case 16: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$16\n\tmfc1\t%1,$17\n.set pop":"=r"(lval),"=r"(lval2)); break;
+ case 18: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$18\n\tmfc1\t%1,$19\n.set pop":"=r"(lval),"=r"(lval2)); break;
+ case 20: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$20\n\tmfc1\t%1,$21\n.set pop":"=r"(lval),"=r"(lval2)); break;
+ case 22: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$22\n\tmfc1\t%1,$23\n.set pop":"=r"(lval),"=r"(lval2)); break;
+ case 24: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$24\n\tmfc1\t%1,$25\n.set pop":"=r"(lval),"=r"(lval2)); break;
+ case 26: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$26\n\tmfc1\t%1,$27\n.set pop":"=r"(lval),"=r"(lval2)); break;
+ case 28: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$28\n\tmfc1\t%1,$29\n.set pop":"=r"(lval),"=r"(lval2)); break;
+ case 30: __asm__ __volatile__ (".set push\n.set hardfloat\nmfc1\t%0,$30\n\tmfc1\t%1,$31\n.set pop":"=r"(lval),"=r"(lval2)); break;
+#endif
+ }
+ *val = lval;
+ *val2 = lval2;
+}
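
An aside on the helpers above: val/val2 travel in memory order (the word at
addr, then the word at addr + 4), which is why the asm tables swap their
operands between the two endiannesses. A minimal hedged sketch of the
resulting mapping; the function below is illustrative only, not part of the
patch:

	/* Illustration: recombine the two memory-order words moved by
	 * mtc1_mthc1()/mtc1_pair() into one 64-bit FP value. On big
	 * endian the word at the lower address is the high half. */
	static inline unsigned long long fp64_from_mem_words(unsigned long val,
							     unsigned long val2)
	{
	#ifdef __BIG_ENDIAN
		return ((unsigned long long)val << 32) | (val2 & 0xffffffffUL);
	#else
		return ((unsigned long long)val2 << 32) | (val & 0xffffffffUL);
	#endif
	}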
+
+#ifdef CONFIG_CPU_HAS_MSA
+#ifdef __BIG_ENDIAN
+/*
+ * MSA data format conversion.
+ * Needed only on BIG ENDIAN - on LITTLE ENDIAN the register format already
+ * matches the memory layout contiguously.
+ *
+ * Conversion is done between the doubleword format and the other formats
+ * (W/H/B) because the kernel uses LD.D and ST.D to load/store MSA registers
+ * and keeps them in that format in current->thread.fpu.fpr.
+ */
+static void msa_convert(union fpureg *to, union fpureg *from, int fmt)
+{
+ switch (fmt) {
+ case 0: /* byte */
+ to->val8[0] = from->val8[7];
+ to->val8[1] = from->val8[6];
+ to->val8[2] = from->val8[5];
+ to->val8[3] = from->val8[4];
+ to->val8[4] = from->val8[3];
+ to->val8[5] = from->val8[2];
+ to->val8[6] = from->val8[1];
+ to->val8[7] = from->val8[0];
+ to->val8[8] = from->val8[15];
+ to->val8[9] = from->val8[14];
+ to->val8[10] = from->val8[13];
+ to->val8[11] = from->val8[12];
+ to->val8[12] = from->val8[11];
+ to->val8[13] = from->val8[10];
+ to->val8[14] = from->val8[9];
+ to->val8[15] = from->val8[8];
+ break;
+
+ case 1: /* halfword */
+ to->val16[0] = from->val16[3];
+ to->val16[1] = from->val16[2];
+ to->val16[2] = from->val16[1];
+ to->val16[3] = from->val16[0];
+ to->val16[4] = from->val16[7];
+ to->val16[5] = from->val16[6];
+ to->val16[6] = from->val16[5];
+ to->val16[7] = from->val16[4];
+ break;
+
+ case 2: /* word */
+ to->val32[0] = from->val32[1];
+ to->val32[1] = from->val32[0];
+ to->val32[2] = from->val32[3];
+ to->val32[3] = from->val32[2];
+ break;
+
+ case 3: /* doubleword, no conversion */
+ to->val64[0] = from->val64[0];
+ to->val64[1] = from->val64[1];
+ break;
+ }
+}
+#endif
#endif
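
To make the big-endian conversion above concrete, the word case (fmt == 2)
swaps the two 32-bit words inside each 64-bit half. A self-contained toy
model, hedged in that it uses a local union rather than the kernel's
union fpureg:

	#include <stdint.h>

	/* Toy model of the fmt == 2 (word) branch of msa_convert():
	 * within each 64-bit doubleword the two words trade places. */
	union toy_vec {
		uint64_t d[2];
		uint32_t w[4];
	};

	static void toy_convert_words(union toy_vec *to, const union toy_vec *from)
	{
		to->w[0] = from->w[1];
		to->w[1] = from->w[0];
		to->w[2] = from->w[3];
		to->w[3] = from->w[2];
	}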
static void emulate_load_store_insn(struct pt_regs *regs,
@@ -429,6 +1564,13 @@
unsigned long origpc;
unsigned long orig31;
void __user *fault_addr = NULL;
+#ifdef CONFIG_EVA
+ mm_segment_t seg;
+#endif
+#ifdef CONFIG_CPU_HAS_MSA
+ union fpureg msadatabase[2], *msadata;
+ unsigned int func, df, rs, wd;
+#endif
origpc = (unsigned long)pc;
orig31 = regs->regs[31];
@@ -474,6 +1616,98 @@
* The remaining opcodes are the ones that are really of
* interest.
*/
+#ifdef CONFIG_EVA
+ case spec3_op:
+
+ /* We can land here only when the kernel accesses user memory,
+ so temporarily switch to the user address space for the
+ access_ok() verification below. */
+ seg = get_fs();
+ set_fs(USER_DS);
+
+ switch (insn.spec3_format.ls_func) {
+
+ case lhe_op:
+ if (!access_ok(VERIFY_READ, addr, 2)) {
+ set_fs(seg);
+ goto sigbus;
+ }
+
+ LoadHW(addr, value, res);
+ if (res) {
+ set_fs(seg);
+ goto fault;
+ }
+ compute_return_epc(regs);
+ regs->regs[insn.spec3_format.rt] = value;
+ break;
+
+ case lwe_op:
+ if (!access_ok(VERIFY_READ, addr, 4)) {
+ set_fs(seg);
+ goto sigbus;
+ }
+
+ LoadW(addr, value, res);
+ if (res) {
+ set_fs(seg);
+ goto fault;
+ }
+ compute_return_epc(regs);
+ regs->regs[insn.spec3_format.rt] = value;
+ break;
+
+ case lhue_op:
+ if (!access_ok(VERIFY_READ, addr, 2)) {
+ set_fs(seg);
+ goto sigbus;
+ }
+
+ LoadHWU(addr, value, res);
+ if (res) {
+ set_fs(seg);
+ goto fault;
+ }
+ compute_return_epc(regs);
+ regs->regs[insn.spec3_format.rt] = value;
+ break;
+
+ case she_op:
+ if (!access_ok(VERIFY_WRITE, addr, 2)) {
+ set_fs(seg);
+ goto sigbus;
+ }
+
+ compute_return_epc(regs);
+ value = regs->regs[insn.spec3_format.rt];
+ StoreHW(addr, value, res);
+ if (res) {
+ set_fs(seg);
+ goto fault;
+ }
+ break;
+
+ case swe_op:
+ if (!access_ok(VERIFY_WRITE, addr, 4)) {
+ set_fs(seg);
+ goto sigbus;
+ }
+
+ compute_return_epc(regs);
+ value = regs->regs[insn.spec3_format.rt];
+ StoreW(addr, value, res);
+ if (res) {
+ set_fs(seg);
+ goto fault;
+ }
+ break;
+
+ default:
+ set_fs(seg);
+ goto sigill;
+ }
+ set_fs(seg);
+ break;
+#endif
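
Every EVA sub-case above repeats one idiom: widen the address limit only for
the user access and restore it on every exit path. A condensed sketch of
that shape (error paths folded together, names as in the code above):

	seg = get_fs();
	set_fs(USER_DS);		/* verify against user limits */
	if (!access_ok(VERIFY_READ, addr, 2)) {
		set_fs(seg);		/* restore before leaving */
		goto sigbus;
	}
	LoadHW(addr, value, res);
	set_fs(seg);
	if (res)
		goto fault;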
case lh_op:
if (!access_ok(VERIFY_READ, addr, 2))
goto sigbus;
@@ -598,13 +1832,122 @@
/* Cannot handle 64-bit instructions in 32-bit kernel */
goto sigill;
- case lwc1_op:
case ldc1_op:
- case swc1_op:
+ if (!access_ok(VERIFY_READ, addr, 8))
+ goto sigbus;
+
+ preempt_disable();
+ if (is_fpu_owner()) {
+ if (read_c0_status() & ST0_FR) {
+#ifdef CONFIG_64BIT
+ LoadDW(addr, value, res);
+ if (res)
+ goto preempt_fault;
+ dmtc1(value, insn.i_format.rt);
+#else /* !CONFIG_64BIT */
+#ifdef CONFIG_CPU_MIPSR1
+ preempt_enable_no_resched();
+ goto fpu_continue;
+#else
+ unsigned long value2;
+
+ LoadW(addr, value, res);
+ if (res)
+ goto preempt_fault;
+ LoadW((addr + 4), value2, res);
+ if (res)
+ goto preempt_fault;
+ mtc1_mthc1(value, value2, insn.i_format.rt);
+#endif
+#endif /* CONFIG_64BIT */
+ } else {
+ unsigned long value2;
+
+ LoadW(addr, value, res);
+ if (res)
+ goto preempt_fault;
+ LoadW((addr + 4), value2, res);
+ if (res)
+ goto preempt_fault;
+ mtc1_pair(value, value2, insn.i_format.rt);
+ }
+ preempt_enable_no_resched();
+ compute_return_epc(regs);
+ break;
+ }
+
+ preempt_enable_no_resched();
+ goto fpu_continue;
+
case sdc1_op:
+ if (!access_ok(VERIFY_WRITE, addr, 8))
+ goto sigbus;
+
+ preempt_disable();
+ if (is_fpu_owner()) {
+ compute_return_epc(regs);
+ if (read_c0_status() & ST0_FR) {
+#ifdef CONFIG_64BIT
+ value = dmfc1(insn.i_format.rt);
+ StoreDW(addr, value, res);
+ if (res)
+ goto preempt_fault;
+#else /* !CONFIG_64BIT */
+#ifdef CONFIG_CPU_MIPSR1
+ preempt_enable_no_resched();
+ /* roll back jump/branch */
+ regs->cp0_epc = origpc;
+ regs->regs[31] = orig31;
+ goto fpu_continue;
+#else
+ unsigned long value2;
+
+ mfc1_mfhc1(&value, &value2, insn.i_format.rt);
+ StoreW(addr, value, res);
+ if (res)
+ goto preempt_fault;
+ StoreW((addr + 4), value2, res);
+ if (res)
+ goto preempt_fault;
+#endif
+#endif /* CONFIG_64BIT */
+ } else {
+ unsigned long value2;
+
+ mfc1_pair(&value, &value2, insn.i_format.rt);
+ StoreW(addr, value, res);
+ if (res)
+ goto preempt_fault;
+ StoreW((addr + 4), value2, res);
+ if (res)
+ goto preempt_fault;
+ }
+ preempt_enable_no_resched();
+ break;
+ }
+
+preempt_fault:
+ preempt_enable_no_resched();
+ goto fpu_continue;
+
+ case cop1x_op:
+ switch (insn.f_format.func) {
+ case lwxc1_op:
+ case swxc1_op:
+ case ldxc1_op:
+ case sdxc1_op:
+ goto fpu_continue;
+ default:
+ goto sigill;
+ }
+ break;
+
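
In outline, the ldc1/sdc1 cases above walk one decision tree before falling
back to the FPU emulator. A self-contained summary of that dispatch, with
the enum and function invented here purely for illustration:

	/* Which path services an unaligned ldc1/sdc1, mirroring the
	 * #ifdef structure above as plain control flow. */
	enum fp_path {
		PATH_DMTC1_64,		/* 64-bit kernel, FR=1 */
		PATH_MTHC1_PAIR,	/* 32-bit R2+, FR=1: mtc1 + mthc1 */
		PATH_EVEN_ODD_PAIR,	/* FR=0: even/odd register pair */
		PATH_FPU_EMULATOR,	/* fpu_continue */
	};

	static enum fp_path pick_fp_path(int fpu_owner, int st0_fr,
					 int is_64bit, int is_mipsr1)
	{
		if (!fpu_owner)
			return PATH_FPU_EMULATOR;
		if (!st0_fr)
			return PATH_EVEN_ODD_PAIR;
		if (is_64bit)
			return PATH_DMTC1_64;
		if (is_mipsr1)
			return PATH_FPU_EMULATOR;	/* no mthc1 on R1 */
		return PATH_MTHC1_PAIR;
	}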
+ case lwc1_op:
+ case swc1_op:
+fpu_continue:
die_if_kernel("Unaligned FP access in kernel code", regs);
BUG_ON(!used_math());
- BUG_ON(!is_fpu_owner());
+ /* BUG_ON(!is_fpu_owner()); */
lose_fpu(1); /* Save FPU state for the emulator. */
res = fpu_emulator_cop1Handler(regs, ¤t->thread.fpu, 1,
@@ -618,6 +1961,84 @@
break;
return;
+#ifdef CONFIG_CPU_HAS_MSA
+ case msa_op:
+ if (cpu_has_mdmx)
+ goto sigill;
+
+ func = insn.msa_mi10_format.func;
+ switch (func) {
+ default:
+ goto sigbus;
+
+ case msa_ld_op:
+ case msa_st_op:
+ break;
+ }
+
+ if (!thread_msa_context_live())
+ goto sigbus;
+
+ df = insn.msa_mi10_format.df;
+ rs = insn.msa_mi10_format.rs;
+ wd = insn.msa_mi10_format.wd;
+ addr = (unsigned long *)(regs->regs[rs] + (insn.msa_mi10_format.s10 << df));
+ /* align a 16-byte working space on the stack */
+ msadata = (union fpureg *)(((unsigned long)msadatabase + 15) & ~(unsigned long)0xf);
+ if (func == msa_ld_op) {
+ if (!access_ok(VERIFY_READ, addr, 16))
+ goto sigbus;
+ compute_return_epc(regs);
+ res = __copy_from_user_inatomic(msadata, addr, 16);
+ if (res)
+ goto fault;
+ preempt_disable();
+ if (test_thread_flag(TIF_USEDMSA)) {
+#ifdef __BIG_ENDIAN
+ msa_convert(¤t->thread.fpu.fpr[wd], msadata, df);
+ msa_to_wd(wd, ¤t->thread.fpu.fpr[wd]);
+#else
+ msa_to_wd(wd, msadata);
+#endif
+ preempt_enable_no_resched();
+ } else {
+ preempt_enable_no_resched();
+#ifdef __BIG_ENDIAN
+ msa_convert(¤t->thread.fpu.fpr[wd], msadata, df);
+#else
+ current->thread.fpu.fpr[wd] = *msadata;
+#endif
+ }
+ } else {
+ if (!access_ok(VERIFY_WRITE, addr, 16))
+ goto sigbus;
+ compute_return_epc(regs);
+ preempt_disable();
+ if (test_thread_flag(TIF_USEDMSA)) {
+#ifdef __BIG_ENDIAN
+ msa_from_wd(wd, ¤t->thread.fpu.fpr[wd]);
+ msa_convert(msadata, ¤t->thread.fpu.fpr[wd], df);
+#else
+ msa_from_wd(wd, msadata);
+#endif
+ preempt_enable_no_resched();
+ } else {
+ preempt_enable_no_resched();
+#ifdef __BIG_ENDIAN
+ msa_convert(msadata, ¤t->thread.fpu.fpr[wd], df);
+#else
+ *msadata = current->thread.fpu.fpr[wd];
+#endif
+ }
+ res = __copy_to_user_inatomic(addr, msadata, 16);
+ if (res)
+ goto fault;
+ }
+
+ break;
+#endif /* CONFIG_CPU_HAS_MSA */
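
The msadata setup above parks a 16-byte-aligned buffer inside a stack array
twice that size. A standalone sketch of the same round-up-and-mask idiom,
with the helper name invented here:

	#include <stdint.h>

	/* Round an address up to the next 16-byte boundary, as done for
	 * msadata: over-allocate, add (align - 1), clear the low bits. */
	static inline void *align_up_16(void *p)
	{
		return (void *)(((uintptr_t)p + 15) & ~(uintptr_t)0xf);
	}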
+
+#ifndef CONFIG_CPU_MIPSR6
/*
* COP2 is available to implementor for application specific use.
* It's up to applications to register a notifier chain and do
@@ -638,6 +2059,7 @@
case sdc2_op:
cu2_notifier_call_chain(CU2_SDC2_OP, regs);
break;
+#endif
default:
/*
@@ -1025,7 +2447,97 @@
goto sigbus;
case mm_ldc132_op:
+ if (!access_ok(VERIFY_READ, addr, 8))
+ goto sigbus;
+
+ preempt_disable();
+ if (is_fpu_owner()) {
+ if (read_c0_status() & ST0_FR) {
+#ifdef CONFIG_64BIT
+ LoadDW(addr, value, res);
+ if (res)
+ goto preempt_fault;
+ dmtc1(value, insn.mm_i_format.rt);
+#else /* !CONFIG_64BIT */
+#ifdef CONFIG_CPU_MIPSR1
+ preempt_enable_no_resched();
+ goto fpu_emul;
+#else
+ unsigned long value2;
+
+ LoadW(addr, value, res);
+ if (res)
+ goto preempt_fault;
+ LoadW((addr + 4), value2, res);
+ if (res)
+ goto preempt_fault;
+ mtc1_mthc1(value, value2, insn.mm_i_format.rt);
+#endif
+#endif /* CONFIG_64BIT */
+ } else {
+ unsigned long value2;
+
+ LoadW(addr, value, res);
+ if (res)
+ goto preempt_fault;
+ LoadW((addr + 4), value2, res);
+ if (res)
+ goto preempt_fault;
+ mtc1_pair(value, value2, insn.mm_i_format.rt);
+ }
+ preempt_enable_no_resched();
+ goto success;
+ }
+
+ preempt_enable_no_resched();
+ goto fpu_emul;
+
case mm_sdc132_op:
+ if (!access_ok(VERIFY_WRITE, addr, 8))
+ goto sigbus;
+
+ preempt_disable();
+ if (is_fpu_owner()) {
+ if (read_c0_status() & ST0_FR) {
+#ifdef CONFIG_64BIT
+ value = dmfc1(insn.mm_i_format.rt);
+ StoreDW(addr, value, res);
+ if (res)
+ goto preempt_fault;
+#else /* !CONFIG_64BIT */
+#ifdef CONFIG_CPU_MIPSR1
+ preempt_enable_no_resched();
+ goto fpu_emul;
+#else
+ unsigned long value2;
+
+ mfc1_mfhc1(&value, &value2, insn.mm_i_format.rt);
+ StoreW(addr, value, res);
+ if (res)
+ goto preempt_fault;
+ StoreW((addr + 4), value2, res);
+ if (res)
+ goto preempt_fault;
+#endif
+#endif /* CONFIG_64BIT */
+ } else {
+ unsigned long value2;
+
+ mfc1_pair(&value, &value2, insn.mm_i_format.rt);
+ StoreW(addr, value, res);
+ if (res)
+ goto preempt_fault;
+ StoreW((addr + 4), value2, res);
+ if (res)
+ goto preempt_fault;
+ }
+ preempt_enable_no_resched();
+ goto success;
+ }
+
+ preempt_enable_no_resched();
+ goto fpu_emul;
+
case mm_lwc132_op:
case mm_swc132_op:
fpu_emul:
@@ -1279,6 +2791,8 @@
#endif
return;
+preempt_fault:
+ preempt_enable();
fault:
/* roll back jump/branch */
regs->cp0_epc = origpc;
diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c
index 0f1af58..6052950 100644
--- a/arch/mips/kernel/vdso.c
+++ b/arch/mips/kernel/vdso.c
@@ -19,6 +19,8 @@
#include <asm/vdso.h>
#include <asm/uasm.h>
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
/*
* Including <asm/unistd.h> would give use the 64-bit syscall numbers ...
@@ -88,14 +90,20 @@
ret = install_special_mapping(mm, addr, PAGE_SIZE,
VM_READ|VM_EXEC|
- VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
+ VM_MAYREAD|VM_MAYEXEC,
&vdso_page);
if (ret)
goto up_fail;
mm->context.vdso = (void *)addr;
+ mm->context.thread_flags = current_thread_info()->local_flags;
+ /* If caches alias, a different cache flush is used later. */
+ if (cpu_has_rixi && (cpu_has_dc_aliases || cpu_has_ic_aliases))
+ mm->context.vdso_vma = find_vma(mm, addr);
+ mips_thread_vdso(current_thread_info());
+ smp_wmb();
up_fail:
up_write(&mm->mmap_sem);
return ret;
@@ -107,3 +115,38 @@
return "[vdso]";
return NULL;
}
+
+void mips_thread_vdso(struct thread_info *ti)
+{
+ struct page *vdso;
+ unsigned long addr;
+
+ if (cpu_has_rixi && ti->task->mm && !ti->vdso_page) {
+ vdso = alloc_page(GFP_USER);
+ if (!vdso)
+ return;
+ ti->vdso_page = vdso;
+ ti->vdso_offset = PAGE_SIZE;
+ addr = (unsigned long)page_address(vdso);
+ copy_page((void *)addr, (void *)page_address(vdso_page));
+ if (!cpu_has_ic_fills_f_dc)
+ flush_data_cache_page(addr);
+ /* Any VMA from the mm will do; it is only used to get the ASIDs
+ back from the mm. */
+ local_flush_tlb_page(ti->task->mm->mmap, (unsigned long)ti->task->mm->context.vdso);
+ }
+}
+
+void arch_release_thread_info(struct thread_info *info)
+{
+ if (info->vdso_page) {
+ if (info->task->mm) {
+ preempt_disable();
+ /* Any VMA from the mm will do; it is only used to get the ASIDs. */
+ local_flush_tlb_page(info->task->mm->mmap, (unsigned long)info->task->mm->context.vdso);
+ info->task->mm->context.vdso_asid[smp_processor_id()] = 0;
+ preempt_enable();
+ }
+ __free_page(info->vdso_page);
+ info->vdso_page = NULL;
+ }
+}
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
index 05826d2..7ed2fe5 100644
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -145,6 +145,12 @@
_end = . ;
+#ifdef CONFIG_MIPS_APPENDED_DTB
+ __appended_dtb = .;
+ /* leave space for appended DTB */
+ . = . + 0x100000;
+#endif
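
With __appended_dtb reserved by the script above, early boot code can probe
that address for a flattened device tree. A hedged sketch of such a probe;
the helper is hypothetical, but fdt_magic()/FDT_MAGIC are the standard
libfdt header check:

	#include <linux/libfdt.h>

	extern char __appended_dtb[];	/* from vmlinux.lds.S above */

	/* Use the appended blob only if it carries a valid FDT header. */
	static void *get_appended_dtb(void)
	{
		if (fdt_magic(__appended_dtb) == FDT_MAGIC)
			return __appended_dtb;
		return NULL;
	}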
+
/* These mark the ABI of the kernel for debuggers. */
.mdebug.abi32 : {
KEEP(*(.mdebug.abi32))
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c
index 1765bab..4169541 100644
--- a/arch/mips/kernel/vpe.c
+++ b/arch/mips/kernel/vpe.c
@@ -832,6 +832,7 @@
char *secstrings, *strtab = NULL;
unsigned int len, i, symindex = 0, strindex = 0, relocate = 0;
struct module mod; // so we can re-use the relocations code
+ mm_segment_t old_fs;
memset(&mod, 0, sizeof(struct module));
strcpy(mod.name, "VPE loader");
@@ -973,8 +974,12 @@
}
/* make sure it's physically written out */
+ /* flush the icache in the correct (kernel) address-space context */
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
flush_icache_range((unsigned long)v->load_addr,
(unsigned long)v->load_addr + v->len);
+ set_fs(old_fs);
if ((find_vpe_symbols(v, sechdrs, symindex, strtab, &mod)) < 0) {
if (v->__start == 0) {
diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c
index 5119487..f983d95 100644
--- a/arch/mips/lantiq/irq.c
+++ b/arch/mips/lantiq/irq.c
@@ -438,6 +438,7 @@
arch_init_ipiirq(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_RESCHED_IRQ,
&irq_resched);
arch_init_ipiirq(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ, &irq_call);
+ mips_smp_c0_status_mask |= (IE_SW0 | IE_SW1);
#endif
#if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC)
diff --git a/arch/mips/lasat/image/Makefile b/arch/mips/lasat/image/Makefile
index dfb509d..fd32075 100644
--- a/arch/mips/lasat/image/Makefile
+++ b/arch/mips/lasat/image/Makefile
@@ -13,13 +13,11 @@
MKLASATIMG = mklasatimg
MKLASATIMG_ARCH = mq2,mqpro,sp100,sp200
KERNEL_IMAGE = vmlinux
-KERNEL_START = $(shell $(NM) $(KERNEL_IMAGE) | grep " _text" | cut -f1 -d\ )
-KERNEL_ENTRY = $(shell $(NM) $(KERNEL_IMAGE) | grep kernel_entry | cut -f1 -d\ )
LDSCRIPT= -L$(srctree)/$(src) -Tromscript.normal
-HEAD_DEFINES := -D_kernel_start=0x$(KERNEL_START) \
- -D_kernel_entry=0x$(KERNEL_ENTRY) \
+HEAD_DEFINES := -D_kernel_start=$(VMLINUX_LOAD_ADDRESS) \
+ -D_kernel_entry=$(VMLINUX_ENTRY_ADDRESS) \
-D VERSION="\"$(Version)\"" \
-D TIMESTAMP=$(shell date +%s)
diff --git a/arch/mips/lib/csum_partial.S b/arch/mips/lib/csum_partial.S
index a6adffb..773002b 100644
--- a/arch/mips/lib/csum_partial.S
+++ b/arch/mips/lib/csum_partial.S
@@ -9,6 +9,19 @@
* Copyright (C) 1999 Silicon Graphics, Inc.
* Copyright (C) 2007 Maciej W. Rozycki
*/
+/*
+ * Hack to resolve longstanding prefetch issue
+ *
+ * Prefetching may be fatal on some systems if it reaches beyond the end of
+ * memory. It's also a seriously bad idea on non-DMA-coherent systems.
+ */
+#ifndef CONFIG_CPU_MIPSR6
+
+#if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_MIPS_MALTA)
+#undef CONFIG_CPU_HAS_PREFETCH
+#endif
+
#include <linux/errno.h>
#include <asm/asm.h>
#include <asm/asm-offsets.h>
@@ -43,6 +56,8 @@
#define ADD daddu
#define NBYTES 8
+#define LOADK ld
+
#else
#define LOAD lw
@@ -50,6 +65,8 @@
#define ADD addu
#define NBYTES 4
+#define LOADK lw
+
#endif /* USE_DOUBLE */
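
The new LOADK macro exists so that loads of kernel structures (TI_TASK,
THREAD_BUADDR) always stay ordinary lw/ld even where LOAD is later rebound
to an EVA user-segment variant. A minimal illustration of the split, with
the CONFIG_EVA branch shown only as an assumption:

	/* Illustration: user-facing loads may be rebound under EVA,
	 * kernel-structure loads (LOADK) must never be. */
	#define LOADK	lw		/* always a normal kernel load */
	#ifdef CONFIG_EVA
	# define LOAD	lwe		/* user-segment load */
	#else
	# define LOAD	lw
	#endif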
#define UNIT(unit) ((unit)*NBYTES)
@@ -417,12 +434,18 @@
*
* If len < NBYTES use byte operations.
*/
- sltu t2, len, NBYTES
+ PREF( 0, 0(src) )
+ PREF( 1, 0(dst) )
+ sltu t2, len, NBYTES
and t1, dst, ADDRMASK
- bnez t2, .Lcopy_bytes_checklen
+ PREF( 0, 1*32(src) )
+ PREF( 1, 1*32(dst) )
+ bnez t2, .Lcopy_bytes_checklen
and t0, src, ADDRMASK
andi odd, dst, 0x1 /* odd buffer? */
- bnez t1, .Ldst_unaligned
+ PREF( 0, 2*32(src) )
+ PREF( 1, 2*32(dst) )
+ bnez t1, .Ldst_unaligned
nop
bnez t0, .Lsrc_unaligned_dst_aligned
/*
@@ -434,7 +457,9 @@
beqz t0, .Lcleanup_both_aligned # len < 8*NBYTES
nop
SUB len, 8*NBYTES # subtract here for bgez loop
- .align 4
+ PREF( 0, 3*32(src) )
+ PREF( 1, 3*32(dst) )
+ .align 4
1:
EXC( LOAD t0, UNIT(0)(src), .Ll_exc)
EXC( LOAD t1, UNIT(1)(src), .Ll_exc_copy)
@@ -462,8 +487,10 @@
ADDC(sum, t6)
EXC( STORE t7, UNIT(7)(dst), .Ls_exc)
ADDC(sum, t7)
- .set reorder /* DADDI_WAR */
+ .set reorder /* DADDI_WAR */
ADD dst, dst, 8*NBYTES
+ PREF( 0, 8*32(src) )
+ PREF( 1, 8*32(dst) )
bgez len, 1b
.set noreorder
ADD len, 8*NBYTES # revert len (see above)
@@ -568,9 +595,11 @@
ADD src, src, t2
.Lsrc_unaligned_dst_aligned:
- SRL t0, len, LOG_NBYTES+2 # +2 for 4 units/iter
+ SRL t0, len, LOG_NBYTES+2 # +2 for 4 units/iter
+ PREF( 0, 3*32(src) )
beqz t0, .Lcleanup_src_unaligned
- and rem, len, (4*NBYTES-1) # rem = len % 4*NBYTES
+ and rem, len, (4*NBYTES-1) # rem = len % 4*NBYTES
+ PREF( 1, 3*32(dst) )
1:
/*
* Avoid consecutive LD*'s to the same register since some mips
@@ -587,7 +616,8 @@
EXC( LDFIRST t3, FIRST(3)(src), .Ll_exc_copy)
EXC( LDREST t2, REST(2)(src), .Ll_exc_copy)
EXC( LDREST t3, REST(3)(src), .Ll_exc_copy)
- ADD src, src, 4*NBYTES
+ PREF( 0, 9*32(src) ) # 0 is PREF_LOAD (not streamed)
+ ADD src, src, 4*NBYTES
#ifdef CONFIG_CPU_SB1
nop # improves slotting
#endif
@@ -600,7 +630,8 @@
EXC( STORE t3, UNIT(3)(dst), .Ls_exc)
ADDC(sum, t3)
.set reorder /* DADDI_WAR */
- ADD dst, dst, 4*NBYTES
+ PREF( 1, 9*32(dst) ) # 1 is PREF_STORE (not streamed)
+ ADD dst, dst, 4*NBYTES
bne len, rem, 1b
.set noreorder
@@ -700,9 +731,9 @@
*
* Assumes src < THREAD_BUADDR($28)
*/
- LOAD t0, TI_TASK($28)
+ LOADK t0, TI_TASK($28)
li t2, SHIFT_START
- LOAD t0, THREAD_BUADDR(t0)
+ LOADK t0, THREAD_BUADDR(t0)
1:
EXC( lbu t1, 0(src), .Ll_exc)
ADD src, src, 1
@@ -715,9 +746,9 @@
bne src, t0, 1b
.set noreorder
.Ll_exc:
- LOAD t0, TI_TASK($28)
+ LOADK t0, TI_TASK($28)
nop
- LOAD t0, THREAD_BUADDR(t0) # t0 is just past last good address
+ LOADK t0, THREAD_BUADDR(t0) # t0 is just past last good address
nop
SUB len, AT, t0 # len number of uncopied bytes
/*
@@ -758,3 +789,740 @@
sw v1, (errptr)
.set pop
END(__csum_partial_copy_user)
+
+
+#ifdef CONFIG_EVA
+
+ .set eva
+
+#undef LOAD
+#undef LOADL
+#undef LOADR
+#undef STORE
+#undef STOREL
+#undef STORER
+#undef LDFIRST
+#undef LDREST
+#undef STFIRST
+#undef STREST
+#undef COPY_BYTE
+
+#define LOAD lwe
+#define LOADL lwle
+#define LOADR lwre
+#define STOREL swl
+#define STORER swr
+#define STORE sw
+
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+#define LDFIRST LOADR
+#define LDREST LOADL
+#define STFIRST STORER
+#define STREST STOREL
+#else
+#define LDFIRST LOADL
+#define LDREST LOADR
+#define STFIRST STOREL
+#define STREST STORER
+#endif
+
+LEAF(__csum_partial_copy_fromuser)
+ PTR_ADDU AT, src, len /* See (1) above. */
+#ifdef CONFIG_64BIT
+ move errptr, a4
+#else
+ lw errptr, 16(sp)
+#endif
+ move sum, zero
+ move odd, zero
+ /*
+ * Note: dst & src may be unaligned, len may be 0
+ * Temps
+ */
+ /*
+ * The "issue break"s below are very approximate.
+ * Issue delays for dcache fills will perturb the schedule, as will
+ * load queue full replay traps, etc.
+ *
+ * If len < NBYTES use byte operations.
+ */
+ PREFE( 0, 0(src) )
+ PREF( 1, 0(dst) )
+ sltu t2, len, NBYTES
+ and t1, dst, ADDRMASK
+ PREFE( 0, 1*32(src) )
+ PREF( 1, 1*32(dst) )
+ bnez t2, .LFcopy_bytes_checklen
+ and t0, src, ADDRMASK
+ andi odd, dst, 0x1 /* odd buffer? */
+ PREFE( 0, 2*32(src) )
+ PREF( 1, 2*32(dst) )
+ bnez t1, .LFdst_unaligned
+ nop
+ bnez t0, .LFsrc_unaligned_dst_aligned
+ /*
+ * use delay slot for fall-through
+ * src and dst are aligned; need to compute rem
+ */
+.LFboth_aligned:
+ SRL t0, len, LOG_NBYTES+3 # +3 for 8 units/iter
+ beqz t0, .LFcleanup_both_aligned # len < 8*NBYTES
+ nop
+ SUB len, 8*NBYTES # subtract here for bgez loop
+ PREFE( 0, 3*32(src) )
+ PREF( 1, 3*32(dst) )
+ .align 4
+1:
+EXC( LOAD t0, UNIT(0)(src), .LFl_exc)
+EXC( LOAD t1, UNIT(1)(src), .LFl_exc_copy)
+EXC( LOAD t2, UNIT(2)(src), .LFl_exc_copy)
+EXC( LOAD t3, UNIT(3)(src), .LFl_exc_copy)
+EXC( LOAD t4, UNIT(4)(src), .LFl_exc_copy)
+EXC( LOAD t5, UNIT(5)(src), .LFl_exc_copy)
+EXC( LOAD t6, UNIT(6)(src), .LFl_exc_copy)
+EXC( LOAD t7, UNIT(7)(src), .LFl_exc_copy)
+ SUB len, len, 8*NBYTES
+ ADD src, src, 8*NBYTES
+ STORE t0, UNIT(0)(dst)
+ ADDC(sum, t0)
+ STORE t1, UNIT(1)(dst)
+ ADDC(sum, t1)
+ STORE t2, UNIT(2)(dst)
+ ADDC(sum, t2)
+ STORE t3, UNIT(3)(dst)
+ ADDC(sum, t3)
+ STORE t4, UNIT(4)(dst)
+ ADDC(sum, t4)
+ STORE t5, UNIT(5)(dst)
+ ADDC(sum, t5)
+ STORE t6, UNIT(6)(dst)
+ ADDC(sum, t6)
+ STORE t7, UNIT(7)(dst)
+ ADDC(sum, t7)
+ .set reorder /* DADDI_WAR */
+ ADD dst, dst, 8*NBYTES
+ PREFE( 0, 8*32(src) )
+ PREF( 1, 8*32(dst) )
+ bgez len, 1b
+ .set noreorder
+ ADD len, 8*NBYTES # revert len (see above)
+
+ /*
+ * len == the number of bytes left to copy < 8*NBYTES
+ */
+.LFcleanup_both_aligned:
+ beqz len, .LFdone
+ sltu t0, len, 4*NBYTES
+ bnez t0, .LFless_than_4units
+ and rem, len, (NBYTES-1) # rem = len % NBYTES
+ /*
+ * len >= 4*NBYTES
+ */
+EXC( LOAD t0, UNIT(0)(src), .LFl_exc)
+EXC( LOAD t1, UNIT(1)(src), .LFl_exc_copy)
+EXC( LOAD t2, UNIT(2)(src), .LFl_exc_copy)
+EXC( LOAD t3, UNIT(3)(src), .LFl_exc_copy)
+ SUB len, len, 4*NBYTES
+ ADD src, src, 4*NBYTES
+ STORE t0, UNIT(0)(dst)
+ ADDC(sum, t0)
+ STORE t1, UNIT(1)(dst)
+ ADDC(sum, t1)
+ STORE t2, UNIT(2)(dst)
+ ADDC(sum, t2)
+ STORE t3, UNIT(3)(dst)
+ ADDC(sum, t3)
+ .set reorder /* DADDI_WAR */
+ ADD dst, dst, 4*NBYTES
+ beqz len, .LFdone
+ .set noreorder
+.LFless_than_4units:
+ /*
+ * rem = len % NBYTES
+ */
+ beq rem, len, .LFcopy_bytes
+ nop
+1:
+EXC( LOAD t0, 0(src), .LFl_exc)
+ ADD src, src, NBYTES
+ SUB len, len, NBYTES
+ STORE t0, 0(dst)
+ ADDC(sum, t0)
+ .set reorder /* DADDI_WAR */
+ ADD dst, dst, NBYTES
+ bne rem, len, 1b
+ .set noreorder
+
+ /*
+ * src and dst are aligned, need to copy rem bytes (rem < NBYTES)
+ * A loop would do only a byte at a time with possible branch
+ * mispredicts. Can't do an explicit LOAD dst,mask,or,STORE
+ * because can't assume read-access to dst. Instead, use
+ * STREST dst, which doesn't require read access to dst.
+ *
+ * This code should perform better than a simple loop on modern,
+ * wide-issue mips processors because the code has fewer branches and
+ * more instruction-level parallelism.
+ */
+ beqz len, .LFdone
+ ADD t1, dst, len # t1 is just past last byte of dst
+ li bits, 8*NBYTES
+ SLL rem, len, 3 # rem = number of bits to keep
+EXC( LOAD t0, 0(src), .LFl_exc)
+ SUB bits, bits, rem # bits = number of bits to discard
+ SHIFT_DISCARD t0, t0, bits
+ STREST t0, -1(t1)
+ SHIFT_DISCARD_REVERT t0, t0, bits
+ .set reorder
+ ADDC(sum, t0)
+ b .LFdone
+ .set noreorder
+.LFdst_unaligned:
+ /*
+ * dst is unaligned
+ * t0 = src & ADDRMASK
+ * t1 = dst & ADDRMASK; T1 > 0
+ * len >= NBYTES
+ *
+ * Copy enough bytes to align dst
+ * Set match = (src and dst have same alignment)
+ */
+EXC( LDFIRST t3, FIRST(0)(src), .LFl_exc)
+ ADD t2, zero, NBYTES
+EXC( LDREST t3, REST(0)(src), .LFl_exc_copy)
+ SUB t2, t2, t1 # t2 = number of bytes copied
+ xor match, t0, t1
+ STFIRST t3, FIRST(0)(dst)
+ SLL t4, t1, 3 # t4 = number of bits to discard
+ SHIFT_DISCARD t3, t3, t4
+ /* skip SHIFT_DISCARD_REVERT so the odd-buffer case is handled properly */
+ ADDC(sum, t3)
+ beq len, t2, .LFdone
+ SUB len, len, t2
+ ADD dst, dst, t2
+ beqz match, .LFboth_aligned
+ ADD src, src, t2
+
+.LFsrc_unaligned_dst_aligned:
+ SRL t0, len, LOG_NBYTES+2 # +2 for 4 units/iter
+ PREFE( 0, 3*32(src) )
+ beqz t0, .LFcleanup_src_unaligned
+ and rem, len, (4*NBYTES-1) # rem = len % 4*NBYTES
+ PREF( 1, 3*32(dst) )
+1:
+/*
+ * Avoid consecutive LD*'s to the same register since some mips
+ * implementations can't issue them in the same cycle.
+ * It's OK to load FIRST(N+1) before REST(N) because the two addresses
+ * are to the same unit (unless src is aligned, but it's not).
+ */
+EXC( LDFIRST t0, FIRST(0)(src), .LFl_exc)
+EXC( LDFIRST t1, FIRST(1)(src), .LFl_exc_copy)
+ SUB len, len, 4*NBYTES
+EXC( LDREST t0, REST(0)(src), .LFl_exc_copy)
+EXC( LDREST t1, REST(1)(src), .LFl_exc_copy)
+EXC( LDFIRST t2, FIRST(2)(src), .LFl_exc_copy)
+EXC( LDFIRST t3, FIRST(3)(src), .LFl_exc_copy)
+EXC( LDREST t2, REST(2)(src), .LFl_exc_copy)
+EXC( LDREST t3, REST(3)(src), .LFl_exc_copy)
+ PREFE( 0, 9*32(src) ) # 0 is PREF_LOAD (not streamed)
+ ADD src, src, 4*NBYTES
+#ifdef CONFIG_CPU_SB1
+ nop # improves slotting
+#endif
+ STORE t0, UNIT(0)(dst)
+ ADDC(sum, t0)
+ STORE t1, UNIT(1)(dst)
+ ADDC(sum, t1)
+ STORE t2, UNIT(2)(dst)
+ ADDC(sum, t2)
+ STORE t3, UNIT(3)(dst)
+ ADDC(sum, t3)
+ PREF( 1, 9*32(dst) ) # 1 is PREF_STORE (not streamed)
+ .set reorder /* DADDI_WAR */
+ ADD dst, dst, 4*NBYTES
+ bne len, rem, 1b
+ .set noreorder
+
+.LFcleanup_src_unaligned:
+ beqz len, .LFdone
+ and rem, len, NBYTES-1 # rem = len % NBYTES
+ beq rem, len, .LFcopy_bytes
+ nop
+1:
+EXC( LDFIRST t0, FIRST(0)(src), .LFl_exc)
+EXC( LDREST t0, REST(0)(src), .LFl_exc_copy)
+ ADD src, src, NBYTES
+ SUB len, len, NBYTES
+ STORE t0, 0(dst)
+ ADDC(sum, t0)
+ .set reorder /* DADDI_WAR */
+ ADD dst, dst, NBYTES
+ bne len, rem, 1b
+ .set noreorder
+
+.LFcopy_bytes_checklen:
+ beqz len, .LFdone
+ nop
+.LFcopy_bytes:
+ /* 0 < len < NBYTES */
+ move t2, zero # partial word
+ li t3, SHIFT_START # shift
+/* use .LFl_exc_copy here to return the correct sum on fault */
+#define COPY_BYTE(N) \
+EXC( lbue t0, N(src), .LFl_exc_copy); \
+ SUB len, len, 1; \
+ sb t0, N(dst); \
+ SLLV t0, t0, t3; \
+ addu t3, SHIFT_INC; \
+ beqz len, .LFcopy_bytes_done; \
+ or t2, t0
+
+ COPY_BYTE(0)
+ COPY_BYTE(1)
+#ifdef USE_DOUBLE
+ COPY_BYTE(2)
+ COPY_BYTE(3)
+ COPY_BYTE(4)
+ COPY_BYTE(5)
+#endif
+EXC( lbue t0, NBYTES-2(src), .LFl_exc_copy)
+ SUB len, len, 1
+ sb t0, NBYTES-2(dst)
+ SLLV t0, t0, t3
+ or t2, t0
+.LFcopy_bytes_done:
+ ADDC(sum, t2)
+.LFdone:
+ /* fold checksum */
+#ifdef USE_DOUBLE
+ dsll32 v1, sum, 0
+ daddu sum, v1
+ sltu v1, sum, v1
+ dsra32 sum, sum, 0
+ addu sum, v1
+#endif
+
+#ifdef CONFIG_CPU_MIPSR2
+ wsbh v1, sum
+ movn sum, v1, odd
+#else
+ beqz odd, 1f /* odd buffer alignment? */
+ lui v1, 0x00ff
+ addu v1, 0x00ff
+ and t0, sum, v1
+ sll t0, t0, 8
+ srl sum, sum, 8
+ and sum, sum, v1
+ or sum, sum, t0
+1:
+#endif
+ .set reorder
+ ADDC32(sum, psum)
+ jr ra
+ .set noreorder
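
The .LFdone epilogue above (and its .LTdone twin below) folds the wide
accumulator down to 32 bits with an end-around carry, then byte-swaps the
halfwords when the destination buffer started on an odd byte (what wsbh
does in one instruction on R2). A hedged C restatement of that arithmetic:

	#include <stdint.h>

	/* C model of the fold: collapse 64 -> 32 bits with end-around
	 * carry, then swap bytes within each halfword if odd-aligned. */
	static uint32_t fold_csum(uint64_t sum, int odd)
	{
		uint32_t lo = (uint32_t)sum;
		uint32_t s = (uint32_t)(sum >> 32) + lo;

		if (s < lo)			/* end-around carry */
			s++;
		if (odd)
			s = ((s & 0x00ff00ff) << 8) | ((s >> 8) & 0x00ff00ff);
		return s;
	}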
+
+.LFl_exc_copy:
+ /*
+ * Copy bytes from src until faulting load address (or until a
+ * lb faults)
+ *
+ * When reached by a faulting LDFIRST/LDREST, THREAD_BUADDR($28)
+ * may be more than a byte beyond the last address.
+ * Hence, the lb below may get an exception.
+ *
+ * Assumes src < THREAD_BUADDR($28)
+ */
+ LOADK t0, TI_TASK($28)
+ li t2, SHIFT_START
+ addi t0, t0, THREAD_BUADDR
+ LOADK t0, 0(t0)
+1:
+EXC( lbue t1, 0(src), .LFl_exc)
+ ADD src, src, 1
+ sb t1, 0(dst) # can't fault -- we're copy_from_user
+ SLLV t1, t1, t2
+ addu t2, SHIFT_INC
+ ADDC(sum, t1)
+ .set reorder /* DADDI_WAR */
+ ADD dst, dst, 1
+ bne src, t0, 1b
+ .set noreorder
+.LFl_exc:
+ LOADK t0, TI_TASK($28)
+ addi t0, t0, THREAD_BUADDR
+ LOADK t0, 0(t0) # t0 is just past last good address
+ SUB len, AT, t0 # len number of uncopied bytes
+ /*
+ * Here's where we rely on src and dst being incremented in tandem,
+ * See (3) above.
+ * dst += (fault addr - src) to put dst at first byte to clear
+ */
+ ADD dst, t0 # compute start address in a1
+ SUB dst, src
+ /*
+ * Clear len bytes starting at dst. Can't call __bzero because it
+ * might modify len. An inefficient loop for these rare times...
+ */
+ .set reorder /* DADDI_WAR */
+ SUB src, len, 1
+ beqz len, .LFdone
+ .set noreorder
+1: sb zero, 0(dst)
+ ADD dst, dst, 1
+ .set push
+ .set noat
+#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
+ bnez src, 1b
+ SUB src, src, 1
+#else
+ li v1, 1
+ bnez src, 1b
+ SUB src, src, v1
+#endif
+ li v1, -EFAULT
+ b .LFdone
+ sw v1, (errptr)
+
+ .set pop
+ END(__csum_partial_copy_fromuser)
+
+
+
+#undef LOAD
+#undef LOADL
+#undef LOADR
+#undef STORE
+#undef STOREL
+#undef STORER
+#undef LDFIRST
+#undef LDREST
+#undef STFIRST
+#undef STREST
+#undef COPY_BYTE
+
+#define LOAD lw
+#define LOADL lwl
+#define LOADR lwr
+#define STOREL swle
+#define STORER swre
+#define STORE swe
+
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+#define LDFIRST LOADR
+#define LDREST LOADL
+#define STFIRST STORER
+#define STREST STOREL
+#else
+#define LDFIRST LOADL
+#define LDREST LOADR
+#define STFIRST STOREL
+#define STREST STORER
+#endif
+
+LEAF(__csum_partial_copy_touser)
+ PTR_ADDU AT, src, len /* See (1) above. */
+#ifdef CONFIG_64BIT
+ move errptr, a4
+#else
+ lw errptr, 16(sp)
+#endif
+ move sum, zero
+ move odd, zero
+ /*
+ * Note: dst & src may be unaligned, len may be 0
+ * Temps
+ */
+ /*
+ * The "issue break"s below are very approximate.
+ * Issue delays for dcache fills will perturb the schedule, as will
+ * load queue full replay traps, etc.
+ *
+ * If len < NBYTES use byte operations.
+ */
+ PREF( 0, 0(src) )
+ PREFE( 1, 0(dst) )
+ sltu t2, len, NBYTES
+ and t1, dst, ADDRMASK
+ PREF( 0, 1*32(src) )
+ PREFE( 1, 1*32(dst) )
+ bnez t2, .LTcopy_bytes_checklen
+ and t0, src, ADDRMASK
+ andi odd, dst, 0x1 /* odd buffer? */
+ PREF( 0, 2*32(src) )
+ PREFE( 1, 2*32(dst) )
+ bnez t1, .LTdst_unaligned
+ nop
+ bnez t0, .LTsrc_unaligned_dst_aligned
+ /*
+ * use delay slot for fall-through
+ * src and dst are aligned; need to compute rem
+ */
+.LTboth_aligned:
+ SRL t0, len, LOG_NBYTES+3 # +3 for 8 units/iter
+ beqz t0, .LTcleanup_both_aligned # len < 8*NBYTES
+ nop
+ SUB len, 8*NBYTES # subtract here for bgez loop
+ PREF( 0, 3*32(src) )
+ PREFE( 1, 3*32(dst) )
+ .align 4
+1:
+ LOAD t0, UNIT(0)(src)
+ LOAD t1, UNIT(1)(src)
+ LOAD t2, UNIT(2)(src)
+ LOAD t3, UNIT(3)(src)
+ LOAD t4, UNIT(4)(src)
+ LOAD t5, UNIT(5)(src)
+ LOAD t6, UNIT(6)(src)
+ LOAD t7, UNIT(7)(src)
+ SUB len, len, 8*NBYTES
+ ADD src, src, 8*NBYTES
+EXC( STORE t0, UNIT(0)(dst), .LTs_exc)
+ ADDC(sum, t0)
+EXC( STORE t1, UNIT(1)(dst), .LTs_exc)
+ ADDC(sum, t1)
+EXC( STORE t2, UNIT(2)(dst), .LTs_exc)
+ ADDC(sum, t2)
+EXC( STORE t3, UNIT(3)(dst), .LTs_exc)
+ ADDC(sum, t3)
+EXC( STORE t4, UNIT(4)(dst), .LTs_exc)
+ ADDC(sum, t4)
+EXC( STORE t5, UNIT(5)(dst), .LTs_exc)
+ ADDC(sum, t5)
+EXC( STORE t6, UNIT(6)(dst), .LTs_exc)
+ ADDC(sum, t6)
+EXC( STORE t7, UNIT(7)(dst), .LTs_exc)
+ ADDC(sum, t7)
+ .set reorder /* DADDI_WAR */
+ ADD dst, dst, 8*NBYTES
+ PREF( 0, 8*32(src) )
+ PREFE( 1, 8*32(dst) )
+ bgez len, 1b
+ .set noreorder
+ ADD len, 8*NBYTES # revert len (see above)
+
+ /*
+ * len == the number of bytes left to copy < 8*NBYTES
+ */
+.LTcleanup_both_aligned:
+ beqz len, .LTdone
+ sltu t0, len, 4*NBYTES
+ bnez t0, .LTless_than_4units
+ and rem, len, (NBYTES-1) # rem = len % NBYTES
+ /*
+ * len >= 4*NBYTES
+ */
+ LOAD t0, UNIT(0)(src)
+ LOAD t1, UNIT(1)(src)
+ LOAD t2, UNIT(2)(src)
+ LOAD t3, UNIT(3)(src)
+ SUB len, len, 4*NBYTES
+ ADD src, src, 4*NBYTES
+EXC( STORE t0, UNIT(0)(dst), .LTs_exc)
+ ADDC(sum, t0)
+EXC( STORE t1, UNIT(1)(dst), .LTs_exc)
+ ADDC(sum, t1)
+EXC( STORE t2, UNIT(2)(dst), .LTs_exc)
+ ADDC(sum, t2)
+EXC( STORE t3, UNIT(3)(dst), .LTs_exc)
+ ADDC(sum, t3)
+ .set reorder /* DADDI_WAR */
+ ADD dst, dst, 4*NBYTES
+ beqz len, .LTdone
+ .set noreorder
+.LTless_than_4units:
+ /*
+ * rem = len % NBYTES
+ */
+ beq rem, len, .LTcopy_bytes
+ nop
+1:
+ LOAD t0, 0(src)
+ ADD src, src, NBYTES
+ SUB len, len, NBYTES
+EXC( STORE t0, 0(dst), .LTs_exc)
+ ADDC(sum, t0)
+ .set reorder /* DADDI_WAR */
+ ADD dst, dst, NBYTES
+ bne rem, len, 1b
+ .set noreorder
+
+ /*
+ * src and dst are aligned, need to copy rem bytes (rem < NBYTES)
+ * A loop would do only a byte at a time with possible branch
+ * mispredicts. Can't do an explicit LOAD dst,mask,or,STORE
+ * because can't assume read-access to dst. Instead, use
+ * STREST dst, which doesn't require read access to dst.
+ *
+ * This code should perform better than a simple loop on modern,
+ * wide-issue mips processors because the code has fewer branches and
+ * more instruction-level parallelism.
+ */
+ beqz len, .LTdone
+ ADD t1, dst, len # t1 is just past last byte of dst
+ li bits, 8*NBYTES
+ SLL rem, len, 3 # rem = number of bits to keep
+ LOAD t0, 0(src)
+ SUB bits, bits, rem # bits = number of bits to discard
+ SHIFT_DISCARD t0, t0, bits
+EXC( STREST t0, -1(t1), .LTs_exc)
+ SHIFT_DISCARD_REVERT t0, t0, bits
+ .set reorder
+ ADDC(sum, t0)
+ b .LTdone
+ .set noreorder
+.LTdst_unaligned:
+ /*
+ * dst is unaligned
+ * t0 = src & ADDRMASK
+ * t1 = dst & ADDRMASK; T1 > 0
+ * len >= NBYTES
+ *
+ * Copy enough bytes to align dst
+ * Set match = (src and dst have same alignment)
+ */
+ LDFIRST t3, FIRST(0)(src)
+ ADD t2, zero, NBYTES
+ LDREST t3, REST(0)(src)
+ SUB t2, t2, t1 # t2 = number of bytes copied
+ xor match, t0, t1
+EXC( STFIRST t3, FIRST(0)(dst), .LTs_exc)
+ SLL t4, t1, 3 # t4 = number of bits to discard
+ SHIFT_DISCARD t3, t3, t4
+ /* skip SHIFT_DISCARD_REVERT so the odd-buffer case is handled properly */
+ ADDC(sum, t3)
+ beq len, t2, .LTdone
+ SUB len, len, t2
+ ADD dst, dst, t2
+ beqz match, .LTboth_aligned
+ ADD src, src, t2
+
+.LTsrc_unaligned_dst_aligned:
+ SRL t0, len, LOG_NBYTES+2 # +2 for 4 units/iter
+ PREF( 0, 3*32(src) )
+ beqz t0, .LTcleanup_src_unaligned
+ and rem, len, (4*NBYTES-1) # rem = len % 4*NBYTES
+ PREFE( 1, 3*32(dst) )
+1:
+/*
+ * Avoid consecutive LD*'s to the same register since some mips
+ * implementations can't issue them in the same cycle.
+ * It's OK to load FIRST(N+1) before REST(N) because the two addresses
+ * are to the same unit (unless src is aligned, but it's not).
+ */
+ LDFIRST t0, FIRST(0)(src)
+ LDFIRST t1, FIRST(1)(src)
+ SUB len, len, 4*NBYTES
+ LDREST t0, REST(0)(src)
+ LDREST t1, REST(1)(src)
+ LDFIRST t2, FIRST(2)(src)
+ LDFIRST t3, FIRST(3)(src)
+ LDREST t2, REST(2)(src)
+ LDREST t3, REST(3)(src)
+ PREF( 0, 9*32(src) ) # 0 is PREF_LOAD (not streamed)
+ ADD src, src, 4*NBYTES
+#ifdef CONFIG_CPU_SB1
+ nop # improves slotting
+#endif
+EXC( STORE t0, UNIT(0)(dst), .LTs_exc)
+ ADDC(sum, t0)
+EXC( STORE t1, UNIT(1)(dst), .LTs_exc)
+ ADDC(sum, t1)
+EXC( STORE t2, UNIT(2)(dst), .LTs_exc)
+ ADDC(sum, t2)
+EXC( STORE t3, UNIT(3)(dst), .LTs_exc)
+ ADDC(sum, t3)
+ PREFE( 1, 9*32(dst) ) # 1 is PREF_STORE (not streamed)
+ .set reorder /* DADDI_WAR */
+ ADD dst, dst, 4*NBYTES
+ bne len, rem, 1b
+ .set noreorder
+
+.LTcleanup_src_unaligned:
+ beqz len, .LTdone
+ and rem, len, NBYTES-1 # rem = len % NBYTES
+ beq rem, len, .LTcopy_bytes
+ nop
+1:
+ LDFIRST t0, FIRST(0)(src)
+ LDREST t0, REST(0)(src)
+ ADD src, src, NBYTES
+ SUB len, len, NBYTES
+EXC( STORE t0, 0(dst), .LTs_exc)
+ ADDC(sum, t0)
+ .set reorder /* DADDI_WAR */
+ ADD dst, dst, NBYTES
+ bne len, rem, 1b
+ .set noreorder
+
+.LTcopy_bytes_checklen:
+ beqz len, .LTdone
+ nop
+.LTcopy_bytes:
+ /* 0 < len < NBYTES */
+ move t2, zero # partial word
+ li t3, SHIFT_START # shift
+/* a faulting sbe goes to .LTs_exc: invalid checksum plus -EFAULT */
+#define COPY_BYTE(N) \
+ lbu t0, N(src); \
+ SUB len, len, 1; \
+EXC( sbe t0, N(dst), .LTs_exc); \
+ SLLV t0, t0, t3; \
+ addu t3, SHIFT_INC; \
+ beqz len, .LTcopy_bytes_done; \
+ or t2, t0
+
+ COPY_BYTE(0)
+ COPY_BYTE(1)
+#ifdef USE_DOUBLE
+ COPY_BYTE(2)
+ COPY_BYTE(3)
+ COPY_BYTE(4)
+ COPY_BYTE(5)
+#endif
+ lbu t0, NBYTES-2(src)
+ SUB len, len, 1
+EXC( sbe t0, NBYTES-2(dst), .LTs_exc)
+ SLLV t0, t0, t3
+ or t2, t0
+.LTcopy_bytes_done:
+ ADDC(sum, t2)
+.LTdone:
+ /* fold checksum */
+#ifdef USE_DOUBLE
+ dsll32 v1, sum, 0
+ daddu sum, v1
+ sltu v1, sum, v1
+ dsra32 sum, sum, 0
+ addu sum, v1
+#endif
+
+#ifdef CONFIG_CPU_MIPSR2
+ wsbh v1, sum
+ movn sum, v1, odd
+#else
+ beqz odd, 1f /* odd buffer alignment? */
+ lui v1, 0x00ff
+ addu v1, 0x00ff
+ and t0, sum, v1
+ sll t0, t0, 8
+ srl sum, sum, 8
+ and sum, sum, v1
+ or sum, sum, t0
+1:
+#endif
+ .set reorder
+ ADDC32(sum, psum)
+ jr ra
+ .set noreorder
+
+.LTs_exc:
+ li v0, -1 /* invalid checksum */
+ li v1, -EFAULT
+ jr ra
+ sw v1, (errptr)
+ END(__csum_partial_copy_touser)
+
+#endif /* CONFIG_EVA */
+
+#endif /* !CONFIG_CPU_MIPSR6 */
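
For context, callers would reach the two EVA routines above through the arch
checksum helpers. Plausible C prototypes matching the register usage here
(arguments in a0..a3 plus an error pointer) are sketched below; the
authoritative declarations live in asm/checksum.h and may differ:

	/* Assumed-for-illustration prototypes. Both return the partial
	 * checksum and write -EFAULT through err_ptr on a fault. */
	__wsum __csum_partial_copy_fromuser(const void __user *src, void *dst,
					    int len, __wsum sum, int *err_ptr);
	__wsum __csum_partial_copy_touser(const void *src, void __user *dst,
					  int len, __wsum sum, int *err_ptr);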
diff --git a/arch/mips/lib/dump_tlb.c b/arch/mips/lib/dump_tlb.c
index 32b9f21..70519e2 100644
--- a/arch/mips/lib/dump_tlb.c
+++ b/arch/mips/lib/dump_tlb.c
@@ -1,8 +1,13 @@
/*
- * Dump R4x00 TLB for debugging purposes.
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
*
* Copyright (C) 1994, 1995 by Waldorf Electronics, written by Ralf Baechle.
* Copyright (C) 1999 by Silicon Graphics, Inc.
+ * Copyright (C) 2011 MIPS Technologies, Inc.
+ *
+ * Dump R4x00 TLB for debugging purposes.
*/
#include <linux/kernel.h>
#include <linux/mm.h>
@@ -59,8 +64,10 @@
for (i = first; i <= last; i++) {
write_c0_index(i);
+ back_to_back_c0_hazard();
BARRIER();
tlb_read();
+ back_to_back_c0_hazard();
BARRIER();
pagemask = read_c0_pagemask();
entryhi = read_c0_entryhi();
@@ -68,8 +75,8 @@
entrylo1 = read_c0_entrylo1();
/* Unused entries have a virtual address of CKSEG0. */
- if ((entryhi & ~0x1ffffUL) != CKSEG0
- && (entryhi & 0xff) == asid) {
+ if (((entryhi & ~0x1ffffUL) != CKSEG0) &&
+ !(cpu_has_tlbinv && (entryhi & MIPS_EHINV))) {
#ifdef CONFIG_32BIT
int width = 8;
#else
@@ -83,7 +90,7 @@
c0 = (entrylo0 >> 3) & 7;
c1 = (entrylo1 >> 3) & 7;
- printk("va=%0*lx asid=%02lx\n",
+ printk("va=%0*lx asid=%02lx:",
width, (entryhi & ~0x1fffUL),
entryhi & 0xff);
printk("\t[pa=%0*llx c=%d d=%d v=%d g=%d] ",
@@ -105,6 +112,8 @@
write_c0_entryhi(s_entryhi);
write_c0_index(s_index);
write_c0_pagemask(s_pagemask);
+ BARRIER();
+ back_to_back_c0_hazard();
}
void dump_tlb_all(void)
diff --git a/arch/mips/lib/memcpy.S b/arch/mips/lib/memcpy.S
index c5c40da..20e7b18 100644
--- a/arch/mips/lib/memcpy.S
+++ b/arch/mips/lib/memcpy.S
@@ -27,6 +27,9 @@
#ifdef CONFIG_MIPS_MALTA
#undef CONFIG_CPU_HAS_PREFETCH
#endif
+#ifdef CONFIG_CPU_MIPSR6
+#undef CONFIG_CPU_HAS_PREFETCH
+#endif
#include <asm/asm.h>
#include <asm/asm-offsets.h>
@@ -101,10 +104,12 @@
#ifdef USE_DOUBLE
#define LOAD ld
+#ifndef CONFIG_CPU_MIPSR6
#define LOADL ldl
#define LOADR ldr
#define STOREL sdl
#define STORER sdr
+#endif
#define STORE sd
#define ADD daddu
#define SUB dsubu
@@ -116,6 +121,8 @@
#define NBYTES 8
#define LOG_NBYTES 3
+#define LOADK ld
+
/*
* As we are sharing code base with the mips32 tree (which use the o32 ABI
* register definitions). We need to redefine the register definitions from
@@ -137,10 +144,12 @@
#else
#define LOAD lw
+#ifndef CONFIG_CPU_MIPSR6
#define LOADL lwl
#define LOADR lwr
#define STOREL swl
#define STORER swr
+#endif
#define STORE sw
#define ADD addu
#define SUB subu
@@ -152,19 +161,21 @@
#define NBYTES 4
#define LOG_NBYTES 2
+#define LOADK lw
+
#endif /* USE_DOUBLE */
#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define LDFIRST LOADR
-#define LDREST LOADL
+#define LDREST LOADL
#define STFIRST STORER
-#define STREST STOREL
+#define STREST STOREL
#define SHIFT_DISCARD SLLV
#else
#define LDFIRST LOADL
-#define LDREST LOADR
+#define LDREST LOADR
#define STFIRST STOREL
-#define STREST STORER
+#define STREST STORER
#define SHIFT_DISCARD SRLV
#endif
@@ -227,15 +238,20 @@
and t0, src, ADDRMASK
PREF( 0, 2*32(src) )
PREF( 1, 2*32(dst) )
+#ifndef CONFIG_CPU_MIPSR6
bnez t1, .Ldst_unaligned
nop
bnez t0, .Lsrc_unaligned_dst_aligned
+#else
+ or t0, t0, t1
+ bnez t0, .Lcopy_unaligned_bytes
+#endif
/*
* use delay slot for fall-through
* src and dst are aligned; need to compute rem
*/
.Lboth_aligned:
- SRL t0, len, LOG_NBYTES+3 # +3 for 8 units/iter
+ SRL t0, len, LOG_NBYTES+3 # +3 for 8 units/iter
beqz t0, .Lcleanup_both_aligned # len < 8*NBYTES
and rem, len, (8*NBYTES-1) # rem = len % (8*NBYTES)
PREF( 0, 3*32(src) )
@@ -310,10 +326,11 @@
bne rem, len, 1b
.set noreorder
+#ifndef CONFIG_CPU_MIPSR6
/*
* src and dst are aligned, need to copy rem bytes (rem < NBYTES)
* A loop would do only a byte at a time with possible branch
- * mispredicts. Can't do an explicit LOAD dst,mask,or,STORE
+ * mispredicts. Can't do an explicit LOAD dst,mask,or,STORE
* because can't assume read-access to dst. Instead, use
* STREST dst, which doesn't require read access to dst.
*
@@ -327,7 +344,7 @@
li bits, 8*NBYTES
SLL rem, len, 3 # rem = number of bits to keep
EXC( LOAD t0, 0(src), .Ll_exc)
- SUB bits, bits, rem # bits = number of bits to discard
+ SUB bits, bits, rem # bits = number of bits to discard
SHIFT_DISCARD t0, t0, bits
EXC( STREST t0, -1(t1), .Ls_exc)
jr ra
@@ -343,7 +360,7 @@
* Set match = (src and dst have same alignment)
*/
#define match rem
-EXC( LDFIRST t3, FIRST(0)(src), .Ll_exc)
+EXC( LDFIRST t3, FIRST(0)(src), .Ll_exc)
ADD t2, zero, NBYTES
EXC( LDREST t3, REST(0)(src), .Ll_exc_copy)
SUB t2, t2, t1 # t2 = number of bytes copied
@@ -357,10 +374,10 @@
ADD src, src, t2
.Lsrc_unaligned_dst_aligned:
- SRL t0, len, LOG_NBYTES+2 # +2 for 4 units/iter
+ SRL t0, len, LOG_NBYTES+2 # +2 for 4 units/iter
PREF( 0, 3*32(src) )
beqz t0, .Lcleanup_src_unaligned
- and rem, len, (4*NBYTES-1) # rem = len % 4*NBYTES
+ and rem, len, (4*NBYTES-1) # rem = len % 4*NBYTES
PREF( 1, 3*32(dst) )
1:
/*
@@ -370,13 +387,13 @@
* are to the same unit (unless src is aligned, but it's not).
*/
R10KCBARRIER(0(ra))
-EXC( LDFIRST t0, FIRST(0)(src), .Ll_exc)
-EXC( LDFIRST t1, FIRST(1)(src), .Ll_exc_copy)
- SUB len, len, 4*NBYTES
+EXC( LDFIRST t0, FIRST(0)(src), .Ll_exc)
+EXC( LDFIRST t1, FIRST(1)(src), .Ll_exc_copy)
+ SUB len, len, 4*NBYTES
EXC( LDREST t0, REST(0)(src), .Ll_exc_copy)
EXC( LDREST t1, REST(1)(src), .Ll_exc_copy)
-EXC( LDFIRST t2, FIRST(2)(src), .Ll_exc_copy)
-EXC( LDFIRST t3, FIRST(3)(src), .Ll_exc_copy)
+EXC( LDFIRST t2, FIRST(2)(src), .Ll_exc_copy)
+EXC( LDFIRST t3, FIRST(3)(src), .Ll_exc_copy)
EXC( LDREST t2, REST(2)(src), .Ll_exc_copy)
EXC( LDREST t3, REST(3)(src), .Ll_exc_copy)
PREF( 0, 9*32(src) ) # 0 is PREF_LOAD (not streamed)
@@ -388,7 +405,7 @@
EXC( STORE t1, UNIT(1)(dst), .Ls_exc_p3u)
EXC( STORE t2, UNIT(2)(dst), .Ls_exc_p2u)
EXC( STORE t3, UNIT(3)(dst), .Ls_exc_p1u)
- PREF( 1, 9*32(dst) ) # 1 is PREF_STORE (not streamed)
+ PREF( 1, 9*32(dst) ) # 1 is PREF_STORE (not streamed)
.set reorder /* DADDI_WAR */
ADD dst, dst, 4*NBYTES
bne len, rem, 1b
@@ -410,6 +427,7 @@
ADD dst, dst, NBYTES
bne len, rem, 1b
.set noreorder
+#endif /* !CONFIG_CPU_MIPSR6 */
.Lcopy_bytes_checklen:
beqz len, .Ldone
@@ -438,6 +456,23 @@
.Ldone:
jr ra
nop
+
+#ifdef CONFIG_CPU_MIPSR6
+.Lcopy_unaligned_bytes:
+1:
+ COPY_BYTE(0)
+ COPY_BYTE(1)
+ COPY_BYTE(2)
+ COPY_BYTE(3)
+ COPY_BYTE(4)
+ COPY_BYTE(5)
+ COPY_BYTE(6)
+ COPY_BYTE(7)
+ ADD src, src, 8
+ b 1b
+ ADD dst, dst, 8
+#endif /* CONFIG_CPU_MIPSR6 */
+
END(memcpy)
.Ll_exc_copy:
@@ -451,9 +486,9 @@
*
* Assumes src < THREAD_BUADDR($28)
*/
- LOAD t0, TI_TASK($28)
+ LOADK t0, TI_TASK($28)
nop
- LOAD t0, THREAD_BUADDR(t0)
+ LOADK t0, THREAD_BUADDR(t0)
1:
EXC( lb t1, 0(src), .Ll_exc)
ADD src, src, 1
@@ -463,12 +498,12 @@
bne src, t0, 1b
.set noreorder
.Ll_exc:
- LOAD t0, TI_TASK($28)
+ LOADK t0, TI_TASK($28)
nop
- LOAD t0, THREAD_BUADDR(t0) # t0 is just past last good address
+ LOADK t0, THREAD_BUADDR(t0) # t0 is just past last good address
nop
- SUB len, AT, t0 # len number of uncopied bytes
bnez t6, .Ldone /* Skip the zeroing part if inatomic */
+ SUB len, AT, t0 # len number of uncopied bytes
/*
* Here's where we rely on src and dst being incremented in tandem,
* See (3) above.
@@ -502,7 +537,7 @@
#define SEXC(n) \
- .set reorder; /* DADDI_WAR */ \
+ .set reorder; /* DADDI_WAR */ \
.Ls_exc_p ## n ## u: \
ADD len, len, n*NBYTES; \
jr ra; \
@@ -575,3 +610,940 @@
jr ra
move a2, zero
END(__rmemcpy)
+
+#ifdef CONFIG_EVA
+
+ .set eva
+
+LEAF(__copy_fromuser_inatomic)
+ b __copy_fromuser_common
+ li t6, 1
+ END(__copy_fromuser_inatomic)
+
+#undef LOAD
+#undef LOADL
+#undef LOADR
+#undef STORE
+#undef STOREL
+#undef STORER
+#undef LDFIRST
+#undef LDREST
+#undef STFIRST
+#undef STREST
+#undef SHIFT_DISCARD
+#undef COPY_BYTE
+#undef SEXC
+
+#define LOAD lwe
+#define LOADL lwle
+#define LOADR lwre
+#define STOREL swl
+#define STORER swr
+#define STORE sw
+
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+#define LDFIRST LOADR
+#define LDREST LOADL
+#define STFIRST STORER
+#define STREST STOREL
+#define SHIFT_DISCARD SLLV
+#else
+#define LDFIRST LOADL
+#define LDREST LOADR
+#define STFIRST STOREL
+#define STREST STORER
+#define SHIFT_DISCARD SRLV
+#endif
+
+LEAF(__copy_fromuser)
+ li t6, 0 /* not inatomic */
+__copy_fromuser_common:
+ /*
+ * Note: dst & src may be unaligned, len may be 0
+ * Temps
+ */
+
+ R10KCBARRIER(0(ra))
+ /*
+ * The "issue break"s below are very approximate.
+ * Issue delays for dcache fills will perturb the schedule, as will
+ * load queue full replay traps, etc.
+ *
+ * If len < NBYTES use byte operations.
+ */
+ PREFE( 0, 0(src) )
+ PREF( 1, 0(dst) )
+ sltu t2, len, NBYTES
+ and t1, dst, ADDRMASK
+ PREFE( 0, 1*32(src) )
+ PREF( 1, 1*32(dst) )
+ bnez t2, .LFcopy_bytes_checklen
+ and t0, src, ADDRMASK
+ PREFE( 0, 2*32(src) )
+ PREF( 1, 2*32(dst) )
+ bnez t1, .LFdst_unaligned
+ nop
+ bnez t0, .LFsrc_unaligned_dst_aligned
+ /*
+ * use delay slot for fall-through
+ * src and dst are aligned; need to compute rem
+ */
+.LFboth_aligned:
+ SRL t0, len, LOG_NBYTES+3 # +3 for 8 units/iter
+ beqz t0, .LFcleanup_both_aligned # len < 8*NBYTES
+ and rem, len, (8*NBYTES-1) # rem = len % (8*NBYTES)
+ PREFE( 0, 3*32(src) )
+ PREF( 1, 3*32(dst) )
+ .align 4
+1:
+ R10KCBARRIER(0(ra))
+EXC( LOAD t0, UNIT(0)(src), .LFl_exc)
+EXC( LOAD t1, UNIT(1)(src), .LFl_exc_copy)
+EXC( LOAD t2, UNIT(2)(src), .LFl_exc_copy)
+EXC( LOAD t3, UNIT(3)(src), .LFl_exc_copy)
+ SUB len, len, 8*NBYTES
+EXC( LOAD t4, UNIT(4)(src), .LFl_exc_copy)
+EXC( LOAD t7, UNIT(5)(src), .LFl_exc_copy)
+ STORE t0, UNIT(0)(dst)
+ STORE t1, UNIT(1)(dst)
+EXC( LOAD t0, UNIT(6)(src), .LFl_exc_copy)
+EXC( LOAD t1, UNIT(7)(src), .LFl_exc_copy)
+ ADD src, src, 8*NBYTES
+ ADD dst, dst, 8*NBYTES
+ STORE t2, UNIT(-6)(dst)
+ STORE t3, UNIT(-5)(dst)
+ STORE t4, UNIT(-4)(dst)
+ STORE t7, UNIT(-3)(dst)
+ STORE t0, UNIT(-2)(dst)
+ STORE t1, UNIT(-1)(dst)
+ PREFE( 0, 8*32(src) )
+ PREF( 1, 8*32(dst) )
+ bne len, rem, 1b
+ nop
+
+ /*
+ * len == rem == the number of bytes left to copy < 8*NBYTES
+ */
+.LFcleanup_both_aligned:
+ beqz len, .Ldone
+ sltu t0, len, 4*NBYTES
+ bnez t0, .LFless_than_4units
+ and rem, len, (NBYTES-1) # rem = len % NBYTES
+ /*
+ * len >= 4*NBYTES
+ */
+EXC( LOAD t0, UNIT(0)(src), .LFl_exc)
+EXC( LOAD t1, UNIT(1)(src), .LFl_exc_copy)
+EXC( LOAD t2, UNIT(2)(src), .LFl_exc_copy)
+EXC( LOAD t3, UNIT(3)(src), .LFl_exc_copy)
+ SUB len, len, 4*NBYTES
+ ADD src, src, 4*NBYTES
+ R10KCBARRIER(0(ra))
+ STORE t0, UNIT(0)(dst)
+ STORE t1, UNIT(1)(dst)
+ STORE t2, UNIT(2)(dst)
+ STORE t3, UNIT(3)(dst)
+ .set reorder /* DADDI_WAR */
+ ADD dst, dst, 4*NBYTES
+ beqz len, .Ldone
+ .set noreorder
+.LFless_than_4units:
+ /*
+ * rem = len % NBYTES
+ */
+ beq rem, len, .LFcopy_bytes
+ nop
+1:
+ R10KCBARRIER(0(ra))
+EXC( LOAD t0, 0(src), .LFl_exc)
+ ADD src, src, NBYTES
+ SUB len, len, NBYTES
+ STORE t0, 0(dst)
+ .set reorder /* DADDI_WAR */
+ ADD dst, dst, NBYTES
+ bne rem, len, 1b
+ .set noreorder
+
+ /*
+ * src and dst are aligned, need to copy rem bytes (rem < NBYTES)
+ * A loop would do only a byte at a time with possible branch
+ * mispredicts. Can't do an explicit LOAD dst,mask,or,STORE
+ * because can't assume read-access to dst. Instead, use
+ * STREST dst, which doesn't require read access to dst.
+ *
+ * This code should perform better than a simple loop on modern,
+ * wide-issue mips processors because the code has fewer branches and
+ * more instruction-level parallelism.
+ */
+ beqz len, .Ldone
+ ADD t1, dst, len # t1 is just past last byte of dst
+ li bits, 8*NBYTES
+ SLL rem, len, 3 # rem = number of bits to keep
+EXC( LOAD t0, 0(src), .LFl_exc)
+ SUB bits, bits, rem # bits = number of bits to discard
+ SHIFT_DISCARD t0, t0, bits
+ STREST t0, -1(t1)
+ jr ra
+ move len, zero
+.LFdst_unaligned:
+ /*
+ * dst is unaligned
+ * t0 = src & ADDRMASK
+ * t1 = dst & ADDRMASK; T1 > 0
+ * len >= NBYTES
+ *
+ * Copy enough bytes to align dst
+ * Set match = (src and dst have same alignment)
+ */
+#define match rem
+EXC( LDFIRST t3, FIRST(0)(src), .LFl_exc)
+ ADD t2, zero, NBYTES
+EXC( LDREST t3, REST(0)(src), .LFl_exc_copy)
+ SUB t2, t2, t1 # t2 = number of bytes copied
+ xor match, t0, t1
+ R10KCBARRIER(0(ra))
+ STFIRST t3, FIRST(0)(dst)
+ beq len, t2, .Ldone
+ SUB len, len, t2
+ ADD dst, dst, t2
+ beqz match, .LFboth_aligned
+ ADD src, src, t2
+
+.LFsrc_unaligned_dst_aligned:
+ SRL t0, len, LOG_NBYTES+2 # +2 for 4 units/iter
+ PREFE( 0, 3*32(src) )
+ beqz t0, .LFcleanup_src_unaligned
+ and rem, len, (4*NBYTES-1) # rem = len % 4*NBYTES
+ PREF( 1, 3*32(dst) )
+1:
+/*
+ * Avoid consecutive LD*'s to the same register since some mips
+ * implementations can't issue them in the same cycle.
+ * It's OK to load FIRST(N+1) before REST(N) because the two addresses
+ * are to the same unit (unless src is aligned, but it's not).
+ */
+ R10KCBARRIER(0(ra))
+EXC( LDFIRST t0, FIRST(0)(src), .LFl_exc)
+EXC( LDFIRST t1, FIRST(1)(src), .LFl_exc_copy)
+ SUB len, len, 4*NBYTES
+EXC( LDREST t0, REST(0)(src), .LFl_exc_copy)
+EXC( LDREST t1, REST(1)(src), .LFl_exc_copy)
+EXC( LDFIRST t2, FIRST(2)(src), .LFl_exc_copy)
+EXC( LDFIRST t3, FIRST(3)(src), .LFl_exc_copy)
+EXC( LDREST t2, REST(2)(src), .LFl_exc_copy)
+EXC( LDREST t3, REST(3)(src), .LFl_exc_copy)
+ PREFE( 0, 9*32(src) ) # 0 is PREF_LOAD (not streamed)
+ ADD src, src, 4*NBYTES
+#ifdef CONFIG_CPU_SB1
+ nop # improves slotting
+#endif
+ STORE t0, UNIT(0)(dst)
+ STORE t1, UNIT(1)(dst)
+ STORE t2, UNIT(2)(dst)
+ STORE t3, UNIT(3)(dst)
+ PREF( 1, 9*32(dst) ) # 1 is PREF_STORE (not streamed)
+ .set reorder /* DADDI_WAR */
+ ADD dst, dst, 4*NBYTES
+ bne len, rem, 1b
+ .set noreorder
+
+.LFcleanup_src_unaligned:
+ beqz len, .Ldone
+ and rem, len, NBYTES-1 # rem = len % NBYTES
+ beq rem, len, .LFcopy_bytes
+ nop
+1:
+ R10KCBARRIER(0(ra))
+EXC( LDFIRST t0, FIRST(0)(src), .LFl_exc)
+EXC( LDREST t0, REST(0)(src), .LFl_exc_copy)
+ ADD src, src, NBYTES
+ SUB len, len, NBYTES
+ STORE t0, 0(dst)
+ .set reorder /* DADDI_WAR */
+ ADD dst, dst, NBYTES
+ bne len, rem, 1b
+ .set noreorder
+
+.LFcopy_bytes_checklen:
+ beqz len, .Ldone
+ nop
+.LFcopy_bytes:
+ /* 0 < len < NBYTES */
+ R10KCBARRIER(0(ra))
+#define COPY_BYTE(N) \
+EXC( lbe t0, N(src), .LFl_exc); \
+ SUB len, len, 1; \
+ beqz len, .Ldone; \
+ sb t0, N(dst)
+
+ COPY_BYTE(0)
+ COPY_BYTE(1)
+#ifdef USE_DOUBLE
+ COPY_BYTE(2)
+ COPY_BYTE(3)
+ COPY_BYTE(4)
+ COPY_BYTE(5)
+#endif
+EXC( lbe t0, NBYTES-2(src), .LFl_exc)
+ SUB len, len, 1
+ jr ra
+ sb t0, NBYTES-2(dst)
+
+.LFl_exc_copy:
+ /*
+ * Copy bytes from src until faulting load address (or until a
+ * lb faults)
+ *
+ * When reached by a faulting LDFIRST/LDREST, THREAD_BUADDR($28)
+ * may be more than a byte beyond the last address.
+ * Hence, the lb below may get an exception.
+ *
+ * Assumes src < THREAD_BUADDR($28)
+ */
+ LOADK t0, TI_TASK($28)
+ addi t0, t0, THREAD_BUADDR
+ LOADK t0, 0(t0)
+1:
+EXC( lbe t1, 0(src), .LFl_exc)
+ ADD src, src, 1
+ sb t1, 0(dst) # can't fault -- we're copy_from_user
+ .set reorder /* DADDI_WAR */
+ ADD dst, dst, 1
+ bne src, t0, 1b
+ .set noreorder
+.LFl_exc:
+ LOADK t0, TI_TASK($28)
+ addi t0, t0, THREAD_BUADDR
+ LOADK t0, 0(t0) # t0 is just past last good address
+ bnez t6, .Ldone /* Skip the zeroing part if inatomic */
+ SUB len, AT, t0 # len number of uncopied bytes
+ /*
+ * Here's where we rely on src and dst being incremented in tandem,
+ * See (3) above.
+ * dst += (fault addr - src) to put dst at first byte to clear
+ */
+ ADD dst, t0 # compute start address in a1
+ SUB dst, src
+ /*
+ * Clear len bytes starting at dst. Can't call __bzero because it
+ * might modify len. An inefficient loop for these rare times...
+ */
+ .set reorder /* DADDI_WAR */
+ SUB src, len, 1
+ beqz len, .Ldone
+ .set noreorder
+1: sb zero, 0(dst)
+ ADD dst, dst, 1
+#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
+ bnez src, 1b
+ SUB src, src, 1
+#else
+ .set push
+ .set noat
+ li v1, 1
+ bnez src, 1b
+ SUB src, src, v1
+ .set pop
+#endif
+ jr ra
+ nop
+ END(__copy_fromuser)
+
+
+#undef LOAD
+#undef LOADL
+#undef LOADR
+#undef STORE
+#undef STOREL
+#undef STORER
+#undef LDFIRST
+#undef LDREST
+#undef STFIRST
+#undef STREST
+#undef SHIFT_DISCARD
+#undef COPY_BYTE
+#undef SEXC
+
+#define LOAD lw
+#define LOADL lwl
+#define LOADR lwr
+#define STOREL swle
+#define STORER swre
+#define STORE swe
+
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+#define LDFIRST LOADR
+#define LDREST LOADL
+#define STFIRST STORER
+#define STREST STOREL
+#define SHIFT_DISCARD SLLV
+#else
+#define LDFIRST LOADL
+#define LDREST LOADR
+#define STFIRST STOREL
+#define STREST STORER
+#define SHIFT_DISCARD SRLV
+#endif
+
+LEAF(__copy_touser)
+ /*
+ * Note: dst & src may be unaligned, len may be 0
+ * Temps
+ */
+
+ R10KCBARRIER(0(ra))
+ /*
+ * The "issue break"s below are very approximate.
+ * Issue delays for dcache fills will perturb the schedule, as will
+ * load queue full replay traps, etc.
+ *
+ * If len < NBYTES use byte operations.
+ */
+ PREF( 0, 0(src) )
+ PREFE( 1, 0(dst) )
+ sltu t2, len, NBYTES
+ and t1, dst, ADDRMASK
+ PREF( 0, 1*32(src) )
+ PREFE( 1, 1*32(dst) )
+ bnez t2, .LTcopy_bytes_checklen
+ and t0, src, ADDRMASK
+ PREF( 0, 2*32(src) )
+ PREFE( 1, 2*32(dst) )
+ bnez t1, .LTdst_unaligned
+ nop
+ bnez t0, .LTsrc_unaligned_dst_aligned
+ /*
+ * use delay slot for fall-through
+ * src and dst are aligned; need to compute rem
+ */
+.LTboth_aligned:
+ SRL t0, len, LOG_NBYTES+3 # +3 for 8 units/iter
+ beqz t0, .LTcleanup_both_aligned # len < 8*NBYTES
+ and rem, len, (8*NBYTES-1) # rem = len % (8*NBYTES)
+ PREF( 0, 3*32(src) )
+ PREFE( 1, 3*32(dst) )
+ .align 4
+1:
+ R10KCBARRIER(0(ra))
+ LOAD t0, UNIT(0)(src)
+ LOAD t1, UNIT(1)(src)
+ LOAD t2, UNIT(2)(src)
+ LOAD t3, UNIT(3)(src)
+ SUB len, len, 8*NBYTES
+ LOAD t4, UNIT(4)(src)
+ LOAD t7, UNIT(5)(src)
+EXC( STORE t0, UNIT(0)(dst), .Ls_exc_p8u)
+EXC( STORE t1, UNIT(1)(dst), .Ls_exc_p7u)
+ LOAD t0, UNIT(6)(src)
+ LOAD t1, UNIT(7)(src)
+ ADD src, src, 8*NBYTES
+ ADD dst, dst, 8*NBYTES
+EXC( STORE t2, UNIT(-6)(dst), .Ls_exc_p6u)
+EXC( STORE t3, UNIT(-5)(dst), .Ls_exc_p5u)
+EXC( STORE t4, UNIT(-4)(dst), .Ls_exc_p4u)
+EXC( STORE t7, UNIT(-3)(dst), .Ls_exc_p3u)
+EXC( STORE t0, UNIT(-2)(dst), .Ls_exc_p2u)
+EXC( STORE t1, UNIT(-1)(dst), .Ls_exc_p1u)
+ PREF( 0, 8*32(src) )
+ PREFE( 1, 8*32(dst) )
+ bne len, rem, 1b
+ nop
+
+ /*
+ * len == rem == the number of bytes left to copy < 8*NBYTES
+ */
+.LTcleanup_both_aligned:
+ beqz len, .Ldone
+ sltu t0, len, 4*NBYTES
+ bnez t0, .LTless_than_4units
+ and rem, len, (NBYTES-1) # rem = len % NBYTES
+ /*
+ * len >= 4*NBYTES
+ */
+ LOAD t0, UNIT(0)(src)
+ LOAD t1, UNIT(1)(src)
+ LOAD t2, UNIT(2)(src)
+ LOAD t3, UNIT(3)(src)
+ SUB len, len, 4*NBYTES
+ ADD src, src, 4*NBYTES
+ R10KCBARRIER(0(ra))
+EXC( STORE t0, UNIT(0)(dst), .Ls_exc_p4u)
+EXC( STORE t1, UNIT(1)(dst), .Ls_exc_p3u)
+EXC( STORE t2, UNIT(2)(dst), .Ls_exc_p2u)
+EXC( STORE t3, UNIT(3)(dst), .Ls_exc_p1u)
+ .set reorder /* DADDI_WAR */
+ ADD dst, dst, 4*NBYTES
+ beqz len, .Ldone
+ .set noreorder
+.LTless_than_4units:
+ /*
+ * rem = len % NBYTES
+ */
+ beq rem, len, .LTcopy_bytes
+ nop
+1:
+ R10KCBARRIER(0(ra))
+ LOAD t0, 0(src)
+ ADD src, src, NBYTES
+ SUB len, len, NBYTES
+EXC( STORE t0, 0(dst), .Ls_exc_p1u)
+ .set reorder /* DADDI_WAR */
+ ADD dst, dst, NBYTES
+ bne rem, len, 1b
+ .set noreorder
+
+ /*
+ * src and dst are aligned, need to copy rem bytes (rem < NBYTES)
+ * A loop would do only a byte at a time with possible branch
+ * mispredicts. Can't do an explicit LOAD dst,mask,or,STORE
+ * because can't assume read-access to dst. Instead, use
+ * STREST dst, which doesn't require read access to dst.
+ *
+ * This code should perform better than a simple loop on modern,
+ * wide-issue mips processors because the code has fewer branches and
+ * more instruction-level parallelism.
+ */
+ beqz len, .Ldone
+ ADD t1, dst, len # t1 is just past last byte of dst
+ li bits, 8*NBYTES
+ SLL rem, len, 3 # rem = number of bits to keep
+ LOAD t0, 0(src)
+ SUB bits, bits, rem # bits = number of bits to discard
+ SHIFT_DISCARD t0, t0, bits
+EXC( STREST t0, -1(t1), .Ls_exc)
+ jr ra
+ move len, zero
+.LTdst_unaligned:
+ /*
+ * dst is unaligned
+ * t0 = src & ADDRMASK
+ * t1 = dst & ADDRMASK; T1 > 0
+ * len >= NBYTES
+ *
+ * Copy enough bytes to align dst
+ * Set match = (src and dst have same alignment)
+ */
+ LDFIRST t3, FIRST(0)(src)
+ ADD t2, zero, NBYTES
+ LDREST t3, REST(0)(src)
+ SUB t2, t2, t1 # t2 = number of bytes copied
+ xor match, t0, t1
+ R10KCBARRIER(0(ra))
+EXC( STFIRST t3, FIRST(0)(dst), .Ls_exc)
+ beq len, t2, .Ldone
+ SUB len, len, t2
+ ADD dst, dst, t2
+ beqz match, .LTboth_aligned
+ ADD src, src, t2
+
+.LTsrc_unaligned_dst_aligned:
+ SRL t0, len, LOG_NBYTES+2 # +2 for 4 units/iter
+ PREF( 0, 3*32(src) )
+ beqz t0, .LTcleanup_src_unaligned
+ and rem, len, (4*NBYTES-1) # rem = len % 4*NBYTES
+ PREFE( 1, 3*32(dst) )
+1:
+/*
+ * Avoid consecutive LD*'s to the same register since some mips
+ * implementations can't issue them in the same cycle.
+ * It's OK to load FIRST(N+1) before REST(N) because the two addresses
+ * are to the same unit (unless src is aligned, but it's not).
+ */
+ R10KCBARRIER(0(ra))
+ LDFIRST t0, FIRST(0)(src)
+ LDFIRST t1, FIRST(1)(src)
+ SUB len, len, 4*NBYTES
+ LDREST t0, REST(0)(src)
+ LDREST t1, REST(1)(src)
+ LDFIRST t2, FIRST(2)(src)
+ LDFIRST t3, FIRST(3)(src)
+ LDREST t2, REST(2)(src)
+ LDREST t3, REST(3)(src)
+ PREF( 0, 9*32(src) ) # 0 is PREF_LOAD (not streamed)
+ ADD src, src, 4*NBYTES
+#ifdef CONFIG_CPU_SB1
+ nop # improves slotting
+#endif
+EXC( STORE t0, UNIT(0)(dst), .Ls_exc_p4u)
+EXC( STORE t1, UNIT(1)(dst), .Ls_exc_p3u)
+EXC( STORE t2, UNIT(2)(dst), .Ls_exc_p2u)
+EXC( STORE t3, UNIT(3)(dst), .Ls_exc_p1u)
+ PREFE( 1, 9*32(dst) ) # 1 is PREF_STORE (not streamed)
+ .set reorder /* DADDI_WAR */
+ ADD dst, dst, 4*NBYTES
+ bne len, rem, 1b
+ .set noreorder
+
+.LTcleanup_src_unaligned:
+ beqz len, .Ldone
+ and rem, len, NBYTES-1 # rem = len % NBYTES
+ beq rem, len, .LTcopy_bytes
+ nop
+1:
+ R10KCBARRIER(0(ra))
+ LDFIRST t0, FIRST(0)(src)
+ LDREST t0, REST(0)(src)
+ ADD src, src, NBYTES
+ SUB len, len, NBYTES
+EXC( STORE t0, 0(dst), .Ls_exc_p1u)
+ .set reorder /* DADDI_WAR */
+ ADD dst, dst, NBYTES
+ bne len, rem, 1b
+ .set noreorder
+
+.LTcopy_bytes_checklen:
+ beqz len, .Ldone
+ nop
+.LTcopy_bytes:
+ /* 0 < len < NBYTES */
+ R10KCBARRIER(0(ra))
+#define COPY_BYTE(N) \
+ lb t0, N(src); \
+ SUB len, len, 1; \
+ beqz len, .Ldone; \
+EXC( sbe t0, N(dst), .Ls_exc_p1)
+
+ COPY_BYTE(0)
+ COPY_BYTE(1)
+#ifdef USE_DOUBLE
+ COPY_BYTE(2)
+ COPY_BYTE(3)
+ COPY_BYTE(4)
+ COPY_BYTE(5)
+#endif
+ lb t0, NBYTES-2(src)
+ SUB len, len, 1
+ jr ra
+EXC( sbe t0, NBYTES-2(dst), .Ls_exc_p1)
+ END(__copy_touser)
+
+
+#undef LOAD
+#undef LOADL
+#undef LOADR
+#undef STORE
+#undef STOREL
+#undef STORER
+#undef LDFIRST
+#undef LDREST
+#undef STFIRST
+#undef STREST
+#undef SHIFT_DISCARD
+#undef COPY_BYTE
+#undef SEXC
+
+#define LOAD lwe
+#define LOADL lwle
+#define LOADR lwre
+#define STOREL swle
+#define STORER swre
+#define STORE swe
+
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+#define LDFIRST LOADR
+#define LDREST LOADL
+#define STFIRST STORER
+#define STREST STOREL
+#define SHIFT_DISCARD SLLV
+#else
+#define LDFIRST LOADL
+#define LDREST LOADR
+#define STFIRST STOREL
+#define STREST STORER
+#define SHIFT_DISCARD SRLV
+#endif
+
+
+LEAF(__copy_inuser)
+ /*
+ * Note: dst & src may be unaligned, len may be 0
+ * Temps
+ */
+
+ R10KCBARRIER(0(ra))
+ /*
+ * The "issue break"s below are very approximate.
+ * Issue delays for dcache fills will perturb the schedule, as will
+ * load queue full replay traps, etc.
+ *
+ * If len < NBYTES use byte operations.
+ */
+ PREFE( 0, 0(src) )
+ PREFE( 1, 0(dst) )
+ sltu t2, len, NBYTES
+ and t1, dst, ADDRMASK
+ PREFE( 0, 1*32(src) )
+ PREFE( 1, 1*32(dst) )
+ bnez t2, .LIcopy_bytes_checklen
+ and t0, src, ADDRMASK
+ PREFE( 0, 2*32(src) )
+ PREFE( 1, 2*32(dst) )
+ bnez t1, .LIdst_unaligned
+ nop
+ bnez t0, .LIsrc_unaligned_dst_aligned
+ /*
+ * use delay slot for fall-through
+ * src and dst are aligned; need to compute rem
+ */
+.LIboth_aligned:
+ SRL t0, len, LOG_NBYTES+3 # +3 for 8 units/iter
+ beqz t0, .LIcleanup_both_aligned # len < 8*NBYTES
+ and rem, len, (8*NBYTES-1) # rem = len % (8*NBYTES)
+ PREFE( 0, 3*32(src) )
+ PREFE( 1, 3*32(dst) )
+ .align 4
+1:
+ R10KCBARRIER(0(ra))
+EXC( LOAD t0, UNIT(0)(src), .LIl_exc)
+EXC( LOAD t1, UNIT(1)(src), .LIl_exc_copy)
+EXC( LOAD t2, UNIT(2)(src), .LIl_exc_copy)
+EXC( LOAD t3, UNIT(3)(src), .LIl_exc_copy)
+ SUB len, len, 8*NBYTES
+EXC( LOAD t4, UNIT(4)(src), .LIl_exc_copy)
+EXC( LOAD t7, UNIT(5)(src), .LIl_exc_copy)
+EXC( STORE t0, UNIT(0)(dst), .Ls_exc_p8u)
+EXC( STORE t1, UNIT(1)(dst), .Ls_exc_p7u)
+EXC( LOAD t0, UNIT(6)(src), .LIl_exc_copy)
+EXC( LOAD t1, UNIT(7)(src), .LIl_exc_copy)
+ ADD src, src, 8*NBYTES
+ ADD dst, dst, 8*NBYTES
+EXC( STORE t2, UNIT(-6)(dst), .Ls_exc_p6u)
+EXC( STORE t3, UNIT(-5)(dst), .Ls_exc_p5u)
+EXC( STORE t4, UNIT(-4)(dst), .Ls_exc_p4u)
+EXC( STORE t7, UNIT(-3)(dst), .Ls_exc_p3u)
+EXC( STORE t0, UNIT(-2)(dst), .Ls_exc_p2u)
+EXC( STORE t1, UNIT(-1)(dst), .Ls_exc_p1u)
+ PREFE( 0, 8*32(src) )
+ PREFE( 1, 8*32(dst) )
+ bne len, rem, 1b
+ nop
+
+ /*
+ * len == rem == the number of bytes left to copy < 8*NBYTES
+ */
+.LIcleanup_both_aligned:
+ beqz len, .Ldone
+ sltu t0, len, 4*NBYTES
+ bnez t0, .LIless_than_4units
+ and rem, len, (NBYTES-1) # rem = len % NBYTES
+ /*
+ * len >= 4*NBYTES
+ */
+EXC( LOAD t0, UNIT(0)(src), .LIl_exc)
+EXC( LOAD t1, UNIT(1)(src), .LIl_exc_copy)
+EXC( LOAD t2, UNIT(2)(src), .LIl_exc_copy)
+EXC( LOAD t3, UNIT(3)(src), .LIl_exc_copy)
+ SUB len, len, 4*NBYTES
+ ADD src, src, 4*NBYTES
+ R10KCBARRIER(0(ra))
+EXC( STORE t0, UNIT(0)(dst), .Ls_exc_p4u)
+EXC( STORE t1, UNIT(1)(dst), .Ls_exc_p3u)
+EXC( STORE t2, UNIT(2)(dst), .Ls_exc_p2u)
+EXC( STORE t3, UNIT(3)(dst), .Ls_exc_p1u)
+ .set reorder /* DADDI_WAR */
+ ADD dst, dst, 4*NBYTES
+ beqz len, .Ldone
+ .set noreorder
+.LIless_than_4units:
+ /*
+ * rem = len % NBYTES
+ */
+ beq rem, len, .LIcopy_bytes
+ nop
+1:
+ R10KCBARRIER(0(ra))
+EXC( LOAD t0, 0(src), .LIl_exc)
+ ADD src, src, NBYTES
+ SUB len, len, NBYTES
+EXC( STORE t0, 0(dst), .Ls_exc_p1u)
+ .set reorder /* DADDI_WAR */
+ ADD dst, dst, NBYTES
+ bne rem, len, 1b
+ .set noreorder
+
+ /*
+ * src and dst are aligned, need to copy rem bytes (rem < NBYTES)
+ * A loop would do only a byte at a time with possible branch
+ * mispredicts. Can't do an explicit LOAD dst,mask,or,STORE
+ * because can't assume read-access to dst. Instead, use
+ * STREST dst, which doesn't require read access to dst.
+ *
+ * This code should perform better than a simple loop on modern,
+ * wide-issue mips processors because the code has fewer branches and
+ * more instruction-level parallelism.
+ */
+ beqz len, .Ldone
+ ADD t1, dst, len # t1 is just past last byte of dst
+ li bits, 8*NBYTES
+ SLL rem, len, 3 # rem = number of bits to keep
+EXC( LOAD t0, 0(src), .LIl_exc)
+ SUB bits, bits, rem # bits = number of bits to discard
+ SHIFT_DISCARD t0, t0, bits
+EXC( STREST t0, -1(t1), .Ls_exc)
+ jr ra
+ move len, zero
+.LIdst_unaligned:
+ /*
+ * dst is unaligned
+ * t0 = src & ADDRMASK
+ * t1 = dst & ADDRMASK; T1 > 0
+ * len >= NBYTES
+ *
+ * Copy enough bytes to align dst
+ * Set match = (src and dst have same alignment)
+ */
+EXC( LDFIRST t3, FIRST(0)(src), .LIl_exc)
+ ADD t2, zero, NBYTES
+EXC( LDREST t3, REST(0)(src), .LIl_exc_copy)
+ SUB t2, t2, t1 # t2 = number of bytes copied
+ xor match, t0, t1
+ R10KCBARRIER(0(ra))
+EXC( STFIRST t3, FIRST(0)(dst), .Ls_exc)
+ beq len, t2, .Ldone
+ SUB len, len, t2
+ ADD dst, dst, t2
+ beqz match, .LIboth_aligned
+ ADD src, src, t2
+
+.LIsrc_unaligned_dst_aligned:
+ SRL t0, len, LOG_NBYTES+2 # +2 for 4 units/iter
+ PREFE( 0, 3*32(src) )
+ beqz t0, .LIcleanup_src_unaligned
+ and rem, len, (4*NBYTES-1) # rem = len % 4*NBYTES
+ PREFE( 1, 3*32(dst) )
+1:
+/*
+ * Avoid consecutive LD*'s to the same register since some mips
+ * implementations can't issue them in the same cycle.
+ * It's OK to load FIRST(N+1) before REST(N) because the two addresses
+ * are to the same unit (unless src is aligned, but it's not).
+ */
+ R10KCBARRIER(0(ra))
+EXC( LDFIRST t0, FIRST(0)(src), .LIl_exc)
+EXC( LDFIRST t1, FIRST(1)(src), .LIl_exc_copy)
+ SUB len, len, 4*NBYTES
+EXC( LDREST t0, REST(0)(src), .LIl_exc_copy)
+EXC( LDREST t1, REST(1)(src), .LIl_exc_copy)
+EXC( LDFIRST t2, FIRST(2)(src), .LIl_exc_copy)
+EXC( LDFIRST t3, FIRST(3)(src), .LIl_exc_copy)
+EXC( LDREST t2, REST(2)(src), .LIl_exc_copy)
+EXC( LDREST t3, REST(3)(src), .LIl_exc_copy)
+ PREFE( 0, 9*32(src) ) # 0 is PREF_LOAD (not streamed)
+ ADD src, src, 4*NBYTES
+#ifdef CONFIG_CPU_SB1
+ nop # improves slotting
+#endif
+EXC( STORE t0, UNIT(0)(dst), .Ls_exc_p4u)
+EXC( STORE t1, UNIT(1)(dst), .Ls_exc_p3u)
+EXC( STORE t2, UNIT(2)(dst), .Ls_exc_p2u)
+EXC( STORE t3, UNIT(3)(dst), .Ls_exc_p1u)
+ PREFE( 1, 9*32(dst) ) # 1 is PREF_STORE (not streamed)
+ .set reorder /* DADDI_WAR */
+ ADD dst, dst, 4*NBYTES
+ bne len, rem, 1b
+ .set noreorder
+
+.LIcleanup_src_unaligned:
+ beqz len, .Ldone
+ and rem, len, NBYTES-1 # rem = len % NBYTES
+ beq rem, len, .LIcopy_bytes
+ nop
+1:
+ R10KCBARRIER(0(ra))
+EXC( LDFIRST t0, FIRST(0)(src), .LIl_exc)
+EXC( LDREST t0, REST(0)(src), .LIl_exc_copy)
+ ADD src, src, NBYTES
+ SUB len, len, NBYTES
+EXC( STORE t0, 0(dst), .Ls_exc_p1u)
+ .set reorder /* DADDI_WAR */
+ ADD dst, dst, NBYTES
+ bne len, rem, 1b
+ .set noreorder
+
+.LIcopy_bytes_checklen:
+ beqz len, .Ldone
+ nop
+.LIcopy_bytes:
+ /* 0 < len < NBYTES */
+ R10KCBARRIER(0(ra))
+#define COPY_BYTE(N) \
+EXC( lbe t0, N(src), .LIl_exc); \
+ SUB len, len, 1; \
+ beqz len, .Ldone; \
+EXC( sbe t0, N(dst), .Ls_exc_p1)
+
+ COPY_BYTE(0)
+ COPY_BYTE(1)
+#ifdef USE_DOUBLE
+ COPY_BYTE(2)
+ COPY_BYTE(3)
+ COPY_BYTE(4)
+ COPY_BYTE(5)
+#endif
+EXC( lbe t0, NBYTES-2(src), .LIl_exc)
+ SUB len, len, 1
+ jr ra
+EXC( sbe t0, NBYTES-2(dst), .Ls_exc_p1)
+
+.LIl_exc_copy:
+ /*
+ * Copy bytes from src until faulting load address (or until a
+ * lb faults)
+ *
+ * When reached by a faulting LDFIRST/LDREST, THREAD_BUADDR($28)
+ * may be more than a byte beyond the last address.
+ * Hence, the lb below may get an exception.
+ *
+ * Assumes src < THREAD_BUADDR($28)
+ */
+ LOADK t0, TI_TASK($28)
+ addi t0, t0, THREAD_BUADDR
+ LOADK t0, 0(t0)
+1:
+EXC( lbe t1, 0(src), .LIl_exc)
+ ADD src, src, 1
+EXC( sbe t1, 0(dst), .Ls_exc)
+ .set reorder /* DADDI_WAR */
+ ADD dst, dst, 1
+ SUB len, len, 1 # need this because of sbe above
+ bne src, t0, 1b
+ .set noreorder
+.LIl_exc:
+ LOADK t0, TI_TASK($28)
+ addi t0, t0, THREAD_BUADDR
+ LOADK t0, 0(t0) # t0 is just past last good address
+ SUB len, AT, t0 # len number of uncopied bytes
+ /*
+ * Here's where we rely on src and dst being incremented in tandem,
+ * See (3) above.
+ * dst += (fault addr - src) to put dst at first byte to clear
+ */
+ ADD dst, t0 # compute start address in a1
+ SUB dst, src
+ /*
+ * Clear len bytes starting at dst. Can't call __bzero because it
+ * might modify len. An inefficient loop for these rare times...
+ */
+ .set reorder /* DADDI_WAR */
+ SUB src, len, 1
+ beqz len, .Ldone
+ .set noreorder
+1:
+EXC( sbe zero, 0(dst), .Ls_exc)
+ ADD dst, dst, 1
+#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
+ bnez src, 1b
+ SUB src, src, 1
+#else
+ .set push
+ .set noat
+ li v1, 1
+ bnez src, 1b
+ SUB src, src, v1
+ .set pop
+#endif
+ jr ra
+ nop
+ END(__copy_inuser)
+
+#endif
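Two points worth noting about the memcpy changes above. R6 removes the unaligned load/store instructions (lwl/lwr, ldl/ldr and their store forms), so any copy with a misaligned src or dst drops to the plain byte loop at .Lcopy_unaligned_bytes, which in C is simply (illustrative, not kernel API):

	static void r6_copy_unaligned(unsigned char *dst, const unsigned char *src,
				      unsigned long len)
	{
		while (len--)		/* one byte per iteration, no lwl/lwr */
			*dst++ = *src++;
	}

The EVA variants, meanwhile, pair every user-side access with its EVA form (lwe/lbe/sbe, prefetch via PREFE) while the kernel-side half of each copy keeps the ordinary instructions, which is why __copy_fromuser, __copy_touser and __copy_inuser each redefine LOAD/STORE and friends before their bodies.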
diff --git a/arch/mips/lib/memset.S b/arch/mips/lib/memset.S
index 0580194..274fb38 100644
--- a/arch/mips/lib/memset.S
+++ b/arch/mips/lib/memset.S
@@ -40,6 +40,12 @@
PTR 9b, handler; \
.previous
+#define EXE(insn,handler) \
+9: .word insn; \
+ .section __ex_table,"a"; \
+ PTR 9b, handler; \
+ .previous
+
.macro f_fill64 dst, offset, val, fixup
EX(LONG_S, \val, (\offset + 0 * STORSIZE)(\dst), \fixup)
EX(LONG_S, \val, (\offset + 1 * STORSIZE)(\dst), \fixup)
@@ -63,6 +69,26 @@
#endif
.endm
+ .macro f_fill64eva dst, offset, val, fixup
+ .set eva
+ EX(swe, \val, (\offset + 0 * STORSIZE)(\dst), \fixup)
+ EX(swe, \val, (\offset + 1 * STORSIZE)(\dst), \fixup)
+ EX(swe, \val, (\offset + 2 * STORSIZE)(\dst), \fixup)
+ EX(swe, \val, (\offset + 3 * STORSIZE)(\dst), \fixup)
+ EX(swe, \val, (\offset + 4 * STORSIZE)(\dst), \fixup)
+ EX(swe, \val, (\offset + 5 * STORSIZE)(\dst), \fixup)
+ EX(swe, \val, (\offset + 6 * STORSIZE)(\dst), \fixup)
+ EX(swe, \val, (\offset + 7 * STORSIZE)(\dst), \fixup)
+ EX(swe, \val, (\offset + 8 * STORSIZE)(\dst), \fixup)
+ EX(swe, \val, (\offset + 9 * STORSIZE)(\dst), \fixup)
+ EX(swe, \val, (\offset + 10 * STORSIZE)(\dst), \fixup)
+ EX(swe, \val, (\offset + 11 * STORSIZE)(\dst), \fixup)
+ EX(swe, \val, (\offset + 12 * STORSIZE)(\dst), \fixup)
+ EX(swe, \val, (\offset + 13 * STORSIZE)(\dst), \fixup)
+ EX(swe, \val, (\offset + 14 * STORSIZE)(\dst), \fixup)
+ EX(swe, \val, (\offset + 15 * STORSIZE)(\dst), \fixup)
+ .endm
+
/*
* memset(void *s, int c, size_t n)
*
@@ -107,6 +133,8 @@
.set at
#endif
+#ifndef CONFIG_CPU_MIPSR6
+
R10KCBARRIER(0(ra))
#ifdef __MIPSEB__
EX(LONG_S_L, a1, (a0), .Lfirst_fixup) /* make word/dword aligned */
@@ -114,9 +142,37 @@
#ifdef __MIPSEL__
EX(LONG_S_R, a1, (a0), .Lfirst_fixup) /* make word/dword aligned */
#endif
- PTR_SUBU a0, t0 /* long align ptr */
+ PTR_SUBU a0, t0 /* long align ptr */
PTR_ADDU a2, t0 /* correct size */
+#else /* CONFIG_CPU_MIPSR6 */
+
+#define STORE_BYTE(N) \
+EX( sb, a1, N(a0), .Lbyte_fixup); \
+ beqz t0, 0f; \
+ PTR_ADDU t0, 1;
+
+ PTR_ADDU a2, t0 /* correct size */
+
+ PTR_ADDU t0, 1
+ STORE_BYTE(0)
+ STORE_BYTE(1)
+#if LONGSIZE == 4
+ EX( sb, a1, 2(a0), .Lbyte_fixup)
+#else
+ STORE_BYTE(2)
+ STORE_BYTE(3)
+ STORE_BYTE(4)
+ STORE_BYTE(5)
+ EX( sb, a1, 6(a0), .Lbyte_fixup)
+#endif
+0:
+ ori a0, STORMASK
+ xori a0, STORMASK
+ PTR_ADDIU a0, STORSIZE
+
+#endif /* CONFIG_CPU_MIPSR6 */
+
1: ori t1, a2, 0x3f /* # of full blocks */
xori t1, 0x3f
beqz t1, .Lmemset_partial /* no block to fill */
@@ -155,7 +211,8 @@
andi a2, STORMASK /* At most one long to go */
beqz a2, 1f
- PTR_ADDU a0, a2 /* What's left */
+#ifndef CONFIG_CPU_MIPSR6
+ PTR_ADDU a0, a2 /* What's left */
R10KCBARRIER(0(ra))
#ifdef __MIPSEB__
EX(LONG_S_R, a1, -1(a0), .Llast_fixup)
@@ -163,6 +220,22 @@
#ifdef __MIPSEL__
EX(LONG_S_L, a1, -1(a0), .Llast_fixup)
#endif
+#else /* CONFIG_CPU_MIPSR6 */
+ PTR_SUBU t0, $0, a2
+ PTR_ADDIU t0, 1
+ STORE_BYTE(0)
+ STORE_BYTE(1)
+#if LONGSIZE == 4
+ EX( sb, a1, 2(a0), .Lbyte_fixup)
+#else
+ STORE_BYTE(2)
+ STORE_BYTE(3)
+ STORE_BYTE(4)
+ STORE_BYTE(5)
+ EX( sb, a1, 6(a0), .Lbyte_fixup)
+#endif
+0:
+#endif /* CONFIG_CPU_MIPSR6 */
1: jr ra
move a2, zero
@@ -202,3 +275,149 @@
.Llast_fixup:
jr ra
andi v1, a2, STORMASK
+
+#ifdef CONFIG_CPU_MIPSR6
+.Lbyte_fixup:
+ PTR_SUBU a2, $0, t0
+ jr ra
+ PTR_ADDIU a2, 1
+#endif /* CONFIG_CPU_MIPSR6 */
+
+#ifdef CONFIG_EVA
+/* ++++++++ */
+/* EVA stuff */
+/* ++++++++ */
+
+ .set eva
+
+#undef LONG_S_L
+#undef LONG_S_R
+
+#define LONG_S_L swle
+#define LONG_S_R swre
+
+LEAF(__bzero_user)
+ sltiu t0, a2, STORSIZE /* very small region? */
+ bnez t0, .LEsmall_memset
+ andi t0, a0, STORMASK /* aligned? */
+
+#ifdef CONFIG_CPU_MICROMIPS
+ move t8, a1
+ move t9, a1
+#endif
+#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
+ beqz t0, 1f
+ PTR_SUBU t0, STORSIZE /* alignment in bytes */
+#else
+ .set noat
+ li AT, STORSIZE
+ beqz t0, 1f
+ PTR_SUBU t0, AT /* alignment in bytes */
+ .set at
+#endif
+
+ R10KCBARRIER(0(ra))
+#ifdef __MIPSEB__
+ EX(LONG_S_L, a1, (a0), .LEfirst_fixup) /* make word/dword aligned */
+#endif
+#ifdef __MIPSEL__
+ EX(LONG_S_R, a1, (a0), .LEfirst_fixup) /* make word/dword aligned */
+#endif
+ PTR_SUBU a0, t0 /* long align ptr */
+ PTR_ADDU a2, t0 /* correct size */
+
+1: ori t1, a2, 0x3f /* # of full blocks */
+ xori t1, 0x3f
+ beqz t1, .LEmemset_partial /* no block to fill */
+ andi t0, a2, 0x40-STORSIZE
+
+ PTR_ADDU t1, a0 /* end address */
+ .set reorder
+1: PTR_ADDIU a0, 64
+ R10KCBARRIER(0(ra))
+ f_fill64eva a0, -64, a1, .LEfwd_fixup
+ bne t1, a0, 1b
+ .set noreorder
+
+.LEmemset_partial:
+ R10KCBARRIER(0(ra))
+ PTR_LA t1, 2f /* where to start */
+#ifdef CONFIG_CPU_MICROMIPS
+ LONG_SRL t7, t0, 1
+#if LONGSIZE == 4
+ PTR_SUBU t1, t7
+#else
+ .set noat
+ LONG_SRL AT, t7, 1
+ PTR_SUBU t1, AT
+ .set at
+#endif
+#else
+#if LONGSIZE == 4
+ PTR_SUBU t1, t0
+#else
+ .set noat
+ LONG_SRL AT, t0, 1
+ PTR_SUBU t1, AT
+ .set at
+#endif
+#endif
+ jr t1
+ PTR_ADDU a0, t0 /* dest ptr */
+
+ .set push
+ .set noreorder
+ .set nomacro
+ f_fill64eva a0, -64, a1, .LEpartial_fixup /* ... but first do longs ... */
+2: .set pop
+ andi a2, STORMASK /* At most one long to go */
+
+ beqz a2, 1f
+ PTR_ADDU a0, a2 /* What's left */
+ R10KCBARRIER(0(ra))
+#ifdef __MIPSEB__
+ EX(LONG_S_R, a1, -1(a0), .LElast_fixup)
+#endif
+#ifdef __MIPSEL__
+ EX(LONG_S_L, a1, -1(a0), .LElast_fixup)
+#endif
+1: jr ra
+ move a2, zero
+
+.LEsmall_memset:
+ beqz a2, 2f
+ PTR_ADDU t1, a0, a2
+
+1: PTR_ADDIU a0, 1 /* fill bytewise */
+ R10KCBARRIER(0(ra))
+ bne t1, a0, 1b
+ sb a1, -1(a0)
+
+2: jr ra /* done */
+ move a2, zero
+
+.LEfirst_fixup:
+ jr ra
+ nop
+
+.LEfwd_fixup:
+ PTR_L t0, TI_TASK($28)
+ andi a2, 0x3f
+ LONG_L t0, THREAD_BUADDR(t0)
+ LONG_ADDU a2, t1
+ jr ra
+ LONG_SUBU a2, t0
+
+.LEpartial_fixup:
+ PTR_L t0, TI_TASK($28)
+ andi a2, STORMASK
+ LONG_L t0, THREAD_BUADDR(t0)
+ LONG_ADDU a2, t1
+ jr ra
+ LONG_SUBU a2, t0
+
+.LElast_fixup:
+ jr ra
+ andi v1, a2, STORMASK
+ END(__bzero_user)
+#endif
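As with memcpy, the R6 memset path cannot use swl/swr for the unaligned head and tail, so STORE_BYTE fills them one byte at a time before rounding the pointer up to a STORSIZE boundary. A hedged C sketch of the head fill (illustrative names, not the exact register dance above):

	static unsigned char *r6_fill_head(unsigned char *p, unsigned char c,
					   unsigned long *len)
	{
		/* store bytes until p is long-aligned or the buffer runs out */
		while (((unsigned long)p & (sizeof(long) - 1)) && *len) {
			*p++ = c;
			(*len)--;
		}
		return p;
	}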
diff --git a/arch/mips/lib/mips-atomic.c b/arch/mips/lib/mips-atomic.c
index 6807f71..70ad0c3 100644
--- a/arch/mips/lib/mips-atomic.c
+++ b/arch/mips/lib/mips-atomic.c
@@ -15,7 +15,7 @@
#include <linux/export.h>
#include <linux/stringify.h>
-#if !defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT_SMTC)
+#if (!defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_CPU_MIPSR6)) || defined(CONFIG_MIPS_MT_SMTC)
/*
* For cli() we have to insert nops to make sure that the new value
@@ -47,7 +47,7 @@
" ori $1, 0x400 \n"
" .set noreorder \n"
" mtc0 $1, $2, 1 \n"
-#elif defined(CONFIG_CPU_MIPSR2)
+#elif defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
/* see irqflags.h for inline function */
#else
" mfc0 $1,$12 \n"
@@ -83,7 +83,7 @@
" .set noreorder \n"
" mtc0 $1, $2, 1 \n"
" andi %[flags], %[flags], 0x400 \n"
-#elif defined(CONFIG_CPU_MIPSR2)
+#elif defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
/* see irqflags.h for inline function */
#else
" mfc0 %[flags], $12 \n"
@@ -130,9 +130,9 @@
" xori $1, 0x400 \n"
" or %[flags], $1 \n"
" mtc0 %[flags], $2, 1 \n"
-#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
+#elif (defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)) && defined(CONFIG_IRQ_CPU)
/* see irqflags.h for inline function */
-#elif defined(CONFIG_CPU_MIPSR2)
+#elif defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
/* see irqflags.h for inline function */
#else
" mfc0 $1, $12 \n"
@@ -192,4 +192,4 @@
}
EXPORT_SYMBOL(__arch_local_irq_restore);
-#endif /* !defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT_SMTC) */
+#endif /* (!defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_CPU_MIPSR6)) || defined(CONFIG_MIPS_MT_SMTC) */
diff --git a/arch/mips/lib/r3k_dump_tlb.c b/arch/mips/lib/r3k_dump_tlb.c
index 91615c2..ed9bd4d 100644
--- a/arch/mips/lib/r3k_dump_tlb.c
+++ b/arch/mips/lib/r3k_dump_tlb.c
@@ -1,9 +1,14 @@
/*
- * Dump R3000 TLB for debugging purposes.
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
*
* Copyright (C) 1994, 1995 by Waldorf Electronics, written by Ralf Baechle.
* Copyright (C) 1999 by Silicon Graphics, Inc.
* Copyright (C) 1999 by Harald Koerfgen
+ * Copyright (C) 2011 MIPS Technologies, Inc.
+ *
+ * Dump R3000 TLB for debugging purposes.
*/
#include <linux/kernel.h>
#include <linux/mm.h>
diff --git a/arch/mips/lib/strlen_user.S b/arch/mips/lib/strlen_user.S
index e362dcd..b3e070f 100644
--- a/arch/mips/lib/strlen_user.S
+++ b/arch/mips/lib/strlen_user.S
@@ -11,7 +11,7 @@
#include <asm/asm-offsets.h>
#include <asm/regdef.h>
-#define EX(insn,reg,addr,handler) \
+#define EX(insn,reg,addr,handler) \
9: insn reg, addr; \
.section __ex_table,"a"; \
PTR 9b, handler; \
@@ -22,19 +22,38 @@
*
* Return 0 for error
*/
+LEAF(__strlen_kernel_asm)
+ LONG_L v0, TI_ADDR_LIMIT($28) # pointer ok?
+ and v0, a0
+ bnez v0, .Lfault
+
+FEXPORT(__strlen_kernel_nocheck_asm)
+ move v0, a0
+1: EX(lbu, v1, (v0), .Lfault)
+ PTR_ADDIU v0, 1
+ bnez v1, 1b
+ PTR_SUBU v0, a0
+ jr ra
+ END(__strlen_kernel_asm)
+
+.Lfault: move v0, zero
+ jr ra
+
+#ifdef CONFIG_EVA
+
LEAF(__strlen_user_asm)
- LONG_L v0, TI_ADDR_LIMIT($28) # pointer ok?
- and v0, a0
- bnez v0, .Lfault
+ LONG_L v0, TI_ADDR_LIMIT($28) # pointer ok?
+ and v0, a0
+ bnez v0, .Lfault
FEXPORT(__strlen_user_nocheck_asm)
- move v0, a0
-1: EX(lbu, v1, (v0), .Lfault)
- PTR_ADDIU v0, 1
- bnez v1, 1b
- PTR_SUBU v0, a0
- jr ra
+ move v0, a0
+ .set eva
+1: EX(lbue, v1, (v0), .Lfault)
+ PTR_ADDIU v0, 1
+ bnez v1, 1b
+ PTR_SUBU v0, a0
+ jr ra
END(__strlen_user_asm)
-.Lfault: move v0, zero
- jr ra
+#endif
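The split above leaves a kernel-address walker (__strlen_kernel_asm, plain lbu) and an EVA user walker (__strlen_user_asm, lbue); both follow the same contract, which reads roughly as below in C (a model inferred from the loop, not a kernel declaration):

	/* returns the string length *including* the terminating NUL,
	 * or 0 if the pointer fails the limit check or a load faults */
	static long strlen_user_model(const char *s)
	{
		const char *p = s;
		while (*p++ != '\0')
			;
		return p - s;	/* PTR_SUBU v0, a0 in the assembly */
	}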
diff --git a/arch/mips/lib/strncpy_user.S b/arch/mips/lib/strncpy_user.S
index 92870b6..a613bb4 100644
--- a/arch/mips/lib/strncpy_user.S
+++ b/arch/mips/lib/strncpy_user.S
@@ -28,17 +28,17 @@
 * it happens, at most a few bytes of the exception handlers will be copied.
*/
-LEAF(__strncpy_from_user_asm)
+LEAF(__strncpy_from_kernel_asm)
LONG_L v0, TI_ADDR_LIMIT($28) # pointer ok?
and v0, a1
bnez v0, .Lfault
-FEXPORT(__strncpy_from_user_nocheck_asm)
+FEXPORT(__strncpy_from_kernel_nocheck_asm)
.set noreorder
move t0, zero
move v1, a1
-1: EX(lbu, v0, (v1), .Lfault)
- PTR_ADDIU v1, 1
+1: EX(lbu, v0, (v1), .Lfault)
+ PTR_ADDIU v1, 1
R10KCBARRIER(0(ra))
beqz v0, 2f
sb v0, (a0)
@@ -50,12 +50,40 @@
bltz v0, .Lfault
nop
jr ra # return n
- move v0, t0
+ move v0, t0
+ END(__strncpy_from_kernel_asm)
+
+.Lfault:
+ jr ra
+ li v0, -EFAULT
+
+#ifdef CONFIG_EVA
+
+LEAF(__strncpy_from_user_asm)
+ LONG_L v0, TI_ADDR_LIMIT($28) # pointer ok?
+ and v0, a1
+ bnez v0, .Lfault
+
+FEXPORT(__strncpy_from_user_nocheck_asm)
+ .set noreorder
+ move t0, zero
+ move v1, a1
+1:
+ .set eva
+ EX(lbue, v0, (v1), .Lfault)
+ PTR_ADDIU v1, 1
+ R10KCBARRIER(0(ra))
+ beqz v0, 2f
+ sb v0, (a0)
+ PTR_ADDIU t0, 1
+ bne t0, a2, 1b
+ PTR_ADDIU a0, 1
+2: PTR_ADDU v0, a1, t0
+ xor v0, a1
+ bltz v0, .Lfault
+ nop
+ jr ra # return n
+ move v0, t0
END(__strncpy_from_user_asm)
-.Lfault: jr ra
- li v0, -EFAULT
-
- .section __ex_table,"a"
- PTR 1b, .Lfault
- .previous
+#endif
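__strncpy_from_kernel_asm and the EVA __strncpy_from_user_asm implement the same contract; a hedged C model of the copy loop (fault handling elided, a faulting load yields -EFAULT):

	static long strncpy_from_user_model(char *dst, const char *src, long n)
	{
		long copied = 0;
		while (copied < n) {
			char c = *src++;	/* lbu/lbue: may fault */
			*dst++ = c;		/* the NUL, if seen, is stored too */
			if (c == '\0')
				break;
			copied++;
		}
		return copied;			/* length excluding NUL, or n */
	}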
diff --git a/arch/mips/lib/strnlen_user.S b/arch/mips/lib/strnlen_user.S
index fcacea5..75ea45d 100644
--- a/arch/mips/lib/strnlen_user.S
+++ b/arch/mips/lib/strnlen_user.S
@@ -25,22 +25,45 @@
* bytes. There's nothing secret there. On 64-bit accessing beyond
* the maximum is a tad hairier ...
*/
+LEAF(__strnlen_kernel_asm)
+ LONG_L v0, TI_ADDR_LIMIT($28) # pointer ok?
+ and v0, a0
+ bnez v0, .Lfault
+
+FEXPORT(__strnlen_kernel_nocheck_asm)
+ move v0, a0
+ PTR_ADDU a1, a0 # stop pointer
+1: beq v0, a1, 2f # limit reached?
+ EX(lb, t0, (v0), .Lfault)
+ PTR_ADDIU v0, 1
+ bnez t0, 1b
+2: PTR_SUBU v0, a0
+ jr ra
+ END(__strnlen_kernel_asm)
+
+
+.Lfault:
+ move v0, zero
+ jr ra
+
+
+#ifdef CONFIG_EVA
+
LEAF(__strnlen_user_asm)
LONG_L v0, TI_ADDR_LIMIT($28) # pointer ok?
and v0, a0
bnez v0, .Lfault
FEXPORT(__strnlen_user_nocheck_asm)
- move v0, a0
- PTR_ADDU a1, a0 # stop pointer
-1: beq v0, a1, 1f # limit reached?
- EX(lb, t0, (v0), .Lfault)
- PTR_ADDIU v0, 1
- bnez t0, 1b
-1: PTR_SUBU v0, a0
- jr ra
+ move v0, a0
+ PTR_ADDU a1, a0 # stop pointer
+1: beq v0, a1, 2f # limit reached?
+ .set eva
+ EX(lbe, t0, (v0), .Lfault)
+ PTR_ADDIU v0, 1
+ bnez t0, 1b
+2: PTR_SUBU v0, a0
+ jr ra
END(__strnlen_user_asm)
-.Lfault:
- move v0, zero
- jr ra
+#endif
diff --git a/arch/mips/math-emu/Makefile b/arch/mips/math-emu/Makefile
index 121a848..651328f 100644
--- a/arch/mips/math-emu/Makefile
+++ b/arch/mips/math-emu/Makefile
@@ -8,4 +8,5 @@
dp_tint.o dp_fint.o dp_tlong.o dp_flong.o sp_frexp.o sp_modf.o \
sp_div.o sp_mul.o sp_sub.o sp_add.o sp_fdp.o sp_cmp.o sp_logb.o \
sp_scalb.o sp_simple.o sp_tint.o sp_fint.o sp_tlong.o sp_flong.o \
- dp_sqrt.o sp_sqrt.o kernel_linkage.o dsemul.o
+ dp_sqrt.o sp_sqrt.o kernel_linkage.o dsemul.o sp_class.o dp_class.o \
+ dp_maddf.o dp_msubf.o sp_maddf.o sp_msubf.o dp_minmax.o sp_minmax.o
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
index f03771900..5531c43 100644
--- a/arch/mips/math-emu/cp1emu.c
+++ b/arch/mips/math-emu/cp1emu.c
@@ -104,7 +104,7 @@
#if __mips >= 4
/* convert condition code register number to csr bit */
-static const unsigned int fpucondbit[8] = {
+const unsigned int fpucondbit[8] = {
FPU_CSR_COND0,
FPU_CSR_COND1,
FPU_CSR_COND2,
@@ -162,7 +162,7 @@
if ((insn.mm_i_format.rt == mm_bc1f_op) ||
(insn.mm_i_format.rt == mm_bc1t_op)) {
mips32_insn.fb_format.opcode = cop1_op;
- mips32_insn.fb_format.bc = bc_op;
+ mips32_insn.fb_format.bc = rs_bc_op;
mips32_insn.fb_format.flag =
(insn.mm_i_format.rt == mm_bc1t_op) ? 1 : 0;
} else
@@ -471,6 +471,9 @@
unsigned int fcr31;
unsigned int bit;
+ if (!cpu_has_mmips)
+ return 0;
+
switch (insn.mm_i_format.opcode) {
case mm_pool32a_op:
if ((insn.mm_i_format.simmediate & MM_POOL32A_MINOR_MASK) ==
@@ -558,7 +561,7 @@
case mm_bc1t_op:
preempt_disable();
if (is_fpu_owner())
- asm volatile("cfc1\t%0,$31" : "=r" (fcr31));
+ fcr31 = fpu_get_fcr31();
else
fcr31 = current->thread.fpu.fcr31;
preempt_enable();
@@ -673,12 +676,25 @@
* a single subroutine should be used across both
* modules.
*/
+/* Note on R6 compact branches:
+ * Compact branches do not raise exceptions (besides BC1EQZ/BC1NEZ)
+ * and do not execute the instruction in the forbidden slot when the
+ * branch is taken. The return EPC for them can therefore safely be
+ * set to EPC + 8, since the only way to get a BD-precise exception
+ * is executing the forbidden-slot instruction on a not-taken branch.
+ *
+ * Unconditional compact jumps/branches are included for completeness
+ * (they do not actually raise BD-precise exceptions).
+ */
static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
unsigned long *contpc)
{
union mips_instruction insn = (union mips_instruction)dec_insn.insn;
- unsigned int fcr31;
unsigned int bit = 0;
+ unsigned int fcr31;
+#ifdef CONFIG_CPU_MIPSR6
+ int reg;
+#endif
switch (insn.i_format.opcode) {
case spec_op:
@@ -687,8 +703,13 @@
regs->regs[insn.r_format.rd] =
regs->cp0_epc + dec_insn.pc_inc +
dec_insn.next_pc_inc;
- /* Fall through */
+ *contpc = regs->regs[insn.r_format.rs];
+ return 1;
case jr_op:
+#ifdef CONFIG_CPU_MIPSR6
+ if (!mipsr2_emulation)
+ break;
+#endif
*contpc = regs->regs[insn.r_format.rs];
return 1;
break;
@@ -696,14 +717,20 @@
break;
case bcond_op:
switch (insn.i_format.rt) {
- case bltzal_op:
case bltzall_op:
+#ifdef CONFIG_CPU_MIPSR6
+ if (!mipsr2_emulation)
+ break;
+#endif
+ case bltzal_op:
+#ifdef CONFIG_CPU_MIPSR6
+ /* MIPSR6: nal == bltzal $0 */
+ if (insn.i_format.rs && !mipsr2_emulation)
+ break;
+#endif
regs->regs[31] = regs->cp0_epc +
dec_insn.pc_inc +
dec_insn.next_pc_inc;
- /* Fall through */
- case bltz_op:
- case bltzl_op:
if ((long)regs->regs[insn.i_format.rs] < 0)
*contpc = regs->cp0_epc +
dec_insn.pc_inc +
@@ -714,14 +741,55 @@
dec_insn.next_pc_inc;
return 1;
break;
- case bgezal_op:
+
+ case bltzl_op:
+#ifdef CONFIG_CPU_MIPSR6
+ if (!mipsr2_emulation)
+ break;
+#endif
+ case bltz_op:
+ if ((long)regs->regs[insn.i_format.rs] < 0)
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ (insn.i_format.simmediate << 2);
+ else
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+ return 1;
+ break;
+
case bgezall_op:
+#ifdef CONFIG_CPU_MIPSR6
+ if (!mipsr2_emulation)
+ break;
+#endif
+ case bgezal_op:
+#ifdef CONFIG_CPU_MIPSR6
+ /* MIPSR6: bal == bgezal $0 */
+ if (insn.i_format.rs && !mipsr2_emulation)
+ break;
+#endif
regs->regs[31] = regs->cp0_epc +
dec_insn.pc_inc +
dec_insn.next_pc_inc;
- /* Fall through */
- case bgez_op:
+ if ((long)regs->regs[insn.i_format.rs] >= 0)
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ (insn.i_format.simmediate << 2);
+ else
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+ return 1;
+ break;
+
case bgezl_op:
+#ifdef CONFIG_CPU_MIPSR6
+ if (!mipsr2_emulation)
+ break;
+#endif
+ case bgez_op:
if ((long)regs->regs[insn.i_format.rs] >= 0)
*contpc = regs->cp0_epc +
dec_insn.pc_inc +
@@ -750,8 +818,12 @@
*contpc ^= bit;
return 1;
break;
- case beq_op:
case beql_op:
+#ifdef CONFIG_CPU_MIPSR6
+ if (!mipsr2_emulation)
+ break;
+#endif
+ case beq_op:
if (regs->regs[insn.i_format.rs] ==
regs->regs[insn.i_format.rt])
*contpc = regs->cp0_epc +
@@ -763,8 +835,12 @@
dec_insn.next_pc_inc;
return 1;
break;
- case bne_op:
case bnel_op:
+#ifdef CONFIG_CPU_MIPSR6
+ if (!mipsr2_emulation)
+ break;
+#endif
+ case bne_op:
if (regs->regs[insn.i_format.rs] !=
regs->regs[insn.i_format.rt])
*contpc = regs->cp0_epc +
@@ -778,6 +854,27 @@
break;
case blez_op:
case blezl_op:
+#ifdef CONFIG_CPU_MIPSR6
+ /*
+ * Compact branches: (blez:) blezalc, bgezalc, bgeuc
+ * Compact branches: (blezl:) blezc, bgezc, bgec
+ */
+ if (insn.i_format.rt) {
+ if (insn.i_format.opcode == blez_op)
+ if ((insn.i_format.rs == insn.i_format.rt) ||
+ !insn.i_format.rs) /* blezalc, bgezalc */
+ regs->regs[31] = regs->cp0_epc +
+ dec_insn.pc_inc;
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+ return 1;
+ break;
+ }
+
+ if ((insn.i_format.opcode != blez_op) && !mipsr2_emulation)
+ break;
+#endif
if ((long)regs->regs[insn.i_format.rs] <= 0)
*contpc = regs->cp0_epc +
dec_insn.pc_inc +
@@ -788,8 +885,30 @@
dec_insn.next_pc_inc;
return 1;
break;
+
case bgtz_op:
case bgtzl_op:
+#ifdef CONFIG_CPU_MIPSR6
+ /*
+ * Compact branches: (bgtz:) bltzalc, bgtzalc, bltuc
+ * Compact branches: (bgtzl:) bltc, bltzc, bgtzc
+ */
+ if (insn.i_format.rt) {
+ if (insn.i_format.opcode == bgtz_op)
+ if ((insn.i_format.rs == insn.i_format.rt) ||
+ !insn.i_format.rs) /* bltzalc, bgtzalc */
+ regs->regs[31] = regs->cp0_epc +
+ dec_insn.pc_inc;
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+ return 1;
+ break;
+ }
+
+ if ((insn.i_format.opcode != bgtz_op) && !mipsr2_emulation)
+ break;
+#endif
if ((long)regs->regs[insn.i_format.rs] > 0)
*contpc = regs->cp0_epc +
dec_insn.pc_inc +
@@ -800,14 +919,67 @@
dec_insn.next_pc_inc;
return 1;
break;
- case cop0_op:
+
+#ifdef CONFIG_CPU_MIPSR6
+ case cbcond0_op:
+ /*
+ * Compact branches: bovc, beqc, beqzalc
+ */
+
+ /* fall through */
+ case cbcond1_op:
+ /*
+ * Compact branches: bnvc, bnec, bnezalc
+ */
+ if (insn.i_format.rt && !insn.i_format.rs) /* beqzalc/bnezalc */
+ regs->regs[31] = regs->cp0_epc +
+ dec_insn.pc_inc;
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+ return 1;
+#endif
+
case cop1_op:
+#ifdef CONFIG_CPU_MIPSR6
+ if ((insn.i_format.rs == bc1eqz_op) ||
+ (insn.i_format.rs == bc1nez_op)) {
+
+ reg = insn.i_format.rt;
+ bit = 0;
+ switch (insn.i_format.rs) {
+ case bc1eqz_op:
+ if (!(get_fpr64(&current->thread.fpu.fpr[reg], 0) & (__u64)0x1))
+ bit = 1;
+ break;
+ case bc1nez_op:
+ if (get_fpr64(&current->thread.fpu.fpr[reg], 0) & (__u64)0x1)
+ bit = 1;
+ break;
+ }
+ if (bit)
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ (insn.i_format.simmediate << 2);
+ else
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+ return 1;
+ break;
+ }
+#endif
+ case cop0_op:
case cop2_op:
case cop1x_op:
- if (insn.i_format.rs == bc_op) {
+#ifdef CONFIG_CPU_MIPSR6
+ if (!mipsr2_emulation)
+ break;
+#endif
+ if (insn.i_format.rs == rs_bc_op) {
preempt_disable();
if (is_fpu_owner())
- asm volatile("cfc1\t%0,$31" : "=r" (fcr31));
+ fcr31 = fpu_get_fcr31();
else
fcr31 = current->thread.fpu.fcr31;
preempt_enable();
@@ -843,6 +1015,32 @@
}
}
break;
+
+#ifdef CONFIG_CPU_MIPSR6
+ case bc_op:
+ break;
+
+ case jump_op:
+ if (insn.i_format.rs) { /* beqzc */
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+ return 1;
+ }
+ break;
+
+ case balc_op:
+ break;
+
+ case jump2_op:
+ if (insn.i_format.rs) { /* bnezc */
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+ return 1;
+ }
+ break;
+#endif
}
return 0;
}
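One detail of the BC1EQZ/BC1NEZ handling above: R6 drops the FCSR condition codes, so the branch decision is bit 0 of the FPR named by rt. A minimal model of the test, reusing get_fpr64() as the hunk does:

	/* taken for BC1EQZ when bit 0 of the register is clear,
	 * for BC1NEZ when it is set */
	int taken = get_fpr64(&current->thread.fpu.fpr[reg], 0) & 1;
	if (insn.i_format.rs == bc1eqz_op)
		taken = !taken;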
@@ -859,25 +1057,51 @@
*/
static inline int cop1_64bit(struct pt_regs *xcp)
{
-#if defined(CONFIG_64BIT) && !defined(CONFIG_MIPS32_O32)
- return 1;
-#elif defined(CONFIG_64BIT) && defined(CONFIG_MIPS32_O32)
- return !test_thread_flag(TIF_32BIT_REGS);
-#else
- return 0;
-#endif
+ return test_thread_local_flags(LTIF_FPU_FR);
}
-#define SIFROMREG(si, x) ((si) = cop1_64bit(xcp) || !(x & 1) ? \
- (int)ctx->fpr[x] : (int)(ctx->fpr[x & ~1] >> 32))
+static inline bool hybrid_fprs(void)
+{
+ return test_thread_local_flags(LTIF_FPU_FRE);
+}
-#define SITOREG(si, x) (ctx->fpr[x & ~(cop1_64bit(xcp) == 0)] = \
- cop1_64bit(xcp) || !(x & 1) ? \
- ctx->fpr[x & ~1] >> 32 << 32 | (u32)(si) : \
- ctx->fpr[x & ~1] << 32 >> 32 | (u64)(si) << 32)
+#define SIFROMREG(si, x) do { \
+ if (cop1_64bit(xcp) && !hybrid_fprs()) \
+ (si) = (int)get_fpr32(&ctx->fpr[x], 0); \
+ else \
+ (si) = (int)get_fpr32(&ctx->fpr[(x) & ~1], (x) & 1); \
+} while (0)
-#define DIFROMREG(di, x) ((di) = ctx->fpr[x & ~(cop1_64bit(xcp) == 0)])
-#define DITOREG(di, x) (ctx->fpr[x & ~(cop1_64bit(xcp) == 0)] = (di))
+#define SITOREG(si, x) do { \
+ if (cop1_64bit(xcp) && !hybrid_fprs()) { \
+ unsigned i; \
+ set_fpr32(&ctx->fpr[x], 0, si); \
+ for (i = 1; i < ARRAY_SIZE(ctx->fpr[x].val32); i++) \
+ set_fpr32(&ctx->fpr[x], i, 0); \
+ } else { \
+ set_fpr32(&ctx->fpr[(x) & ~1], (x) & 1, si); \
+ } \
+} while (0)
+
+#define SIFROMHREG(si, x) ((si) = (int)get_fpr32(&ctx->fpr[x], 1))
+
+#define SITOHREG(si, x) do { \
+ unsigned i; \
+ set_fpr32(&ctx->fpr[x], 1, si); \
+ for (i = 2; i < ARRAY_SIZE(ctx->fpr[x].val32); i++) \
+ set_fpr32(&ctx->fpr[x], i, 0); \
+} while (0)
+
+#define DIFROMREG(di, x) \
+ ((di) = get_fpr64(&ctx->fpr[(x) & ~(cop1_64bit(xcp) == 0)], 0))
+
+#define DITOREG(di, x) do { \
+ unsigned fpr, i; \
+ fpr = (x) & ~(cop1_64bit(xcp) == 0); \
+ set_fpr64(&ctx->fpr[fpr], 0, di); \
+ for (i = 1; i < ARRAY_SIZE(ctx->fpr[x].val64); i++) \
+ set_fpr64(&ctx->fpr[fpr], i, 0); \
+} while (0)
#define SPFROMREG(sp, x) SIFROMREG((sp).bits, x)
#define SPTOREG(sp, x) SITOREG((sp).bits, x)
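The rewritten accessors above encode the FR-mode register model: with 64-bit FPRs (FR=1, LTIF_FPU_FR set) every register is independent, while under FR=0 an odd single-precision register aliases the upper 32 bits of the preceding even register. A hedged C rendering of SIFROMREG's index math, ignoring the byte-order detail that get_fpr32() hides:

	static inline unsigned int read_single_sketch(const unsigned long long *fpr,
						      int x, int fr64)
	{
		if (fr64)	/* FR=1: the value lives in fpr[x] itself */
			return (unsigned int)fpr[x];
		/* FR=0: odd x selects the top half of the even pair */
		return (unsigned int)(fpr[x & ~1] >> ((x & 1) * 32));
	}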
@@ -894,9 +1118,14 @@
{
mips_instruction ir;
unsigned long contpc = xcp->cp0_epc + dec_insn.pc_inc;
+ unsigned long r31;
+ unsigned long s_epc;
unsigned int cond;
int pc_inc;
+ int likely;
+ s_epc = xcp->cp0_epc;
+ r31 = xcp->regs[31];
/* XXX NEC Vr54xx bug workaround */
if (xcp->cp0_cause & CAUSEF_BD) {
if (dec_insn.micro_mips_mode) {
@@ -960,6 +1189,7 @@
u64 val;
MIPS_FPU_EMU_INC_STATS(loads);
+ MIPS_FPU_EMU_INC_STATS(dprecision);
if (!access_ok(VERIFY_READ, va, sizeof(u64))) {
MIPS_FPU_EMU_INC_STATS(errors);
@@ -981,6 +1211,7 @@
u64 val;
MIPS_FPU_EMU_INC_STATS(stores);
+ MIPS_FPU_EMU_INC_STATS(dprecision);
DIFROMREG(val, MIPSInst_RT(ir));
if (!access_ok(VERIFY_WRITE, va, sizeof(u64))) {
MIPS_FPU_EMU_INC_STATS(errors);
@@ -1001,6 +1232,7 @@
u32 val;
MIPS_FPU_EMU_INC_STATS(loads);
+ MIPS_FPU_EMU_INC_STATS(sprecision);
if (!access_ok(VERIFY_READ, va, sizeof(u32))) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = va;
@@ -1021,6 +1253,7 @@
u32 val;
MIPS_FPU_EMU_INC_STATS(stores);
+ MIPS_FPU_EMU_INC_STATS(sprecision);
SIFROMREG(val, MIPSInst_RT(ir));
if (!access_ok(VERIFY_WRITE, va, sizeof(u32))) {
MIPS_FPU_EMU_INC_STATS(errors);
@@ -1040,6 +1273,7 @@
#if defined(__mips64)
case dmfc_op:
+ MIPS_FPU_EMU_INC_STATS(dprecision);
/* copregister fs -> gpr[rt] */
if (MIPSInst_RT(ir) != 0) {
DIFROMREG(xcp->regs[MIPSInst_RT(ir)],
@@ -1048,12 +1282,31 @@
break;
case dmtc_op:
+ MIPS_FPU_EMU_INC_STATS(dprecision);
/* copregister fs <- rt */
DITOREG(xcp->regs[MIPSInst_RT(ir)], MIPSInst_RD(ir));
break;
#endif
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
+ case mfhc_op:
+ MIPS_FPU_EMU_INC_STATS(sprecision);
+ /* copregister rd -> gpr[rt] */
+ if (MIPSInst_RT(ir) != 0) {
+ SIFROMHREG(xcp->regs[MIPSInst_RT(ir)],
+ MIPSInst_RD(ir));
+ }
+ break;
+
+ case mthc_op:
+ MIPS_FPU_EMU_INC_STATS(sprecision);
+ /* copregister rd <- gpr[rt] */
+ SITOHREG(xcp->regs[MIPSInst_RT(ir)], MIPSInst_RD(ir));
+ break;
+#endif
+
case mfc_op:
+ MIPS_FPU_EMU_INC_STATS(sprecision);
/* copregister rd -> gpr[rt] */
if (MIPSInst_RT(ir) != 0) {
SIFROMREG(xcp->regs[MIPSInst_RT(ir)],
@@ -1062,6 +1315,7 @@
break;
case mtc_op:
+ MIPS_FPU_EMU_INC_STATS(sprecision);
/* copregister rd <- rt */
SITOREG(xcp->regs[MIPSInst_RT(ir)], MIPSInst_RD(ir));
break;
@@ -1121,8 +1375,42 @@
break;
}
- case bc_op:{
- int likely = 0;
+#ifdef CONFIG_CPU_MIPSR6
+ case bc1eqz_op:
+ case bc1nez_op:
+ {
+ int reg;
+
+ if (xcp->cp0_cause & CAUSEF_BD)
+ return SIGILL;
+
+ likely = 0;
+
+ reg = MIPSInst_FT(ir);
+ cond = 0;
+ MIPS_FPU_EMU_INC_STATS(dprecision);
+ switch (MIPSInst_RS(ir)) {
+ case bc1eqz_op:
+ if (!(get_fpr64(&current->thread.fpu.fpr[reg], 0) & (__u64)0x1))
+ cond = 1;
+ break;
+ case bc1nez_op:
+ if (get_fpr64(&current->thread.fpu.fpr[reg], 0) & (__u64)0x1)
+ cond = 1;
+ break;
+ }
+ }
+ goto branch_cont;
+
+#endif /* CONFIG_CPU_MIPSR6 */
+ case rs_bc_op:
+#ifdef CONFIG_CPU_MIPSR6
+ if (!mipsr2_emulation)
+ goto default_op;
+#endif
+
+ {
+ likely = 0;
if (xcp->cp0_cause & CAUSEF_BD)
return SIGILL;
@@ -1146,7 +1434,11 @@
 /* that's an illegal instruction */
return SIGILL;
}
+#ifdef CONFIG_CPU_MIPSR6
+ }
+branch_cont: {
+#endif
xcp->cp0_cause |= CAUSEF_BD;
if (cond) {
/* branch taken: emulate dslot
@@ -1177,7 +1469,7 @@
* Single step the non-CP1
* instruction in the dslot.
*/
- return mips_dsemul(xcp, ir, contpc);
+ return mips_dsemul(xcp, ir, contpc, s_epc, r31);
}
} else
contpc = (xcp->cp0_epc + (contpc << 2));
@@ -1197,8 +1489,11 @@
goto emul;
#if __mips >= 4
case spec_op:
- if (MIPSInst_FUNC(ir) == movc_op)
- goto emul;
+#ifdef CONFIG_CPU_MIPSR6
+ if (mipsr2_emulation)
+#endif
+ if (MIPSInst_FUNC(ir) == movc_op)
+ goto emul;
break;
#endif
}
@@ -1207,9 +1502,8 @@
* Single step the non-cp1
* instruction in the dslot
*/
- return mips_dsemul(xcp, ir, contpc);
- }
- else {
+ return mips_dsemul(xcp, ir, contpc, s_epc, r31);
+ } else {
/* branch not taken */
if (likely) {
/*
@@ -1228,6 +1522,9 @@
}
default:
+#ifdef CONFIG_CPU_MIPSR6
+default_op:
+#endif
if (!(MIPSInst_RS(ir) & 0x10))
return SIGILL;
{
@@ -1251,6 +1548,10 @@
#if __mips >= 4
case spec_op:
+#ifdef CONFIG_CPU_MIPSR6
+ if (!mipsr2_emulation)
+ return SIGILL;
+#endif
if (MIPSInst_FUNC(ir) != movc_op)
return SIGILL;
cond = fpucondbit[MIPSInst_RT(ir) >> 2];
@@ -1261,6 +1562,7 @@
#endif
default:
+ xcp->regs[31] = r31;
return SIGILL;
}
@@ -1285,6 +1587,12 @@
IEEE754_CLT | IEEE754_CEQ, /* cmp_ole (sig) cmp_le */
IEEE754_CLT | IEEE754_CEQ | IEEE754_CUN, /* cmp_ule (sig) cmp_ngt */
};
+static const unsigned char ncmptab[8] = {
+ 0,
+ IEEE754_CLT | IEEE754_CGT | IEEE754_CEQ,
+ IEEE754_CLT | IEEE754_CGT | IEEE754_CUN,
+ IEEE754_CLT | IEEE754_CGT,
+};
#if __mips >= 4 && __mips != 32
@@ -1353,12 +1661,14 @@
u32 __user *va;
u32 val;
+ MIPS_FPU_EMU_INC_STATS(sprecision);
switch (MIPSInst_FUNC(ir)) {
case lwxc1_op:
va = (void __user *) (xcp->regs[MIPSInst_FR(ir)] +
xcp->regs[MIPSInst_FT(ir)]);
MIPS_FPU_EMU_INC_STATS(loads);
+ MIPS_FPU_EMU_INC_STATS(sprecision);
if (!access_ok(VERIFY_READ, va, sizeof(u32))) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = va;
@@ -1377,6 +1687,7 @@
xcp->regs[MIPSInst_FT(ir)]);
MIPS_FPU_EMU_INC_STATS(stores);
+ MIPS_FPU_EMU_INC_STATS(sprecision);
SIFROMREG(val, MIPSInst_FS(ir));
if (!access_ok(VERIFY_WRITE, va, sizeof(u32))) {
@@ -1442,12 +1753,14 @@
u64 __user *va;
u64 val;
+ MIPS_FPU_EMU_INC_STATS(dprecision);
switch (MIPSInst_FUNC(ir)) {
case ldxc1_op:
va = (void __user *) (xcp->regs[MIPSInst_FR(ir)] +
xcp->regs[MIPSInst_FT(ir)]);
MIPS_FPU_EMU_INC_STATS(loads);
+ MIPS_FPU_EMU_INC_STATS(dprecision);
if (!access_ok(VERIFY_READ, va, sizeof(u64))) {
MIPS_FPU_EMU_INC_STATS(errors);
*fault_addr = va;
@@ -1466,6 +1779,7 @@
xcp->regs[MIPSInst_FT(ir)]);
MIPS_FPU_EMU_INC_STATS(stores);
+ MIPS_FPU_EMU_INC_STATS(dprecision);
DIFROMREG(val, MIPSInst_FS(ir));
if (!access_ok(VERIFY_WRITE, va, sizeof(u64))) {
MIPS_FPU_EMU_INC_STATS(errors);
@@ -1506,10 +1820,10 @@
break;
}
- case 0x7: /* 7 */
- if (MIPSInst_FUNC(ir) != pfetch_op) {
+ case 0x3:
+ if (MIPSInst_FUNC(ir) != pfetch_op)
return SIGILL;
- }
+
/* ignore prefx operation */
break;
@@ -1539,6 +1853,9 @@
#ifdef __mips64
s64 l;
#endif
+#ifdef CONFIG_CPU_MIPSR6
+ s64 ll;
+#endif
} rv; /* resulting value */
MIPS_FPU_EMU_INC_STATS(cp1ops);
@@ -1549,6 +1866,7 @@
ieee754sp(*u) (ieee754sp);
} handler;
+ MIPS_FPU_EMU_INC_STATS(sprecision);
switch (MIPSInst_FUNC(ir)) {
/* binary ops */
case fadd_op:
@@ -1677,6 +1995,108 @@
}
#endif /* __mips >= 2 */
+#ifdef CONFIG_CPU_MIPSR6
+ case fseleqz_op:{
+ SPFROMREG(rv.s, MIPSInst_FT(ir));
+ if (rv.w & 0x1)
+ rv.w = 0;
+ else
+ SPFROMREG(rv.s, MIPSInst_FS(ir));
+ break;
+ }
+
+ case fselnez_op:{
+ SPFROMREG(rv.s, MIPSInst_FT(ir));
+ if (rv.w & 0x1)
+ SPFROMREG(rv.s, MIPSInst_FS(ir));
+ else
+ rv.w = 0;
+ break;
+ }
+
+ case frint_op:{
+ ieee754sp fs;
+
+ SPFROMREG(fs, MIPSInst_FS(ir));
+ rv.ll = ieee754sp_tlong(fs);
+ rv.s = ieee754sp_flong(rv.ll);
+ goto copcsr;
+ }
+
+ case fclass_op:{
+ ieee754sp fs;
+
+ SPFROMREG(fs, MIPSInst_FS(ir));
+ rv.w = ieee754_2008sp_class(fs);
+ rfmt = w_fmt;
+ break;
+ }
+
+ case fmaddf_op:{
+ ieee754sp fs;
+ ieee754sp ft;
+ ieee754sp fd;
+
+ SPFROMREG(fs, MIPSInst_FS(ir));
+ SPFROMREG(ft, MIPSInst_FT(ir));
+ SPFROMREG(fd, MIPSInst_FD(ir));
+ rv.s = ieee754sp_maddf(fd, fs, ft);
+ break;
+ }
+
+ case fmsubf_op:{
+ ieee754sp fs;
+ ieee754sp ft;
+ ieee754sp fd;
+
+ SPFROMREG(fs, MIPSInst_FS(ir));
+ SPFROMREG(ft, MIPSInst_FT(ir));
+ SPFROMREG(fd, MIPSInst_FD(ir));
+ rv.s = ieee754sp_msubf(fd, fs, ft);
+ break;
+ }
+
+ case fmin_op:{
+ ieee754sp fs;
+ ieee754sp ft;
+
+ SPFROMREG(fs, MIPSInst_FS(ir));
+ SPFROMREG(ft, MIPSInst_FT(ir));
+ rv.s = ieee754sp_fmin(fs, ft);
+ break;
+ }
+
+ case fmax_op:{
+ ieee754sp fs;
+ ieee754sp ft;
+
+ SPFROMREG(fs, MIPSInst_FS(ir));
+ SPFROMREG(ft, MIPSInst_FT(ir));
+ rv.s = ieee754sp_fmax(fs, ft);
+ break;
+ }
+
+ case fmina_op:{
+ ieee754sp fs;
+ ieee754sp ft;
+
+ SPFROMREG(fs, MIPSInst_FS(ir));
+ SPFROMREG(ft, MIPSInst_FT(ir));
+ rv.s = ieee754sp_fmina(fs, ft);
+ break;
+ }
+
+ case fmaxa_op:{
+ ieee754sp fs;
+ ieee754sp ft;
+
+ SPFROMREG(fs, MIPSInst_FS(ir));
+ SPFROMREG(ft, MIPSInst_FT(ir));
+ rv.s = ieee754sp_fmaxa(fs, ft);
+ break;
+ }
+#endif
+
#if defined(__mips64)
case fcvtl_op:{
ieee754sp fs;
@@ -1703,6 +2123,20 @@
}
#endif /* defined(__mips64) */
+#ifdef CONFIG_CPU_MIPSR6
+ case fsel_op:{
+ ieee754sp ftmp;
+
+ SPFROMREG(ftmp, MIPSInst_FD(ir));
+ if (ftmp.bits & 0x1)
+ SPFROMREG(ftmp, MIPSInst_FT(ir));
+ else
+ SPFROMREG(ftmp, MIPSInst_FS(ir));
+ rv.s = ftmp;
+ break;
+ }
+#endif
+
default:
if (MIPSInst_FUNC(ir) >= fcmp_op) {
unsigned cmpop = MIPSInst_FUNC(ir) - fcmp_op;
@@ -1734,6 +2168,7 @@
ieee754dp(*u) (ieee754dp);
} handler;
+ MIPS_FPU_EMU_INC_STATS(dprecision);
switch (MIPSInst_FUNC(ir)) {
/* binary ops */
case fadd_op:
@@ -1851,6 +2286,108 @@
}
#endif
+#ifdef CONFIG_CPU_MIPSR6
+ case fseleqz_op:{
+ DPFROMREG(rv.d, MIPSInst_FT(ir));
+ if (rv.ll & 0x1)
+ rv.ll = 0;
+ else
+ DPFROMREG(rv.d, MIPSInst_FS(ir));
+ break;
+ }
+
+ case fselnez_op:{
+ DPFROMREG(rv.d, MIPSInst_FT(ir));
+ if (rv.ll & 0x1)
+ DPFROMREG(rv.d, MIPSInst_FS(ir));
+ else
+ rv.ll = 0;
+ break;
+ }
+
+ case frint_op:{
+ ieee754dp fs;
+
+ DPFROMREG(fs, MIPSInst_FS(ir));
+ rv.ll = ieee754dp_tlong(fs);
+ rv.d = ieee754dp_flong(rv.ll);
+ goto copcsr;
+ }
+
+ case fclass_op:{
+ ieee754dp fs;
+
+ DPFROMREG(fs, MIPSInst_FS(ir));
+ rv.w = ieee754_2008dp_class(fs);
+ rfmt = w_fmt;
+ break;
+ }
+
+ case fmaddf_op:{
+ ieee754dp fs;
+ ieee754dp ft;
+ ieee754dp fd;
+
+ DPFROMREG(fs, MIPSInst_FS(ir));
+ DPFROMREG(ft, MIPSInst_FT(ir));
+ DPFROMREG(fd, MIPSInst_FD(ir));
+ rv.d = ieee754dp_maddf(fd, fs, ft);
+ break;
+ }
+
+ case fmsubf_op:{
+ ieee754dp fs;
+ ieee754dp ft;
+ ieee754dp fd;
+
+ DPFROMREG(fs, MIPSInst_FS(ir));
+ DPFROMREG(ft, MIPSInst_FT(ir));
+ DPFROMREG(fd, MIPSInst_FD(ir));
+ rv.d = ieee754dp_msubf(fd, fs, ft);
+ break;
+ }
+
+ case fmin_op:{
+ ieee754dp fs;
+ ieee754dp ft;
+
+ DPFROMREG(fs, MIPSInst_FS(ir));
+ DPFROMREG(ft, MIPSInst_FT(ir));
+ rv.d = ieee754dp_fmin(fs, ft);
+ break;
+ }
+
+ case fmax_op:{
+ ieee754dp fs;
+ ieee754dp ft;
+
+ DPFROMREG(fs, MIPSInst_FS(ir));
+ DPFROMREG(ft, MIPSInst_FT(ir));
+ rv.d = ieee754dp_fmax(fs, ft);
+ break;
+ }
+
+ case fmina_op:{
+ ieee754dp fs;
+ ieee754dp ft;
+
+ DPFROMREG(fs, MIPSInst_FS(ir));
+ DPFROMREG(ft, MIPSInst_FT(ir));
+ rv.d = ieee754dp_fmina(fs, ft);
+ break;
+ }
+
+ case fmaxa_op:{
+ ieee754dp fs;
+ ieee754dp ft;
+
+ DPFROMREG(fs, MIPSInst_FS(ir));
+ DPFROMREG(ft, MIPSInst_FT(ir));
+ rv.d = ieee754dp_fmaxa(fs, ft);
+ break;
+ }
+#endif
+
#if defined(__mips64)
case fcvtl_op:{
ieee754dp fs;
@@ -1877,6 +2414,20 @@
}
#endif /* __mips >= 3 */
+#ifdef CONFIG_CPU_MIPSR6
+ case fsel_op:{
+ ieee754dp ftmp;
+
+ DPFROMREG(ftmp, MIPSInst_FD(ir));
+ if (ftmp.bits & 0x1)
+ DPFROMREG(ftmp, MIPSInst_FT(ir));
+ else
+ DPFROMREG(ftmp, MIPSInst_FS(ir));
+ rv.d = ftmp;
+ break;
+ }
+#endif
+
default:
if (MIPSInst_FUNC(ir) >= fcmp_op) {
unsigned cmpop = MIPSInst_FUNC(ir) - fcmp_op;
@@ -1909,42 +2460,144 @@
switch (MIPSInst_FUNC(ir)) {
case fcvts_op:
+ MIPS_FPU_EMU_INC_STATS(sprecision);
/* convert word to single precision real */
SPFROMREG(fs, MIPSInst_FS(ir));
rv.s = ieee754sp_fint(fs.bits);
rfmt = s_fmt;
goto copcsr;
case fcvtd_op:
+ MIPS_FPU_EMU_INC_STATS(dprecision);
/* convert word to double precision real */
SPFROMREG(fs, MIPSInst_FS(ir));
rv.d = ieee754dp_fint(fs.bits);
rfmt = d_fmt;
goto copcsr;
- default:
+ default: {
+#ifdef CONFIG_CPU_MIPSR6
+ unsigned cmpop = MIPSInst_FUNC(ir) & 0xf;
+ ieee754sp fs, ft;
+
+ if (!(MIPSInst_RS(ir) & 0x10))
+ return SIGILL;
+
+ MIPS_FPU_EMU_INC_STATS(sprecision);
+ rv.w = 0;
+ rfmt = s_fmt;
+ SPFROMREG(fs, MIPSInst_FS(ir));
+ SPFROMREG(ft, MIPSInst_FT(ir));
+ if ((MIPSInst_FUNC(ir) & 0x10) == 0) {
+ if (ieee754sp_cmp(fs, ft,
+ cmptab[cmpop & 0x7], cmpop & 0x8))
+ rv.w = -1;
+ if ((cmpop & 0x8) && ieee754_cxtest
+ (IEEE754_INVALID_OPERATION))
+ rcsr = FPU_CSR_INV_X | FPU_CSR_INV_S;
+ else
+ goto copcsr;
+ } else {
+ /* negative predicates */
+ switch (cmpop & 0x7) {
+ case 1:
+ case 2:
+ case 3:
+ if (ieee754sp_cmp(fs, ft,
+ ncmptab[cmpop & 0x7], cmpop & 0x8))
+ rv.w = -1;
+ if ((cmpop & 0x8) && ieee754_cxtest
+ (IEEE754_INVALID_OPERATION))
+ rcsr = FPU_CSR_INV_X | FPU_CSR_INV_S;
+ else
+ goto copcsr;
+ break;
+ default:
+ return SIGILL;
+ }
+ }
+ break;
+#else
return SIGILL;
+#endif
+ }
}
break;
}
-#if defined(__mips64)
case l_fmt:{
switch (MIPSInst_FUNC(ir)) {
- case fcvts_op:
+#if defined(__mips64)
+ case fcvts_op: {
+ u64 bits;
+ MIPS_FPU_EMU_INC_STATS(sprecision);
+ DIFROMREG(bits, MIPSInst_FS(ir));
+
/* convert long to single precision real */
- rv.s = ieee754sp_flong(ctx->fpr[MIPSInst_FS(ir)]);
+ rv.s = ieee754sp_flong(bits);
rfmt = s_fmt;
goto copcsr;
- case fcvtd_op:
+ }
+ case fcvtd_op: {
+ u64 bits;
+ MIPS_FPU_EMU_INC_STATS(dprecision);
+ DIFROMREG(bits, MIPSInst_FS(ir));
+
/* convert long to double precision real */
- rv.d = ieee754dp_flong(ctx->fpr[MIPSInst_FS(ir)]);
+ rv.d = ieee754dp_flong(bits);
rfmt = d_fmt;
goto copcsr;
- default:
+ }
+#endif
+ default: {
+#ifdef CONFIG_CPU_MIPSR6
+ unsigned cmpop = MIPSInst_FUNC(ir) & 0xf;
+ ieee754dp fs, ft;
+
+ if (!(MIPSInst_RS(ir) & 0x10))
+ return SIGILL;
+
+ MIPS_FPU_EMU_INC_STATS(dprecision);
+ rv.ll = 0;
+ rfmt = d_fmt;
+ DPFROMREG(fs, MIPSInst_FS(ir));
+ DPFROMREG(ft, MIPSInst_FT(ir));
+ if ((MIPSInst_FUNC(ir) & 0x10) == 0) {
+ if (ieee754dp_cmp(fs, ft,
+ cmptab[cmpop & 0x7], cmpop & 0x8))
+ rv.ll = -1LL;
+ if ((cmpop & 0x8) && ieee754_cxtest
+ (IEEE754_INVALID_OPERATION))
+ rcsr = FPU_CSR_INV_X | FPU_CSR_INV_S;
+ else
+ goto copcsr;
+ } else {
+ /* negative predicates */
+ switch (cmpop & 0x7) {
+ case 1:
+ case 2:
+ case 3:
+ if (ieee754dp_cmp(fs, ft,
+ ncmptab[cmpop & 0x7], cmpop & 0x8))
+ rv.ll = -1LL;
+ if ((cmpop & 0x8) && ieee754_cxtest
+ (IEEE754_INVALID_OPERATION))
+ rcsr = FPU_CSR_INV_X | FPU_CSR_INV_S;
+ else
+ goto copcsr;
+ break;
+ default:
+ return SIGILL;
+ }
+ }
+ break;
+#else
return SIGILL;
+#endif
+ }
}
break;
}
-#endif
default:
return SIGILL;
@@ -2093,6 +2746,7 @@
break;
cond_resched();
+ thread_fpu_flags_update();
} while (xcp->cp0_epc > prevepc);
/* SIGILL indicates a non-fpu instruction */
@@ -2147,6 +2801,8 @@
FPU_STAT_CREATE(cp1ops);
FPU_STAT_CREATE(cp1xops);
FPU_STAT_CREATE(errors);
+ FPU_STAT_CREATE(sprecision);
+ FPU_STAT_CREATE(dprecision);
return 0;
}
diff --git a/arch/mips/math-emu/dp_class.c b/arch/mips/math-emu/dp_class.c
new file mode 100644
index 0000000..16ad70c
--- /dev/null
+++ b/arch/mips/math-emu/dp_class.c
@@ -0,0 +1,78 @@
+/* IEEE754-2008 floating point arithmetic
+ * double precision: CLASS
+ */
+/*
+ * MIPS floating point support
+ * Copyright (C) 2015, Imagination Technologies, LTD.
+ * Author: Leonid.Yegoshin@imgtec.com
+ *
+ * ########################################################################
+ *
+ * This program is free software; you can distribute it and/or modify it
+ * under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * ########################################################################
+ */
+
+
+#include "ieee754dp.h"
+
+/*
+ * CLASS.fmt returns:
+ *
+ * bit 0: SNAN
+ * 1: QNAN
+ * 2: INF if value < 0
+ * 3: xNORM if value < 0
+ *
+ * 4: subNORM if value < 0
+ * 5: ZERO (-0)
+ * 6: INF if value > 0
+ * 7: xNORM if value > 0
+ *
+ * 8: subNORM if value > 0
+ * 9: ZERO (+0)
+ */
+
+int ieee754_2008dp_class(ieee754dp x)
+{
+ COMPXDP;
+
+ EXPLODEXDP;
+
+ /* convert IEEE754 to 754_2008 */
+ switch (xc) {
+ case IEEE754_CLASS_QNAN:
+ return 0x2;
+ case IEEE754_CLASS_SNAN:
+ return 0x1;
+ case IEEE754_CLASS_ZERO:
+ if (xs)
+ return 0x20;
+ return 0x200;
+ case IEEE754_CLASS_INF:
+ if (xs)
+ return 0x4;
+ return 0x40;
+ case IEEE754_CLASS_DNORM:
+ if (xs)
+ return 0x10;
+ return 0x100;
+ case IEEE754_CLASS_NORM:
+ if (xs)
+ return 0x8;
+ return 0x80;
+ default:
+ return 0x0;
+ }
+}
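The result above is a one-hot mask over the ten bits listed in the header comment. A small decode sketch (helper name and table are assumptions, not part of the patch):

    static const char * const class_names[10] = {
            "sNaN", "qNaN",
            "-inf", "-norm", "-subnorm", "-0",
            "+inf", "+norm", "+subnorm", "+0",
    };

    /* Map a CLASS.fmt result back to a label; exactly one bit is set. */
    static const char *class_name(int mask)
    {
            int bit;

            for (bit = 0; bit < 10; bit++)
                    if (mask & (1 << bit))
                            return class_names[bit];
            return "none";
    }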
diff --git a/arch/mips/math-emu/dp_maddf.c b/arch/mips/math-emu/dp_maddf.c
new file mode 100644
index 0000000..86dc530
--- /dev/null
+++ b/arch/mips/math-emu/dp_maddf.c
@@ -0,0 +1,268 @@
+/* IEEE754 floating point arithmetic
+ * double precision: MADDF.df
+ */
+/*
+ * MIPS floating point support
+ * Copyright (C) 2015 Imagination Technologies, Ltd.
+ *
+ * ########################################################################
+ *
+ * This program is free software; you can distribute it and/or modify it
+ * under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * ########################################################################
+ */
+
+
+#include "ieee754dp.h"
+
+ieee754dp ieee754dp_maddf(ieee754dp z, ieee754dp x, ieee754dp y)
+{
+ int re;
+ int rs;
+ u64 rm;
+ COMPXDP;
+ COMPYDP;
+ u64 zm; int ze; int zs __maybe_unused; int zc;
+
+ EXPLODEXDP;
+ EXPLODEYDP;
+ EXPLODEDP(z, zc, zs, ze, zm)
+
+ CLEARCX;
+
+ FLUSHXDP;
+ FLUSHYDP;
+ FLUSHDP(z, zc, zs, ze, zm);
+
+ switch (zc) {
+ case IEEE754_CLASS_SNAN:
+ SETCX(IEEE754_INVALID_OPERATION);
+ return ieee754dp_nanxcpt(ieee754dp_indef(), "maddf", z, x, y);
+ case IEEE754_CLASS_DNORM:
+ DPDNORMx(zm, ze);
+ break;
+ }
+
+ /* ==== mult ==== */
+ switch (CLPAIR(xc, yc)) {
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+ SETCX(IEEE754_INVALID_OPERATION);
+ return ieee754dp_nanxcpt(ieee754dp_indef(), "maddf", x, y);
+
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+ return y;
+
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
+ return x;
+
+
+ /* Infinity handling */
+
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
+ if (zc == IEEE754_CLASS_QNAN)
+ return z;
+ SETCX(IEEE754_INVALID_OPERATION);
+ return ieee754dp_xcpt(ieee754dp_indef(), "mul", x, y);
+
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+ if (zc == IEEE754_CLASS_QNAN)
+ return z;
+ return ieee754dp_inf(xs ^ ys);
+
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
+ if (zc == IEEE754_CLASS_QNAN)
+ return z;
+ if (zc == IEEE754_CLASS_INF)
+ return ieee754dp_inf(zs);
+ return z;
+
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+ DPDNORMX;
+
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
+ if (zc == IEEE754_CLASS_QNAN)
+ return z;
+ if (zc == IEEE754_CLASS_INF)
+ return ieee754dp_inf(zs);
+ DPDNORMY;
+ break;
+
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
+ if (zc == IEEE754_CLASS_QNAN)
+ return z;
+ if (zc == IEEE754_CLASS_INF)
+ return ieee754dp_inf(zs);
+ DPDNORMX;
+ break;
+
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_NORM):
+ if (zc == IEEE754_CLASS_QNAN)
+ return z;
+ if (zc == IEEE754_CLASS_INF)
+ return ieee754dp_inf(zs);
+ break;
+ }
+ /* rm = xm * ym, re = xe+ye basically */
+ assert(xm & DP_HIDDEN_BIT);
+ assert(ym & DP_HIDDEN_BIT);
+
+ re = xe + ye;
+ rs = xs ^ ys;
+
+ /* shunt to top of word */
+ xm <<= 64 - (DP_MBITS + 1);
+ ym <<= 64 - (DP_MBITS + 1);
+
+ /* multiply 64-bit xm, ym to give the high 64 bits of rm with stickiness
+ */
+
+ /* 32 * 32 => 64 */
+#define DPXMULT(x, y) ((u64)(x) * (u64)y)
+
+ {
+ unsigned lxm = xm;
+ unsigned hxm = xm >> 32;
+ unsigned lym = ym;
+ unsigned hym = ym >> 32;
+ u64 lrm;
+ u64 hrm;
+
+ lrm = DPXMULT(lxm, lym);
+ hrm = DPXMULT(hxm, hym);
+
+ {
+ u64 t = DPXMULT(lxm, hym);
+ {
+ u64 at =
+ lrm + (t << 32);
+ hrm += at < lrm;
+ lrm = at;
+ }
+ hrm = hrm + (t >> 32);
+ }
+
+ {
+ u64 t = DPXMULT(hxm, lym);
+ {
+ u64 at =
+ lrm + (t << 32);
+ hrm += at < lrm;
+ lrm = at;
+ }
+ hrm = hrm + (t >> 32);
+ }
+ rm = hrm | (lrm != 0);
+ }
+
+ /*
+ * sticky shift down to normal rounding precision
+ */
+ if ((s64) rm < 0) {
+ rm =
+ (rm >> (64 - (DP_MBITS + 1 + 3))) |
+ ((rm << (DP_MBITS + 1 + 3)) != 0);
+ re++;
+ } else {
+ rm =
+ (rm >> (64 - (DP_MBITS + 1 + 3 + 1))) |
+ ((rm << (DP_MBITS + 1 + 3 + 1)) != 0);
+ }
+
+ assert(rm & (DP_HIDDEN_BIT << 3));
+
+ /* ==== add ==== */
+
+ assert(zm & DP_HIDDEN_BIT);
+
+ /* provide guard, round and sticky bit space */
+ zm <<= 3;
+
+ if (ze > re) {
+ /* have to shift y fraction right to align
+ */
+ int s = ze - re;
+ rm = XDPSRS(rm, s);
+ re += s;
+ } else if (re > ze) {
+ /* have to shift x fraction right to align
+ */
+ int s = re - ze;
+ zm = XDPSRS(zm, s);
+ ze += s;
+ }
+ assert(ze == re);
+ assert(ze <= DP_EMAX);
+
+ if (zs == rs) {
+ /* generate 60 bit result of adding two 59 bit numbers
+ * leaving result in zm,zs,ze
+ */
+ zm = zm + rm;
+ ze = ze;
+ zs = zs;
+
+ if (zm >> (DP_MBITS + 1 + 3)) { /* carry out */
+ zm = XDPSRS1(zm);
+ ze++;
+ }
+ } else {
+ if (zm >= rm) {
+ zm = zm - rm;
+ ze = ze;
+ zs = zs;
+ } else {
+ zm = rm - zm;
+ ze = ze;
+ zs = rs;
+ }
+ if (zm == 0)
+ return ieee754dp_zero(ieee754_csr.rm ==
+ IEEE754_RD);
+
+ /* normalize to rounding precision */
+ while ((zm >> (DP_MBITS + 3)) == 0) {
+ zm <<= 1;
+ ze--;
+ }
+
+ }
+ DPNORMRET3(zs, ze, zm, "maddf", z, x, y);
+
+}
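The multiply stage above builds a 128-bit product from four 32x32 partial products, because the mantissas were first shunted to the top of 64-bit words; the low half is then folded into bit 0 of rm as a sticky bit. A reference sketch of the same computation using a 128-bit type, which the kernel deliberately avoids (hence the manual carry chain):

    #include <stdint.h>

    /* High 64 bits of x*y, with any nonzero low bits ORed into bit 0
     * as stickiness; equivalent to the DPXMULT block above. */
    static uint64_t mul_high_sticky(uint64_t x, uint64_t y)
    {
            unsigned __int128 p = (unsigned __int128)x * y;

            return (uint64_t)(p >> 64) | ((uint64_t)p != 0);
    }

For double precision the shunt is 64 - (DP_MBITS + 1) = 11 bits, so mul_high_sticky(xm << 11, ym << 11) should agree with rm before the rounding-precision shift.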
diff --git a/arch/mips/math-emu/dp_minmax.c b/arch/mips/math-emu/dp_minmax.c
new file mode 100644
index 0000000..e435aec
--- /dev/null
+++ b/arch/mips/math-emu/dp_minmax.c
@@ -0,0 +1,432 @@
+/* IEEE754 floating point arithmetic
+ * double precision: MIN/MAX/MINA/MAXA
+ */
+/*
+ * MIPS floating point support
+ * Copyright (C) 2015 Imagination Technologies, Ltd.
+ *
+ * ########################################################################
+ *
+ * This program is free software; you can distribute it and/or modify it
+ * under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * ########################################################################
+ *
+ */
+
+
+#include "ieee754dp.h"
+
+ieee754dp ieee754dp_fmin(ieee754dp x, ieee754dp y)
+{
+ COMPXDP;
+ COMPYDP;
+
+ EXPLODEXDP;
+ EXPLODEYDP;
+
+ CLEARCX;
+
+ FLUSHXDP;
+ FLUSHYDP;
+
+ switch (CLPAIR(xc, yc)) {
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+ SETCX(IEEE754_INVALID_OPERATION);
+ return ieee754dp_nanxcpt(ieee754dp_indef(), "min", x, y);
+
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+ return x;
+
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
+ return y;
+
+ /* Infinity and zero handling
+ */
+
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
+ if (xs)
+ return x;
+ return y;
+
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
+ if (ys)
+ return y;
+ return x;
+
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+ if (xs == ys)
+ return x;
+ return ieee754dp_zero(1);
+
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+ DPDNORMX;
+
+ /* FALL THROUGH */
+
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
+ DPDNORMY;
+ break;
+
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
+ DPDNORMX;
+ break;
+
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_NORM):
+ break;
+ }
+ assert(xm & DP_HIDDEN_BIT);
+ assert(ym & DP_HIDDEN_BIT);
+
+ if (xs ^ ys) {
+ if (xs)
+ return x;
+ return y;
+ }
+
+ if (xe > ye)
+ return y;
+ if (ye > xe)
+ return x;
+
+ assert(xe == ye);
+ assert(xe <= DP_EMAX);
+
+ if (xm <= ym)
+ return x;
+ return y;
+}
+
+ieee754dp ieee754dp_fmax(ieee754dp x, ieee754dp y)
+{
+ COMPXDP;
+ COMPYDP;
+
+ EXPLODEXDP;
+ EXPLODEYDP;
+
+ CLEARCX;
+
+ FLUSHXDP;
+ FLUSHYDP;
+
+ switch (CLPAIR(xc, yc)) {
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+ SETCX(IEEE754_INVALID_OPERATION);
+ return ieee754dp_nanxcpt(ieee754dp_indef(), "max", x, y);
+
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+ return x;
+
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
+ return y;
+
+ /* Infinity and zero handling
+ */
+
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
+ if (!xs)
+ return x;
+ return y;
+
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
+ if (!ys)
+ return y;
+ return x;
+
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+ if (xs == ys)
+ return x;
+ return ieee754dp_zero(0);
+
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+ DPDNORMX;
+
+ /* FALL THROUGH */
+
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
+ DPDNORMY;
+ break;
+
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
+ DPDNORMX;
+ break;
+
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_NORM):
+ break;
+ }
+ assert(xm & DP_HIDDEN_BIT);
+ assert(ym & DP_HIDDEN_BIT);
+
+ if (xs ^ ys) {
+ if (!xs)
+ return x;
+ return y;
+ }
+
+ if (xe > ye)
+ return x;
+ if (ye > xe)
+ return y;
+
+ assert(xe == ye);
+ assert(xe <= DP_EMAX);
+
+ if (xm < ym)
+ return y;
+ return x;
+}
+
+ieee754dp ieee754dp_fmina(ieee754dp x, ieee754dp y)
+{
+ COMPXDP;
+ COMPYDP;
+
+ EXPLODEXDP;
+ EXPLODEYDP;
+
+ CLEARCX;
+
+ FLUSHXDP;
+ FLUSHYDP;
+
+ switch (CLPAIR(xc, yc)) {
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+ SETCX(IEEE754_INVALID_OPERATION);
+ return ieee754dp_nanxcpt(ieee754dp_indef(), "mina", x, y);
+
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+ return x;
+
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
+ return y;
+
+ /* Infinity and zero handling
+ */
+
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
+ return y;
+
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
+ return x;
+
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+ if (xs == ys)
+ return x;
+ return ieee754dp_zero(1);
+
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+ DPDNORMX;
+
+ /* FALL THROUGH */
+
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
+ DPDNORMY;
+ break;
+
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
+ DPDNORMX;
+ break;
+
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_NORM):
+ break;
+ }
+ assert(xm & DP_HIDDEN_BIT);
+ assert(ym & DP_HIDDEN_BIT);
+
+ if (xe > ye)
+ return y;
+ if (ye > xe)
+ return x;
+
+ assert(xe == ye);
+ assert(xe <= DP_EMAX);
+
+ if (xm <= ym)
+ return x;
+ return y;
+}
+
+ieee754dp ieee754dp_fmaxa(ieee754dp x, ieee754dp y)
+{
+ COMPXDP;
+ COMPYDP;
+
+ EXPLODEXDP;
+ EXPLODEYDP;
+
+ CLEARCX;
+
+ FLUSHXDP;
+ FLUSHYDP;
+
+ switch (CLPAIR(xc, yc)) {
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+ SETCX(IEEE754_INVALID_OPERATION);
+ return ieee754dp_nanxcpt(ieee754dp_indef(), "maxa", x, y);
+
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+ return x;
+
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
+ return y;
+
+ /* Infinity and zero handling
+ */
+
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
+ return x;
+
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
+ return y;
+
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+ if (xs == ys)
+ return x;
+ return ieee754dp_zero(0);
+
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+ DPDNORMX;
+
+ /* FALL THROUGH */
+
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
+ DPDNORMY;
+ break;
+
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
+ DPDNORMX;
+ break;
+
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_NORM):
+ break;
+ }
+ assert(xm & DP_HIDDEN_BIT);
+ assert(ym & DP_HIDDEN_BIT);
+
+ if (xe > ye)
+ return x;
+ if (ye > xe)
+ return y;
+
+ assert(xe == ye);
+ assert(xe <= DP_EMAX);
+
+ if (xm < ym)
+ return y;
+ return x;
+}
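All four routines above share the IEEE754-2008 NaN rule: a signaling NaN raises Invalid Operation, while a quiet NaN on one side simply yields the other operand. Reduced to a host-double sketch (min only; the signed-zero and sNaN refinements handled above are omitted):

    #include <math.h>

    /* 2008-style minNum: prefer the numeric operand over a quiet NaN;
     * with two NaNs the first operand comes back, as in the code above. */
    static double min_num(double x, double y)
    {
            if (isnan(y))
                    return x;
            if (isnan(x))
                    return y;
            return x < y ? x : y;
    }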
diff --git a/arch/mips/math-emu/dp_msubf.c b/arch/mips/math-emu/dp_msubf.c
new file mode 100644
index 0000000..8410ce4
--- /dev/null
+++ b/arch/mips/math-emu/dp_msubf.c
@@ -0,0 +1,271 @@
+/* IEEE754 floating point arithmetic
+ * double precision: MSUBF.df
+ */
+/*
+ * MIPS floating point support
+ * Copyright (C) 2015 Imagination Technologies, Ltd.
+ *
+ * ########################################################################
+ *
+ * This program is free software; you can distribute it and/or modify it
+ * under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * ########################################################################
+ */
+
+
+#include "ieee754dp.h"
+
+ieee754dp ieee754dp_msubf(ieee754dp z, ieee754dp x, ieee754dp y)
+{
+ int re;
+ int rs;
+ u64 rm;
+ COMPXDP;
+ COMPYDP;
+ u64 zm; int ze; int zs __maybe_unused; int zc;
+
+ EXPLODEXDP;
+ EXPLODEYDP;
+ EXPLODEDP(z, zc, zs, ze, zm)
+
+ CLEARCX;
+
+ FLUSHXDP;
+ FLUSHYDP;
+ FLUSHDP(z, zc, zs, ze, zm);
+
+ switch (zc) {
+ case IEEE754_CLASS_SNAN:
+ SETCX(IEEE754_INVALID_OPERATION);
+ return ieee754dp_nanxcpt(ieee754dp_indef(), "maddf", z, x, y);
+ case IEEE754_CLASS_DNORM:
+ DPDNORMx(zm, ze);
+ break;
+ }
+
+ /* ==== mult ==== */
+ switch (CLPAIR(xc, yc)) {
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+ SETCX(IEEE754_INVALID_OPERATION);
+ return ieee754dp_nanxcpt(ieee754dp_indef(), "maddf", x, y);
+
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+ return y;
+
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
+ return x;
+
+
+ /* Infinity handling */
+
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
+ if (zc == IEEE754_CLASS_QNAN)
+ return z;
+ SETCX(IEEE754_INVALID_OPERATION);
+ return ieee754dp_xcpt(ieee754dp_indef(), "mul", x, y);
+
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+ if (zc == IEEE754_CLASS_QNAN)
+ return z;
+ return ieee754dp_inf(xs ^ ys);
+
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
+ if (zc == IEEE754_CLASS_QNAN)
+ return z;
+ if (zc == IEEE754_CLASS_INF)
+ return ieee754dp_inf(zs);
+ return z;
+
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+ DPDNORMX;
+
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
+ if (zc == IEEE754_CLASS_QNAN)
+ return z;
+ if (zc == IEEE754_CLASS_INF)
+ return ieee754dp_inf(zs);
+ DPDNORMY;
+ break;
+
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
+ if (zc == IEEE754_CLASS_QNAN)
+ return z;
+ if (zc == IEEE754_CLASS_INF)
+ return ieee754dp_inf(zs);
+ DPDNORMX;
+ break;
+
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_NORM):
+ if (zc == IEEE754_CLASS_QNAN)
+ return z;
+ if (zc == IEEE754_CLASS_INF)
+ return ieee754dp_inf(zs);
+ break;
+ }
+ /* rm = xm * ym, re = xe+ye basically */
+ assert(xm & DP_HIDDEN_BIT);
+ assert(ym & DP_HIDDEN_BIT);
+
+ re = xe + ye;
+ rs = xs ^ ys;
+
+ /* shunt to top of word */
+ xm <<= 64 - (DP_MBITS + 1);
+ ym <<= 64 - (DP_MBITS + 1);
+
+ /* multiply 64-bit xm, ym to give the high 64 bits of rm with stickiness
+ */
+
+ /* 32 * 32 => 64 */
+#define DPXMULT(x, y) ((u64)(x) * (u64)y)
+
+ {
+ unsigned lxm = xm;
+ unsigned hxm = xm >> 32;
+ unsigned lym = ym;
+ unsigned hym = ym >> 32;
+ u64 lrm;
+ u64 hrm;
+
+ lrm = DPXMULT(lxm, lym);
+ hrm = DPXMULT(hxm, hym);
+
+ {
+ u64 t = DPXMULT(lxm, hym);
+ {
+ u64 at =
+ lrm + (t << 32);
+ hrm += at < lrm;
+ lrm = at;
+ }
+ hrm = hrm + (t >> 32);
+ }
+
+ {
+ u64 t = DPXMULT(hxm, lym);
+ {
+ u64 at =
+ lrm + (t << 32);
+ hrm += at < lrm;
+ lrm = at;
+ }
+ hrm = hrm + (t >> 32);
+ }
+ rm = hrm | (lrm != 0);
+ }
+
+ /*
+ * sticky shift down to normal rounding precision
+ */
+ if ((s64) rm < 0) {
+ rm =
+ (rm >> (64 - (DP_MBITS + 1 + 3))) |
+ ((rm << (DP_MBITS + 1 + 3)) != 0);
+ re++;
+ } else {
+ rm =
+ (rm >> (64 - (DP_MBITS + 1 + 3 + 1))) |
+ ((rm << (DP_MBITS + 1 + 3 + 1)) != 0);
+ }
+
+ assert(rm & (DP_HIDDEN_BIT << 3));
+
+ /* ==== add ==== */
+
+ /* flip sign of r and handle as add */
+ rs ^= 1;
+
+ assert(zm & DP_HIDDEN_BIT);
+
+ /* provide guard, round and sticky bit space */
+ zm <<= 3;
+
+ if (ze > re) {
+ /* have to shift y fraction right to align
+ */
+ int s = ze - re;
+ rm = XDPSRS(rm, s);
+ re += s;
+ } else if (re > ze) {
+ /* have to shift x fraction right to align
+ */
+ int s = re - ze;
+ zm = XDPSRS(zm, s);
+ ze += s;
+ }
+ assert(ze == re);
+ assert(ze <= DP_EMAX);
+
+ if (zs == rs) {
+ /* generate 60 bit result of adding two 59 bit numbers
+ * leaving result in zm,zs,ze
+ */
+ zm = zm + rm;
+ ze = ze;
+ zs = zs;
+
+ if (zm >> (DP_MBITS + 1 + 3)) { /* carry out */
+ zm = XDPSRS1(zm);
+ ze++;
+ }
+ } else {
+ if (zm >= rm) {
+ zm = zm - rm;
+ ze = ze;
+ zs = zs;
+ } else {
+ zm = rm - zm;
+ ze = ze;
+ zs = rs;
+ }
+ if (zm == 0)
+ return ieee754dp_zero(ieee754_csr.rm ==
+ IEEE754_RD);
+
+ /* normalize to rounding precision */
+ while ((zm >> (DP_MBITS + 3)) == 0) {
+ zm <<= 1;
+ ze--;
+ }
+
+ }
+ DPNORMRET3(zs, ze, zm, "maddf", z, x, y);
+
+}
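The file differs from dp_maddf.c in a single line: `rs ^= 1` flips the sign of the product before the accumulate, so the routine computes z - x*y with one rounding at the end. Against C99's fused multiply-add this is (a sketch, exact only where fma() is truly fused):

    #include <math.h>

    /* msubf(z, x, y) behaves like maddf(z, -x, y), i.e. z - x*y fused. */
    static double msubf_ref(double z, double x, double y)
    {
            return fma(-x, y, z);
    }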
diff --git a/arch/mips/math-emu/dsemul.c b/arch/mips/math-emu/dsemul.c
index 7ea622a..7b72b12 100644
--- a/arch/mips/math-emu/dsemul.c
+++ b/arch/mips/math-emu/dsemul.c
@@ -10,6 +10,7 @@
#include <asm/inst.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
+#include <asm/vdso.h>
#include <asm/branch.h>
#include <asm/mipsregs.h>
#include <asm/cacheflush.h>
@@ -18,6 +19,95 @@
#include "ieee754.h"
+#ifdef CONFIG_CPU_MIPSR6
+
+static int mipsr6_pc(struct pt_regs *regs, mips_instruction inst, unsigned long cpc,
+ unsigned long bpc, unsigned long r31)
+{
+ union mips_instruction ir = (union mips_instruction)inst;
+ register unsigned long vaddr;
+ unsigned int val;
+ int err = SIGILL;
+
+ if (ir.rel_format.opcode != pcrel_op)
+ return SIGILL;
+
+ switch (ir.rel_format.op) {
+ case addiupc_op:
+ vaddr = regs->cp0_epc + (ir.rel_format.simmediate << 2);
+ if (config_enabled(CONFIG_64BIT) && !(regs->cp0_status & ST0_UX))
+ __asm__ __volatile__("sll %0, %0, 0":"+&r"(vaddr)::);
+ regs->regs[ir.rel_format.rs] = vaddr;
+ return 0;
+#ifdef CONFIG_CPU_MIPS64
+ case lwupc_op:
+ vaddr = regs->cp0_epc + (ir.rel_format.simmediate << 2);
+ if (config_enabled(CONFIG_64BIT) && !(regs->cp0_status & ST0_UX))
+ __asm__ __volatile__("sll %0, %0, 0":"+&r"(vaddr)::);
+ if (get_user(val, (u32 __user *)vaddr)) {
+ current->thread.cp0_baduaddr = vaddr;
+ err = SIGSEGV;
+ break;
+ }
+ regs->regs[ir.rel_format.rs] = val;
+ return 0;
+#endif
+ case lwpc_op:
+ vaddr = regs->cp0_epc + (ir.rel_format.simmediate << 2);
+ if (config_enabled(CONFIG_64BIT) && !(regs->cp0_status & ST0_UX))
+ __asm__ __volatile__("sll %0, %0, 0":"+&r"(vaddr)::);
+ if (get_user(val, (u32 __user *)vaddr)) {
+ current->thread.cp0_baduaddr = vaddr;
+ err = SIGSEGV;
+ break;
+ }
+ regs->regs[ir.rel_format.rs] = (int)val;
+ return 0;
+ default:
+ switch (ir.rl16_format.func) {
+ case aluipc_func:
+ vaddr = regs->cp0_epc + (ir.rl16_format.simmediate << 16);
+ if (config_enabled(CONFIG_64BIT) &&
+ !(regs->cp0_status & ST0_UX))
+ __asm__ __volatile__("sll %0, %0, 0":"+&r"(vaddr)::);
+ vaddr &= ~0xffff;
+ regs->regs[ir.rel_format.rs] = vaddr;
+ return 0;
+ case auipc_func:
+ vaddr = regs->cp0_epc + (ir.rl16_format.simmediate << 16);
+ if (config_enabled(CONFIG_64BIT) &&
+ !(regs->cp0_status & ST0_UX))
+ __asm__ __volatile__("sll %0, %0, 0":"+&r"(vaddr)::);
+ regs->regs[ir.rel_format.rs] = vaddr;
+ return 0;
+ default: {
+#ifdef CONFIG_CPU_MIPS64
+ unsigned long dval;
+
+ if (ir.rl18_format.unused)
+ break;
+ /* LDPC */
+ vaddr = regs->cp0_epc & ~0x7;
+ vaddr += (ir.rl18_format.simmediate << 3);
+ if (config_enabled(CONFIG_64BIT) &&
+ !(regs->cp0_status & ST0_UX))
+ __asm__ __volatile__("sll %0, %0, 0":"+&r"(vaddr)::);
+ if (get_user(dval, (u64 __user *)vaddr)) {
+ current->thread.cp0_baduaddr = vaddr;
+ err = SIGSEGV;
+ break;
+ }
+ regs->regs[ir.rel_format.rs] = dval;
+ return 0;
+#endif
+ }
+ }
+ break;
+ }
+ return err;
+}
+#endif
+
/* Strap kernel emulator for full MIPS IV emulation */
#ifdef __mips
@@ -47,13 +137,19 @@
mips_instruction badinst;
mips_instruction cookie;
unsigned long epc;
+ unsigned long bpc;
+ unsigned long r31;
};
+/* round structure size to N*8 to force two instructions to fit in a single cache line */
+#define EMULFRAME_ROUNDED_SIZE ((sizeof(struct emuframe) + 0x7) & ~0x7)
-int mips_dsemul(struct pt_regs *regs, mips_instruction ir, unsigned long cpc)
+int mips_dsemul(struct pt_regs *regs, mips_instruction ir, unsigned long cpc,
+ unsigned long bpc, unsigned long r31)
{
extern asmlinkage void handle_dsemulret(void);
struct emuframe __user *fr;
int err;
+ unsigned char *pg_addr;
if ((get_isa16_mode(regs->cp0_epc) && ((ir >> 16) == MM_NOP16)) ||
(ir == 0)) {
@@ -67,8 +163,13 @@
#endif
+#ifdef CONFIG_CPU_MIPSR6
+ err = mipsr6_pc(regs, ir, cpc, bpc, r31);
+ if (err != SIGILL)
+ return err;
+#endif
/*
- * The strategy is to push the instruction onto the user stack
+ * The strategy is to push the instruction onto the user stack/VDSO page
* and put a trap after it which we can catch and jump to
* the required address any alternative apart from full
* instruction emulation!!.
@@ -85,37 +186,82 @@
* handler (single entry point).
*/
- /* Ensure that the two instructions are in the same cache line */
- fr = (struct emuframe __user *)
- ((regs->regs[29] - sizeof(struct emuframe)) & ~0x7);
+ if (current_thread_info()->vdso_page) {
+ /*
+ * Use VDSO page and fill structure via kernel VA,
+ * user write is disabled
+ */
+ pg_addr = (unsigned char *)page_address(current_thread_info()->vdso_page);
+ fr = (struct emuframe __user *)
+ (pg_addr + current_thread_info()->vdso_offset -
+ EMULFRAME_ROUNDED_SIZE);
- /* Verify that the stack pointer is not competely insane */
- if (unlikely(!access_ok(VERIFY_WRITE, fr, sizeof(struct emuframe))))
- return SIGBUS;
+ /* verify that we don't overflow into trampoline areas */
+ if ((unsigned char *)fr < (unsigned char *)(((struct mips_vdso *)pg_addr) + 1)) {
+ MIPS_FPU_EMU_INC_STATS(errors);
+ return SIGBUS;
+ }
- if (get_isa16_mode(regs->cp0_epc)) {
- err = __put_user(ir >> 16, (u16 __user *)(&fr->emul));
- err |= __put_user(ir & 0xffff, (u16 __user *)((long)(&fr->emul) + 2));
- err |= __put_user(BREAK_MATH >> 16, (u16 __user *)(&fr->badinst));
- err |= __put_user(BREAK_MATH & 0xffff, (u16 __user *)((long)(&fr->badinst) + 2));
+ current_thread_info()->vdso_offset -= EMULFRAME_ROUNDED_SIZE;
+
+ if (get_isa16_mode(regs->cp0_epc)) {
+ *(u16 *)&fr->emul = (u16)(ir >> 16);
+ *((u16 *)(&fr->emul) + 1) = (u16)(ir & 0xffff);
+ *((u16 *)(&fr->emul) + 2) = (u16)(BREAK_MATH >> 16);
+ *((u16 *)(&fr->emul) + 3) = (u16)(BREAK_MATH & 0xffff);
+ } else {
+ fr->emul = ir;
+ fr->badinst = (mips_instruction)BREAK_MATH;
+ }
+ fr->cookie = (mips_instruction)BD_COOKIE;
+ fr->epc = cpc;
+ fr->bpc = bpc;
+ fr->r31 = r31;
+
+ /* fill CP0_EPC with user VA */
+ regs->cp0_epc = ((unsigned long)(current->mm->context.vdso +
+ current_thread_info()->vdso_offset)) |
+ get_isa16_mode(regs->cp0_epc);
+ if (cpu_has_dc_aliases || cpu_has_ic_aliases)
+ mips_flush_data_cache_range(current->mm->context.vdso_vma,
+ regs->cp0_epc, current_thread_info()->vdso_page,
+ (unsigned long)fr, sizeof(struct emuframe));
+ else
+ /* it is less expensive on CPUs with a correct SYNCI */
+ flush_cache_sigtramp((unsigned long)fr);
} else {
- err = __put_user(ir, &fr->emul);
- err |= __put_user((mips_instruction)BREAK_MATH, &fr->badinst);
+ /* Ensure that the two instructions are in the same cache line */
+ fr = (struct emuframe __user *)
+ ((regs->regs[29] - sizeof(struct emuframe)) & ~0x7);
+
+ /* Verify that the stack pointer is not completely insane */
+ if (unlikely(!access_ok(VERIFY_WRITE, fr, sizeof(struct emuframe))))
+ return SIGBUS;
+
+ if (get_isa16_mode(regs->cp0_epc)) {
+ err = __put_user(ir >> 16, (u16 __user *)(&fr->emul));
+ err |= __put_user(ir & 0xffff, (u16 __user *)((long)(&fr->emul) + 2));
+ err |= __put_user(BREAK_MATH >> 16, (u16 __user *)(&fr->badinst));
+ err |= __put_user(BREAK_MATH & 0xffff, (u16 __user *)((long)(&fr->badinst) + 2));
+ } else {
+ err = __put_user(ir, &fr->emul);
+ err |= __put_user((mips_instruction)BREAK_MATH, &fr->badinst);
+ }
+
+ err |= __put_user((mips_instruction)BD_COOKIE, &fr->cookie);
+ err |= __put_user(cpc, &fr->epc);
+
+ if (unlikely(err)) {
+ MIPS_FPU_EMU_INC_STATS(errors);
+ return SIGBUS;
+ }
+
+ regs->cp0_epc = ((unsigned long) &fr->emul) |
+ get_isa16_mode(regs->cp0_epc);
+
+ flush_cache_sigtramp((unsigned long)&fr->badinst);
}
- err |= __put_user((mips_instruction)BD_COOKIE, &fr->cookie);
- err |= __put_user(cpc, &fr->epc);
-
- if (unlikely(err)) {
- MIPS_FPU_EMU_INC_STATS(errors);
- return SIGBUS;
- }
-
- regs->cp0_epc = ((unsigned long) &fr->emul) |
- get_isa16_mode(regs->cp0_epc);
-
- flush_cache_sigtramp((unsigned long)&fr->badinst);
-
return SIGILL; /* force out of emulation loop */
}
@@ -152,7 +298,10 @@
}
err |= __get_user(cookie, &fr->cookie);
- if (unlikely(err || (insn != BREAK_MATH) || (cookie != BD_COOKIE))) {
+ if (unlikely(err || (insn != BREAK_MATH) || (cookie != BD_COOKIE) ||
+ (current_thread_info()->vdso_page &&
+ ((xcp->cp0_epc & PAGE_MASK) !=
+ (unsigned long)current->mm->context.vdso)))) {
MIPS_FPU_EMU_INC_STATS(errors);
return 0;
}
@@ -177,8 +326,53 @@
return 0;
}
+ if (current_thread_info()->vdso_page) {
+ /* restore VDSO stack level */
+ current_thread_info()->vdso_offset += EMULFRAME_ROUNDED_SIZE;
+ if (current_thread_info()->vdso_offset > PAGE_SIZE) {
+ /* This is not a good situation to be in */
+ current_thread_info()->vdso_offset -= EMULFRAME_ROUNDED_SIZE;
+ force_sig(SIGBUS, current);
+
+ return 0;
+ }
+ }
/* Set EPC to return to post-branch instruction */
xcp->cp0_epc = epc;
-
return 1;
}
+
+/* check and adjust an emulation stack before starting a signal handler */
+void vdso_epc_adjust(struct pt_regs *xcp)
+{
+ struct emuframe __user *fr;
+ unsigned long epc;
+ unsigned long r31;
+
+ while (current_thread_info()->vdso_offset < PAGE_SIZE) {
+ epc = msk_isa16_mode(xcp->cp0_epc);
+ if ((epc >= ((unsigned long)current->mm->context.vdso + PAGE_SIZE)) ||
+ (epc < (unsigned long)((struct mips_vdso *)current->mm->context.vdso + 1)))
+ return;
+
+ fr = (struct emuframe __user *)
+ ((unsigned long)current->mm->context.vdso +
+ current_thread_info()->vdso_offset);
+
+ /*
+ * epc must point to emul instruction or badinst
+ * in case of emul - it is not executed, so return to start
+ * and restore GPR31
+ * in case of badinst - instruction is executed, return to destination
+ */
+ if (epc == (unsigned long)&fr->emul) {
+ __get_user(r31, &fr->r31);
+ xcp->regs[31] = r31;
+ __get_user(epc, &fr->bpc);
+ } else {
+ __get_user(epc, &fr->epc);
+ }
+ xcp->cp0_epc = epc;
+ current_thread_info()->vdso_offset += EMULFRAME_ROUNDED_SIZE;
+ }
+}
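mips_dsemul() above now keeps a small downward-growing stack of emuframes at the top of the per-thread VDSO page, so the delay-slot instruction executes from a page the kernel writes but user space cannot. The frame layout and rounding, restated as a sketch (struct fields follow the hunk; the push helper is hypothetical):

    /* Rounded to 8 bytes so emul and badinst share a cache line. */
    struct emuframe_sketch {
            unsigned int  emul;     /* instruction to single-step */
            unsigned int  badinst;  /* BREAK_MATH trap that follows it */
            unsigned int  cookie;   /* BD_COOKIE sanity check */
            unsigned long epc;      /* post-branch resume address */
            unsigned long bpc;      /* branch target, for signal unwind */
            unsigned long r31;      /* saved link register */
    };

    #define ROUNDED_SIZE(sz)  (((sz) + 0x7) & ~0x7UL)

    /* Claim the next slot below the current offset in the VDSO page. */
    static unsigned long frame_push(unsigned long vdso_offset)
    {
            return vdso_offset - ROUNDED_SIZE(sizeof(struct emuframe_sketch));
    }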
diff --git a/arch/mips/math-emu/ieee754.h b/arch/mips/math-emu/ieee754.h
index 22796e0..1b12f73 100644
--- a/arch/mips/math-emu/ieee754.h
+++ b/arch/mips/math-emu/ieee754.h
@@ -104,6 +104,7 @@
*/
int ieee754sp_finite(ieee754sp x);
int ieee754sp_class(ieee754sp x);
+int ieee754_2008sp_class(ieee754sp x);
ieee754sp ieee754sp_abs(ieee754sp x);
ieee754sp ieee754sp_neg(ieee754sp x);
@@ -123,6 +124,12 @@
ieee754sp ieee754sp_flong(s64 x);
ieee754sp ieee754sp_fulong(u64 x);
ieee754sp ieee754sp_fdp(ieee754dp x);
+ieee754sp ieee754sp_maddf(ieee754sp z, ieee754sp x, ieee754sp y);
+ieee754sp ieee754sp_msubf(ieee754sp z, ieee754sp x, ieee754sp y);
+ieee754sp ieee754sp_fmin(ieee754sp x, ieee754sp y);
+ieee754sp ieee754sp_fmina(ieee754sp x, ieee754sp y);
+ieee754sp ieee754sp_fmax(ieee754sp x, ieee754sp y);
+ieee754sp ieee754sp_fmaxa(ieee754sp x, ieee754sp y);
int ieee754sp_tint(ieee754sp x);
unsigned int ieee754sp_tuns(ieee754sp x);
@@ -148,6 +155,7 @@
*/
int ieee754dp_finite(ieee754dp x);
int ieee754dp_class(ieee754dp x);
+int ieee754_2008dp_class(ieee754dp x);
/* x with sign of y */
ieee754dp ieee754dp_copysign(ieee754dp x, ieee754dp y);
@@ -174,6 +182,12 @@
ieee754dp ieee754dp_ceil(ieee754dp x);
ieee754dp ieee754dp_floor(ieee754dp x);
ieee754dp ieee754dp_trunc(ieee754dp x);
+ieee754dp ieee754dp_maddf(ieee754dp z, ieee754dp x, ieee754dp y);
+ieee754dp ieee754dp_msubf(ieee754dp z, ieee754dp x, ieee754dp y);
+ieee754dp ieee754dp_fmin(ieee754dp x, ieee754dp y);
+ieee754dp ieee754dp_fmina(ieee754dp x, ieee754dp y);
+ieee754dp ieee754dp_fmax(ieee754dp x, ieee754dp y);
+ieee754dp ieee754dp_fmaxa(ieee754dp x, ieee754dp y);
int ieee754dp_tint(ieee754dp x);
unsigned int ieee754dp_tuns(ieee754dp x);
@@ -311,7 +325,10 @@
unsigned pad0:7;
unsigned nod:1; /* set 1 for no denormalised numbers */
unsigned c:1; /* condition */
- unsigned pad1:5;
+ unsigned pad1a:2;
+ unsigned mac2008:1;
+ unsigned abs2008:1;
+ unsigned nan2008:1;
unsigned cx:6; /* exceptions this operation */
unsigned mx:5; /* exception enable mask */
unsigned sx:5; /* exceptions total */
@@ -322,7 +339,10 @@
unsigned sx:5; /* exceptions total */
unsigned mx:5; /* exception enable mask */
unsigned cx:6; /* exceptions this operation */
- unsigned pad1:5;
+ unsigned nan2008:1;
+ unsigned abs2008:1;
+ unsigned mac2008:1;
+ unsigned pad1a:2;
unsigned c:1; /* condition */
unsigned nod:1; /* set 1 for no denormalised numbers */
unsigned pad0:7;
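The bitfield change above carves three IEEE754-2008 mode flags out of the former 5-bit pad, sitting just above the 6-bit cause field. As plain masks (values follow from the field positions; the FPU_CSR_* names mirror those used later in kernel_linkage.c):

    #define FPU_CSR_NAN2008   0x00040000  /* bit 18: 2008 NaN encoding */
    #define FPU_CSR_ABS2008   0x00080000  /* bit 19: non-arithmetic ABS/NEG */
    #define FPU_CSR_MAC2008   0x00100000  /* bit 20: fused multiply-add */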
diff --git a/arch/mips/math-emu/ieee754dp.c b/arch/mips/math-emu/ieee754dp.c
index 068e56b..5afad3d 100644
--- a/arch/mips/math-emu/ieee754dp.c
+++ b/arch/mips/math-emu/ieee754dp.c
@@ -41,7 +41,9 @@
int ieee754dp_issnan(ieee754dp x)
{
assert(ieee754dp_isnan(x));
- return ((DPMANT(x) & DP_MBIT(DP_MBITS-1)) == DP_MBIT(DP_MBITS-1));
+ if (ieee754_csr.nan2008)
+ return !(DPMANT(x) & DP_MBIT(DP_MBITS-1));
+ return (DPMANT(x) & DP_MBIT(DP_MBITS-1));
}
diff --git a/arch/mips/math-emu/ieee754dp.h b/arch/mips/math-emu/ieee754dp.h
index f139c72..3042e9b 100644
--- a/arch/mips/math-emu/ieee754dp.h
+++ b/arch/mips/math-emu/ieee754dp.h
@@ -79,4 +79,13 @@
return V; \
}
+#define DPNORMRET3(s, e, m, name, a0, a1, a2) \
+{ \
+ ieee754dp V = ieee754dp_format(s, e, m); \
+ if(TSTX()) \
+ return ieee754dp_xcpt(V, name, a0, a1, a2); \
+ else \
+ return V; \
+}
+
#define DPNORMRET1(s, e, m, name, a0) DPNORMRET2(s, e, m, name, a0, a0)
diff --git a/arch/mips/math-emu/ieee754int.h b/arch/mips/math-emu/ieee754int.h
index 4b6c6fb3..1abe83a 100644
--- a/arch/mips/math-emu/ieee754int.h
+++ b/arch/mips/math-emu/ieee754int.h
@@ -83,10 +83,17 @@
if(ve == SP_EMAX+1+SP_EBIAS){\
if(vm == 0)\
vc = IEEE754_CLASS_INF;\
- else if(vm & SP_MBIT(SP_MBITS-1)) \
- vc = IEEE754_CLASS_SNAN;\
- else \
- vc = IEEE754_CLASS_QNAN;\
+ else if (ieee754_csr.nan2008) { \
+ if(vm & SP_MBIT(SP_MBITS-1)) \
+ vc = IEEE754_CLASS_QNAN;\
+ else \
+ vc = IEEE754_CLASS_SNAN;\
+ } else { \
+ if(vm & SP_MBIT(SP_MBITS-1)) \
+ vc = IEEE754_CLASS_SNAN;\
+ else \
+ vc = IEEE754_CLASS_QNAN;\
+ } \
} else if(ve == SP_EMIN-1+SP_EBIAS) {\
if(vm) {\
ve = SP_EMIN;\
@@ -117,10 +124,17 @@
if(ve == DP_EMAX+1+DP_EBIAS){\
if(vm == 0)\
vc = IEEE754_CLASS_INF;\
- else if(vm & DP_MBIT(DP_MBITS-1)) \
- vc = IEEE754_CLASS_SNAN;\
- else \
- vc = IEEE754_CLASS_QNAN;\
+ else if (ieee754_csr.nan2008) { \
+ if(vm & DP_MBIT(DP_MBITS-1)) \
+ vc = IEEE754_CLASS_QNAN;\
+ else \
+ vc = IEEE754_CLASS_SNAN;\
+ } else { \
+ if(vm & DP_MBIT(DP_MBITS-1)) \
+ vc = IEEE754_CLASS_SNAN;\
+ else \
+ vc = IEEE754_CLASS_QNAN;\
+ } \
} else if(ve == DP_EMIN-1+DP_EBIAS) {\
if(vm) {\
ve = DP_EMIN;\
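Under nan2008 the quiet bit's sense inverts: a set mantissa MSB now marks a quiet NaN rather than a signaling one. The rule implemented by the EXPLODE macros above, condensed for doubles (a standalone sketch; DP_MBITS is 52):

    #include <stdint.h>

    enum nan_kind { NOT_NAN, QNAN, SNAN };

    static enum nan_kind nan_class(uint64_t bits, int nan2008)
    {
            uint64_t exp  = (bits >> 52) & 0x7ff;
            uint64_t mant = bits & ((1ULL << 52) - 1);
            int msb       = (mant >> 51) & 1;

            if (exp != 0x7ff || mant == 0)
                    return NOT_NAN;         /* finite or infinity */
            if (nan2008)
                    return msb ? QNAN : SNAN;
            return msb ? SNAN : QNAN;
    }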
diff --git a/arch/mips/math-emu/ieee754sp.c b/arch/mips/math-emu/ieee754sp.c
index 15d1e36..18ba835 100644
--- a/arch/mips/math-emu/ieee754sp.c
+++ b/arch/mips/math-emu/ieee754sp.c
@@ -41,6 +41,8 @@
int ieee754sp_issnan(ieee754sp x)
{
assert(ieee754sp_isnan(x));
+ if (ieee754_csr.nan2008)
+ return !(SPMANT(x) & SP_MBIT(SP_MBITS-1));
return (SPMANT(x) & SP_MBIT(SP_MBITS-1));
}
diff --git a/arch/mips/math-emu/ieee754sp.h b/arch/mips/math-emu/ieee754sp.h
index 754fd54..13c3fcb 100644
--- a/arch/mips/math-emu/ieee754sp.h
+++ b/arch/mips/math-emu/ieee754sp.h
@@ -30,6 +30,8 @@
#define assert(expr) ((void)0)
/* 3bit extended single precision sticky right shift */
+#define XSPSRS(v,rs) \
+ ((rs > (SP_MBITS+3))?1:((v) >> (rs)) | ((v) << (32-(rs)) != 0))
#define SPXSRSXn(rs) \
(xe += rs, \
xm = (rs > (SP_MBITS+3))?1:((xm) >> (rs)) | ((xm) << (32-(rs)) != 0))
@@ -41,6 +43,8 @@
(ye+=rs, \
ym = (rs > (SP_MBITS+3))?1:((ym) >> (rs)) | ((ym) << (32-(rs)) != 0))
+#define XSPSRS1(v) \
+ (((v) >> 1) | ((v) & 1))
#define SPXSRSY1() \
(ye++, (ym = (ym >> 1) | (ym & 1)))
@@ -85,4 +89,13 @@
return V; \
}
+#define SPNORMRET3(s, e, m, name, a0, a1, a2) \
+{ \
+ ieee754sp V = ieee754sp_format(s, e, m); \
+ if(TSTX()) \
+ return ieee754sp_xcpt(V, name, a0, a1, a2); \
+ else \
+ return V; \
+}
+
#define SPNORMRET1(s, e, m, name, a0) SPNORMRET2(s, e, m, name, a0, a0)
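XSPSRS above is the value form of the existing SPXSRSXn/Yn shifts: a right shift whose result keeps bit 0 set if any shifted-out bit was nonzero, so later rounding still sees the discarded tail. As a function (a sketch; valid for 1 <= rs <= 31, matching the macro's assumptions):

    #include <stdint.h>

    static uint32_t xspsrs(uint32_t v, int rs)
    {
            if (rs > 23 + 3)        /* SP_MBITS + 3; the macro returns 1 here */
                    return 1;

            return (v >> rs) | ((v << (32 - rs)) != 0);
    }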
diff --git a/arch/mips/math-emu/kernel_linkage.c b/arch/mips/math-emu/kernel_linkage.c
index 1c58657..8738981 100644
--- a/arch/mips/math-emu/kernel_linkage.c
+++ b/arch/mips/math-emu/kernel_linkage.c
@@ -27,89 +27,52 @@
#include <asm/fpu.h>
#include <asm/fpu_emulator.h>
-#define SIGNALLING_NAN 0x7ff800007ff80000LL
+#define SIGNALLING_NAN 0x7ff800007ff80000LL
+#define SIGNALLING_NAN2008 0x7ff000007fa00000LL
-void fpu_emulator_init_fpu(void)
+extern unsigned int fpu_fcr31 __read_mostly;
+extern unsigned int system_has_fpu __read_mostly;
+static int nan2008 __read_mostly = -1;
+
+static int __init setup_nan2008(char *str)
+{
+ get_option(&str, &nan2008);
+
+ return 1;
+}
+
+__setup("nan2008=", setup_nan2008);
+
+void fpu_emulator_init_fpu(struct task_struct *target)
{
static int first = 1;
int i;
+ int j;
if (first) {
first = 0;
printk("Algorithmics/MIPS FPU Emulator v1.5\n");
}
- current->thread.fpu.fcr31 = 0;
- for (i = 0; i < 32; i++) {
- current->thread.fpu.fpr[i] = SIGNALLING_NAN;
+ if (system_has_fpu)
+ target->thread.fpu.fcr31 = fpu_fcr31;
+ else if (nan2008 < 0) {
+ if (test_thread_local_flags(LTIF_FPU_FR))
+ target->thread.fpu.fcr31 = FPU_CSR_DEFAULT|FPU_CSR_MAC2008|FPU_CSR_ABS2008|FPU_CSR_NAN2008;
+ else
+ target->thread.fpu.fcr31 = FPU_CSR_DEFAULT;
+ } else {
+ if (nan2008)
+ target->thread.fpu.fcr31 = FPU_CSR_DEFAULT|FPU_CSR_MAC2008|FPU_CSR_ABS2008|FPU_CSR_NAN2008;
+ else
+ target->thread.fpu.fcr31 = FPU_CSR_DEFAULT;
}
+
+ if (target->thread.fpu.fcr31 & FPU_CSR_NAN2008) {
+ for (j = 0; j < (FPU_REG_WIDTH/64); j++)
+ for (i = 0; i < NUM_FPU_REGS; i++)
+ set_fpr64(&target->thread.fpu.fpr[i], j, SIGNALLING_NAN2008);
+ } else
+ for (i = 0; i < NUM_FPU_REGS; i++)
+ set_fpr64(&target->thread.fpu.fpr[i], 0, SIGNALLING_NAN);
}
-
-
-/*
- * Emulator context save/restore to/from a signal context
- * presumed to be on the user stack, and therefore accessed
- * with appropriate macros from uaccess.h
- */
-
-int fpu_emulator_save_context(struct sigcontext __user *sc)
-{
- int i;
- int err = 0;
-
- for (i = 0; i < 32; i++) {
- err |=
- __put_user(current->thread.fpu.fpr[i], &sc->sc_fpregs[i]);
- }
- err |= __put_user(current->thread.fpu.fcr31, &sc->sc_fpc_csr);
-
- return err;
-}
-
-int fpu_emulator_restore_context(struct sigcontext __user *sc)
-{
- int i;
- int err = 0;
-
- for (i = 0; i < 32; i++) {
- err |=
- __get_user(current->thread.fpu.fpr[i], &sc->sc_fpregs[i]);
- }
- err |= __get_user(current->thread.fpu.fcr31, &sc->sc_fpc_csr);
-
- return err;
-}
-
-#ifdef CONFIG_64BIT
-/*
- * This is the o32 version
- */
-
-int fpu_emulator_save_context32(struct sigcontext32 __user *sc)
-{
- int i;
- int err = 0;
-
- for (i = 0; i < 32; i+=2) {
- err |=
- __put_user(current->thread.fpu.fpr[i], &sc->sc_fpregs[i]);
- }
- err |= __put_user(current->thread.fpu.fcr31, &sc->sc_fpc_csr);
-
- return err;
-}
-
-int fpu_emulator_restore_context32(struct sigcontext32 __user *sc)
-{
- int i;
- int err = 0;
-
- for (i = 0; i < 32; i+=2) {
- err |=
- __get_user(current->thread.fpu.fpr[i], &sc->sc_fpregs[i]);
- }
- err |= __get_user(current->thread.fpu.fcr31, &sc->sc_fpc_csr);
-
- return err;
-}
-#endif
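fpu_emulator_init_fpu() above chooses the thread's initial FCR31 in three tiers: copy the real FPU's control word when hardware exists, obey an explicit nan2008= command-line value, and otherwise infer 2008 mode from the thread's FR flag. The decision, isolated as a sketch (FPU_CSR_DEFAULT and the 2008 masks as in the kernel headers; fr_mode stands in for test_thread_local_flags(LTIF_FPU_FR)):

    #define FCSR_2008  (FPU_CSR_MAC2008 | FPU_CSR_ABS2008 | FPU_CSR_NAN2008)

    static unsigned int pick_fcr31(int has_fpu, unsigned int hw_fcr31,
                                   int nan2008_param, int fr_mode)
    {
            if (has_fpu)
                    return hw_fcr31;                /* follow the hardware */
            if (nan2008_param < 0)                  /* no nan2008= given */
                    return fr_mode ? FPU_CSR_DEFAULT | FCSR_2008
                                   : FPU_CSR_DEFAULT;
            return nan2008_param ? FPU_CSR_DEFAULT | FCSR_2008
                                 : FPU_CSR_DEFAULT;
    }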
diff --git a/arch/mips/math-emu/sp_class.c b/arch/mips/math-emu/sp_class.c
new file mode 100644
index 0000000..e1cbb07
--- /dev/null
+++ b/arch/mips/math-emu/sp_class.c
@@ -0,0 +1,78 @@
+/* IEEE754-2008 floating point arithmetic
+ * single precision: CLASS
+ */
+/*
+ * MIPS floating point support
+ * Copyright (C) 2015, Imagination Technologies, LTD.
+ * Author: Leonid.Yegoshin@imgtec.com
+ *
+ * ########################################################################
+ *
+ * This program is free software; you can distribute it and/or modify it
+ * under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * ########################################################################
+ */
+
+
+#include "ieee754sp.h"
+
+/*
+ * CLASS.fmt returns:
+ *
+ * bit 0: SNAN
+ * 1: QNAN
+ * 2: INF if value < 0
+ * 3: xNORM if value < 0
+ *
+ * 4: subNORM if value < 0
+ * 5: ZERO (-0)
+ * 6: INF if value > 0
+ * 7: xNORM if value > 0
+ *
+ * 8: subNORM if value > 0
+ * 9: ZERO (+0)
+ */
+
+int ieee754_2008sp_class(ieee754sp x)
+{
+ COMPXSP;
+
+ EXPLODEXSP;
+
+ /* convert IEEE754 to 754_2008 */
+ switch (xc) {
+ case IEEE754_CLASS_QNAN:
+ return 1;
+ case IEEE754_CLASS_SNAN:
+ return 0;
+ case IEEE754_CLASS_ZERO:
+ if (xs)
+ return 0x20;
+ return 0x200;
+ case IEEE754_CLASS_INF:
+ if (xs)
+ return 0x4;
+ return 0x40;
+ case IEEE754_CLASS_DNORM:
+ if (xs)
+ return 0x10;
+ return 0x100;
+ case IEEE754_CLASS_NORM:
+ if (xs)
+ return 0x8;
+ return 0x80;
+ default:
+ return 0x0;
+ }
+}
diff --git a/arch/mips/math-emu/sp_maddf.c b/arch/mips/math-emu/sp_maddf.c
new file mode 100644
index 0000000..416aac3
--- /dev/null
+++ b/arch/mips/math-emu/sp_maddf.c
@@ -0,0 +1,265 @@
+/* IEEE754 floating point arithmetic
+ * single precision: MADDF.f
+ */
+/*
+ * MIPS floating point support
+ * Copyright (C) 2015 Imagination Technologies, Ltd.
+ *
+ * ########################################################################
+ *
+ * This program is free software; you can distribute it and/or modify it
+ * under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * ########################################################################
+ */
+
+
+#include "ieee754sp.h"
+
+ieee754sp ieee754sp_maddf(ieee754sp z, ieee754sp x, ieee754sp y)
+{
+ int re;
+ int rs;
+ u32 rm;
+ COMPXSP;
+ COMPYSP;
+ u32 zm; int ze; int zs __maybe_unused; int zc;
+
+ EXPLODEXSP;
+ EXPLODEYSP;
+ EXPLODESP(z, zc, zs, ze, zm)
+
+ CLEARCX;
+
+ FLUSHXSP;
+ FLUSHYSP;
+ FLUSHSP(z, zc, zs, ze, zm);
+
+ switch (zc) {
+ case IEEE754_CLASS_SNAN:
+ SETCX(IEEE754_INVALID_OPERATION);
+ return ieee754sp_nanxcpt(ieee754sp_indef(), "maddf", z, x, y);
+ case IEEE754_CLASS_DNORM:
+ SPDNORMx(zm, ze);
+ break;
+ }
+
+ /* ==== mult ==== */
+ switch (CLPAIR(xc, yc)) {
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+ SETCX(IEEE754_INVALID_OPERATION);
+ return ieee754sp_nanxcpt(ieee754sp_indef(), "maddf", x, y);
+
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+ return y;
+
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
+ return x;
+
+
+ /* Infinity handling */
+
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
+ if (zc == IEEE754_CLASS_QNAN)
+ return z;
+ SETCX(IEEE754_INVALID_OPERATION);
+		return ieee754sp_xcpt(ieee754sp_indef(), "maddf", x, y);
+
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+ if (zc == IEEE754_CLASS_QNAN)
+ return z;
+ return ieee754sp_inf(xs ^ ys);
+
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
+ if (zc == IEEE754_CLASS_QNAN)
+ return z;
+ if (zc == IEEE754_CLASS_INF)
+ return ieee754sp_inf(zs);
+ return z;
+
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+ SPDNORMX;
+
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
+ if (zc == IEEE754_CLASS_QNAN)
+ return z;
+ if (zc == IEEE754_CLASS_INF)
+ return ieee754sp_inf(zs);
+ SPDNORMY;
+ break;
+
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
+ if (zc == IEEE754_CLASS_QNAN)
+ return z;
+ if (zc == IEEE754_CLASS_INF)
+ return ieee754sp_inf(zs);
+ SPDNORMX;
+ break;
+
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_NORM):
+ if (zc == IEEE754_CLASS_QNAN)
+ return z;
+ if (zc == IEEE754_CLASS_INF)
+ return ieee754sp_inf(zs);
+ break;
+ }
+ /* rm = xm * ym, re = xe+ye basically */
+ assert(xm & SP_HIDDEN_BIT);
+ assert(ym & SP_HIDDEN_BIT);
+
+ re = xe + ye;
+ rs = xs ^ ys;
+
+ /* shunt to top of word */
+ xm <<= 32 - (SP_MBITS + 1);
+ ym <<= 32 - (SP_MBITS + 1);
+
+	/* multiply 32-bit xm,ym to give the high 32 bits of rm, with
+	 * stickiness
+	 */
+
+ {
+ unsigned short lxm = xm & 0xffff;
+ unsigned short hxm = xm >> 16;
+ unsigned short lym = ym & 0xffff;
+ unsigned short hym = ym >> 16;
+ unsigned lrm;
+ unsigned hrm;
+
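+		/*
+		 * Combine four 16x16->32 partial products; the unsigned
+		 * "at < lrm" tests below detect carry out of the low word.
+		 */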
+ lrm = lxm * lym; /* 16bit * 16bit => 32bit */
+ hrm = hxm * hym; /* 16bit * 16bit => 32bit */
+
+ {
+ unsigned t = lxm * hym; /* 16 * 16 => 32 */
+ {
+ unsigned at = lrm + (t << 16);
+
+ hrm += at < lrm;
+ lrm = at;
+ }
+ hrm = hrm + (t >> 16);
+ }
+
+ {
+ unsigned t = hxm * lym; /* 16 * 16 => 32 */
+ {
+ unsigned at = lrm + (t << 16);
+
+ hrm += at < lrm;
+ lrm = at;
+ }
+ hrm = hrm + (t >> 16);
+ }
+ rm = hrm | (lrm != 0);
+ }
+
+ /*
+ * sticky shift down to normal rounding precision
+ */
+ if ((int) rm < 0) {
+ rm =
+ (rm >> (32 - (SP_MBITS + 1 + 3))) |
+ ((rm << (SP_MBITS + 1 + 3)) != 0);
+ re++;
+ } else {
+ rm =
+ (rm >> (32 - (SP_MBITS + 1 + 3 + 1))) |
+ ((rm << (SP_MBITS + 1 + 3 + 1)) != 0);
+ }
+
+ assert(rm & (SP_HIDDEN_BIT << 3));
+
+ /* ==== add ==== */
+
+ assert(zm & SP_HIDDEN_BIT);
+
+	/* provide guard, round and sticky bit space */
+ zm <<= 3;
+
+ if (ze > re) {
+		/* have to shift the product fraction right to align
+		 */
+ int s = ze - re;
+ rm = XSPSRS(rm, s);
+ re += s;
+ } else if (re > ze) {
+		/* have to shift the addend (z) fraction right to align
+		 */
+ int s = re - ze;
+ zm = XSPSRS(zm, s);
+ ze += s;
+ }
+ assert(ze == re);
+ assert(ze <= SP_EMAX);
+
+ if (zs == rs) {
+ /* generate 28 bit result of adding two 27 bit numbers
+ * leaving result in zm,zs,ze
+ */
+ zm = zm + rm;
+
+ if (zm >> (SP_MBITS + 1 + 3)) { /* carry out */
+ zm = XSPSRS1(zm); /* shift preserving sticky */
+ ze++;
+ }
+ } else {
+ if (zm >= rm) {
+ zm = zm - rm;
+ } else {
+ zm = rm - zm;
+ zs = rs;
+ }
+ if (zm == 0)
+ return ieee754sp_zero(ieee754_csr.rm ==
+ IEEE754_RD);
+
+ /* normalize to rounding precision */
+ while ((zm >> (SP_MBITS + 3)) == 0) {
+ zm <<= 1;
+ ze--;
+ }
+
+ }
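+	/* Normalize, round and repack the guard/round/sticky-extended result. */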
+ SPNORMRET3(zs, ze, zm, "maddf", z, x, y);
+
+}
diff --git a/arch/mips/math-emu/sp_minmax.c b/arch/mips/math-emu/sp_minmax.c
new file mode 100644
index 0000000..7d39f1e
--- /dev/null
+++ b/arch/mips/math-emu/sp_minmax.c
@@ -0,0 +1,432 @@
+/* IEEE754 floating point arithmetic
+ * single precision: MIN/MAX/MINA/MAXA
+ */
+/*
+ * MIPS floating point support
+ * Copyright (C) 2015 Imagination technologies, Ltd.
+ *
+ * ########################################################################
+ *
+ * This program is free software; you can distribute it and/or modify it
+ * under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * ########################################################################
+ *
+ */
+
+
+#include "ieee754sp.h"
+
+ieee754sp ieee754sp_fmin(ieee754sp x, ieee754sp y)
+{
+ COMPXSP;
+ COMPYSP;
+
+ EXPLODEXSP;
+ EXPLODEYSP;
+
+ CLEARCX;
+
+ FLUSHXSP;
+ FLUSHYSP;
+
+ switch (CLPAIR(xc, yc)) {
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+ SETCX(IEEE754_INVALID_OPERATION);
+ return ieee754sp_nanxcpt(ieee754sp_indef(), "min", x, y);
+
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+ return x;
+
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
+ return y;
+
+ /* Infinity and zero handling
+ */
+
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
+ if (xs)
+ return x;
+ return y;
+
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
+ if (ys)
+ return y;
+ return x;
+
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+ if (xs == ys)
+ return x;
+ return ieee754sp_zero(1);
+
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+ SPDNORMX;
+
+ /* FALL THROUGH */
+
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
+ SPDNORMY;
+ break;
+
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
+ SPDNORMX;
+ break;
+
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_NORM):
+ break;
+ }
+ assert(xm & SP_HIDDEN_BIT);
+ assert(ym & SP_HIDDEN_BIT);
+
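+	/*
+	 * Both operands are now finite and normalized: differing signs make
+	 * the negative operand the minimum; otherwise compare exponents,
+	 * then mantissas.
+	 */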
+ if (xs ^ ys) {
+ if (xs)
+ return x;
+ return y;
+ }
+
+ if (xe > ye)
+ return y;
+ if (ye > xe)
+ return x;
+
+ assert(xe == ye);
+ assert(xe <= SP_EMAX);
+
+ if (xm <= ym)
+ return x;
+ return y;
+}
+
+ieee754sp ieee754sp_fmax(ieee754sp x, ieee754sp y)
+{
+ COMPXSP;
+ COMPYSP;
+
+ EXPLODEXSP;
+ EXPLODEYSP;
+
+ CLEARCX;
+
+ FLUSHXSP;
+ FLUSHYSP;
+
+ switch (CLPAIR(xc, yc)) {
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+ SETCX(IEEE754_INVALID_OPERATION);
+ return ieee754sp_nanxcpt(ieee754sp_indef(), "max", x, y);
+
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+ return x;
+
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
+ return y;
+
+ /* Infinity and zero handling
+ */
+
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
+ if (!xs)
+ return x;
+ return y;
+
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
+ if (!ys)
+ return y;
+ return x;
+
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+ if (xs == ys)
+ return x;
+ return ieee754sp_zero(0);
+
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+ SPDNORMX;
+
+ /* FALL THROUGH */
+
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
+ SPDNORMY;
+ break;
+
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
+ SPDNORMX;
+ break;
+
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_NORM):
+ break;
+ }
+ assert(xm & SP_HIDDEN_BIT);
+ assert(ym & SP_HIDDEN_BIT);
+
+ if (xs ^ ys) {
+ if (!xs)
+ return x;
+ return y;
+ }
+
+ if (xe > ye)
+ return x;
+ if (ye > xe)
+ return y;
+
+ assert(xe == ye);
+ assert(xe <= SP_EMAX);
+
+ if (xm < ym)
+ return y;
+ return x;
+}
+
+ieee754sp ieee754sp_fmina(ieee754sp x, ieee754sp y)
+{
+ COMPXSP;
+ COMPYSP;
+
+ EXPLODEXSP;
+ EXPLODEYSP;
+
+ CLEARCX;
+
+ FLUSHXSP;
+ FLUSHYSP;
+
+ switch (CLPAIR(xc, yc)) {
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+ SETCX(IEEE754_INVALID_OPERATION);
+ return ieee754sp_nanxcpt(ieee754sp_indef(), "mina", x, y);
+
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+ return x;
+
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
+ return y;
+
+ /* Infinity and zero handling
+ */
+
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
+ return y;
+
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
+ return x;
+
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+ if (xs == ys)
+ return x;
+ return ieee754sp_zero(1);
+
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+ SPDNORMX;
+
+ /* FALL THROUGH */
+
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
+ SPDNORMY;
+ break;
+
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
+ SPDNORMX;
+ break;
+
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_NORM):
+ break;
+ }
+ assert(xm & SP_HIDDEN_BIT);
+ assert(ym & SP_HIDDEN_BIT);
+
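+	/* MINA compares magnitudes only: sign bits are ignored from here on. */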
+ if (xe > ye)
+ return y;
+ if (ye > xe)
+ return x;
+
+ assert(xe == ye);
+ assert(xe <= SP_EMAX);
+
+ if (xm <= ym)
+ return x;
+ return y;
+}
+
+ieee754sp ieee754sp_fmaxa(ieee754sp x, ieee754sp y)
+{
+ COMPXSP;
+ COMPYSP;
+
+ EXPLODEXSP;
+ EXPLODEYSP;
+
+ CLEARCX;
+
+ FLUSHXSP;
+ FLUSHYSP;
+
+ switch (CLPAIR(xc, yc)) {
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+ SETCX(IEEE754_INVALID_OPERATION);
+ return ieee754sp_nanxcpt(ieee754sp_indef(), "maxa", x, y);
+
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+ return x;
+
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
+ return y;
+
+ /* Infinity and zero handling
+ */
+
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
+ return x;
+
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
+ return y;
+
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+ if (xs == ys)
+ return x;
+ return ieee754sp_zero(0);
+
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+ SPDNORMX;
+
+ /* FALL THROUGH */
+
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
+ SPDNORMY;
+ break;
+
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
+ SPDNORMX;
+ break;
+
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_NORM):
+ break;
+ }
+ assert(xm & SP_HIDDEN_BIT);
+ assert(ym & SP_HIDDEN_BIT);
+
+ if (xe > ye)
+ return x;
+ if (ye > xe)
+ return y;
+
+ assert(xe == ye);
+ assert(xe <= SP_EMAX);
+
+ if (xm < ym)
+ return y;
+ return x;
+}
diff --git a/arch/mips/math-emu/sp_msubf.c b/arch/mips/math-emu/sp_msubf.c
new file mode 100644
index 0000000..106591c
--- /dev/null
+++ b/arch/mips/math-emu/sp_msubf.c
@@ -0,0 +1,268 @@
+/* IEEE754 floating point arithmetic
+ * single precision: MSUBF.f
+ */
+/*
+ * MIPS floating point support
+ * Copyright (C) 2015 Imagination Technologies, Ltd.
+ *
+ * ########################################################################
+ *
+ * This program is free software; you can distribute it and/or modify it
+ * under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ *
+ * ########################################################################
+ */
+
+
+#include "ieee754sp.h"
+
+ieee754sp ieee754sp_msubf(ieee754sp z, ieee754sp x, ieee754sp y)
+{
+ int re;
+ int rs;
+ u32 rm;
+ COMPXSP;
+ COMPYSP;
+ u32 zm; int ze; int zs __maybe_unused; int zc;
+
+ EXPLODEXSP;
+ EXPLODEYSP;
+ EXPLODESP(z, zc, zs, ze, zm)
+
+ CLEARCX;
+
+ FLUSHXSP;
+ FLUSHYSP;
+ FLUSHSP(z, zc, zs, ze, zm);
+
+ switch (zc) {
+ case IEEE754_CLASS_SNAN:
+ SETCX(IEEE754_INVALID_OPERATION);
+		return ieee754sp_nanxcpt(ieee754sp_indef(), "msubf", z, x, y);
+ case IEEE754_CLASS_DNORM:
+ SPDNORMx(zm, ze);
+ break;
+ }
+
+ /* ==== mult ==== */
+ switch (CLPAIR(xc, yc)) {
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
+ SETCX(IEEE754_INVALID_OPERATION);
+		return ieee754sp_nanxcpt(ieee754sp_indef(), "msubf", x, y);
+
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
+ return y;
+
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
+ return x;
+
+
+ /* Infinity handling */
+
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
+ if (zc == IEEE754_CLASS_QNAN)
+ return z;
+ SETCX(IEEE754_INVALID_OPERATION);
+		return ieee754sp_xcpt(ieee754sp_indef(), "msubf", x, y);
+
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
+ if (zc == IEEE754_CLASS_QNAN)
+ return z;
+ return ieee754sp_inf(xs ^ ys);
+
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
+ case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
+ if (zc == IEEE754_CLASS_QNAN)
+ return z;
+ if (zc == IEEE754_CLASS_INF)
+ return ieee754sp_inf(zs);
+ return z;
+
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
+ SPDNORMX;
+
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
+ if (zc == IEEE754_CLASS_QNAN)
+ return z;
+ if (zc == IEEE754_CLASS_INF)
+ return ieee754sp_inf(zs);
+ SPDNORMY;
+ break;
+
+ case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
+ if (zc == IEEE754_CLASS_QNAN)
+ return z;
+ if (zc == IEEE754_CLASS_INF)
+ return ieee754sp_inf(zs);
+ SPDNORMX;
+ break;
+
+ case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_NORM):
+ if (zc == IEEE754_CLASS_QNAN)
+ return z;
+ if (zc == IEEE754_CLASS_INF)
+ return ieee754sp_inf(zs);
+ break;
+ }
+ /* rm = xm * ym, re = xe+ye basically */
+ assert(xm & SP_HIDDEN_BIT);
+ assert(ym & SP_HIDDEN_BIT);
+
+ re = xe + ye;
+ rs = xs ^ ys;
+
+ /* shunt to top of word */
+ xm <<= 32 - (SP_MBITS + 1);
+ ym <<= 32 - (SP_MBITS + 1);
+
+	/* multiply 32-bit xm,ym to give the high 32 bits of rm, with
+	 * stickiness
+	 */
+
+ {
+ unsigned short lxm = xm & 0xffff;
+ unsigned short hxm = xm >> 16;
+ unsigned short lym = ym & 0xffff;
+ unsigned short hym = ym >> 16;
+ unsigned lrm;
+ unsigned hrm;
+
+ lrm = lxm * lym; /* 16bit * 16bit => 32bit */
+ hrm = hxm * hym; /* 16bit * 16bit => 32bit */
+
+ {
+ unsigned t = lxm * hym; /* 16 * 16 => 32 */
+ {
+ unsigned at = lrm + (t << 16);
+
+ hrm += at < lrm;
+ lrm = at;
+ }
+ hrm = hrm + (t >> 16);
+ }
+
+ {
+ unsigned t = hxm * lym; /* 16 * 16 => 32 */
+ {
+ unsigned at = lrm + (t << 16);
+
+ hrm += at < lrm;
+ lrm = at;
+ }
+ hrm = hrm + (t >> 16);
+ }
+ rm = hrm | (lrm != 0);
+ }
+
+ /*
+ * sticky shift down to normal rounding precision
+ */
+ if ((int) rm < 0) {
+ rm =
+ (rm >> (32 - (SP_MBITS + 1 + 3))) |
+ ((rm << (SP_MBITS + 1 + 3)) != 0);
+ re++;
+ } else {
+ rm =
+ (rm >> (32 - (SP_MBITS + 1 + 3 + 1))) |
+ ((rm << (SP_MBITS + 1 + 3 + 1)) != 0);
+ }
+
+ assert(rm & (SP_HIDDEN_BIT << 3));
+
+ /* ==== add ==== */
+
+ /* flip sign of r and handle as add */
+ rs ^= 1;
+
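+	/* From here on this matches MADDF's add stage, now computing z - x*y. */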
+ assert(zm & SP_HIDDEN_BIT);
+
+	/* provide guard, round and sticky bit space */
+ zm <<= 3;
+
+ if (ze > re) {
+		/* have to shift the product fraction right to align
+		 */
+ int s = ze - re;
+ rm = XSPSRS(rm, s);
+ re += s;
+ } else if (re > ze) {
+		/* have to shift the addend (z) fraction right to align
+		 */
+ int s = re - ze;
+ zm = XSPSRS(zm, s);
+ ze += s;
+ }
+ assert(ze == re);
+ assert(ze <= SP_EMAX);
+
+ if (zs == rs) {
+ /* generate 28 bit result of adding two 27 bit numbers
+ * leaving result in zm,zs,ze
+ */
+ zm = zm + rm;
+
+ if (zm >> (SP_MBITS + 1 + 3)) { /* carry out */
+ zm = XSPSRS1(zm); /* shift preserving sticky */
+ ze++;
+ }
+ } else {
+ if (zm >= rm) {
+ zm = zm - rm;
+ } else {
+ zm = rm - zm;
+ zs = rs;
+ }
+ if (zm == 0)
+ return ieee754sp_zero(ieee754_csr.rm ==
+ IEEE754_RD);
+
+ /* normalize to rounding precision */
+ while ((zm >> (SP_MBITS + 3)) == 0) {
+ zm <<= 1;
+ ze--;
+ }
+
+ }
+	SPNORMRET3(zs, ze, zm, "msubf", z, x, y);
+
+}
diff --git a/arch/mips/mm/Makefile b/arch/mips/mm/Makefile
index e87aae1..a93f7e7 100644
--- a/arch/mips/mm/Makefile
+++ b/arch/mips/mm/Makefile
@@ -21,6 +21,6 @@
obj-$(CONFIG_IP22_CPU_SCACHE) += sc-ip22.o
obj-$(CONFIG_R5000_CPU_SCACHE) += sc-r5k.o
obj-$(CONFIG_RM7000_CPU_SCACHE) += sc-rm7k.o
-obj-$(CONFIG_MIPS_CPU_SCACHE) += sc-mips.o
+obj-$(CONFIG_MIPS_CPU_SCACHE) += sc-mips.o cm3-l2-init.o
obj-$(CONFIG_SYS_SUPPORTS_MICROMIPS) += uasm-micromips.o
diff --git a/arch/mips/mm/c-octeon.c b/arch/mips/mm/c-octeon.c
index 8557fb5..4e082e1 100644
--- a/arch/mips/mm/c-octeon.c
+++ b/arch/mips/mm/c-octeon.c
@@ -176,6 +176,14 @@
BUG();
}
+static void octeon_flush_data_cache_range(struct vm_area_struct *vma,
+	unsigned long vaddr, struct page *page, unsigned long addr,
+ unsigned long size)
+{
+ octeon_flush_cache_page(vma, addr, page_to_pfn(page));
+}
+
+
/**
* Probe Octeon's caches
*
@@ -275,6 +283,7 @@
flush_cache_sigtramp = octeon_flush_cache_sigtramp;
flush_icache_all = octeon_flush_icache_all;
flush_data_cache_page = octeon_flush_data_cache_page;
+ mips_flush_data_cache_range = octeon_flush_data_cache_range;
flush_icache_range = octeon_flush_icache_range;
local_flush_icache_range = local_octeon_flush_icache_range;
diff --git a/arch/mips/mm/c-r3k.c b/arch/mips/mm/c-r3k.c
index 704dc73..cad7d0f 100644
--- a/arch/mips/mm/c-r3k.c
+++ b/arch/mips/mm/c-r3k.c
@@ -274,6 +274,13 @@
{
}
+static void r3k_mips_flush_data_cache_range(struct vm_area_struct *vma,
+ unsigned long vaddr, struct page *page, unsigned long addr,
+ unsigned long size)
+{
+ r3k_flush_cache_page(vma, addr, page_to_pfn(page));
+}
+
static void r3k_flush_cache_sigtramp(unsigned long addr)
{
unsigned long flags;
@@ -322,7 +329,7 @@
flush_cache_all = r3k_flush_cache_all;
__flush_cache_all = r3k___flush_cache_all;
flush_cache_mm = r3k_flush_cache_mm;
- flush_cache_range = r3k_flush_cache_range;
+ mips_flush_data_cache_range = r3k_mips_flush_data_cache_range;
flush_cache_page = r3k_flush_cache_page;
flush_icache_range = r3k_flush_icache_range;
local_flush_icache_range = r3k_flush_icache_range;
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 21813be..60f509e 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -6,12 +6,14 @@
* Copyright (C) 1996 David S. Miller (davem@davemloft.net)
* Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Ralf Baechle (ralf@gnu.org)
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2012, MIPS Technology, Leonid Yegoshin (yegoshin@mips.com)
*/
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
+#include <linux/preempt.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
@@ -34,6 +36,7 @@
#include <asm/cacheflush.h> /* for run_uncached() */
#include <asm/traps.h>
#include <asm/dma-coherence.h>
+#include <asm/gcmpregs.h>
/*
* Special Variant of smp_call_function for use by cache functions:
@@ -43,6 +46,13 @@
* o collapses to normal function call on systems with a single shared
* primary cache.
* o doesn't disable interrupts on the local CPU
+ *
+ * Note: this function is now used for address cacheops only
+ *
+ * Note2: It is unsafe to use address cacheops via an SMP call: the other
+ *	 CPU may not have this process's address map (ASID) loaded into
+ *	 EntryHI, and working around that needs tricks which are absent
+ *	 from this file. Cross-CPU address cacheops are neither easy nor
+ *	 safe here.
*/
static inline void r4k_on_each_cpu(void (*func) (void *info), void *info)
{
@@ -55,13 +65,67 @@
preempt_enable();
}
-#if defined(CONFIG_MIPS_CMP)
+#if defined(CONFIG_MIPS_CMP) && defined(CONFIG_SMP)
#define cpu_has_safe_index_cacheops 0
#else
#define cpu_has_safe_index_cacheops 1
#endif
/*
+ * This variant of smp_call_function is used for index cacheops only.
+ */
+static inline void r4k_indexop_on_each_cpu(void (*func) (void *info), void *info)
+{
+ preempt_disable();
+
+#ifdef CONFIG_SMP
+ if (!cpu_has_safe_index_cacheops) {
+
+ if (smp_num_siblings > 1) {
+ cpumask_t tmp_mask = INIT_CPUMASK;
+ int cpu, this_cpu, n = 0;
+
+			/* If the processor lacks safe index cacheops (likely),
+			   run the cache flush on other CPUs as well. Siblings
+			   are assumed to share an L1 cache, so the flush is
+			   run only once per sibling group.  LY22 */
+
+ this_cpu = smp_processor_id();
+ for_each_online_cpu(cpu) {
+
+ if (cpumask_test_cpu(cpu, (&per_cpu(cpu_sibling_map, this_cpu))))
+ continue;
+
+ if (cpumask_intersects(&tmp_mask, (&per_cpu(cpu_sibling_map, cpu))))
+ continue;
+ cpu_set(cpu, tmp_mask);
+ n++;
+ }
+ if (n)
+ smp_call_function_many(&tmp_mask, func, info, 1);
+ } else
+ smp_call_function(func, info, 1);
+ }
+#endif
+ func(info);
+ preempt_enable();
+}
+
+/* Define a rough size below which address cacheops are still cheaper than
+ * index cacheops over the whole cache (in D/I-cache size terms).
+ * The value "2" reflects the expense of smp_call_function() on top of a
+ * whole-cache flush via index cacheops.
+ */
+#ifndef CACHE_CPU_LATENCY
+#ifdef CONFIG_SMP
+#define CACHE_CPU_LATENCY (2)
+#else
+#define CACHE_CPU_LATENCY (1)
+#endif
+#endif
+
+
+/*
* Must die.
*/
static unsigned long icache_size __read_mostly;
@@ -121,6 +185,28 @@
r4k_blast_dcache_page = r4k_blast_dcache_page_dc64;
}
+#ifndef CONFIG_EVA
+#define r4k_blast_dcache_user_page r4k_blast_dcache_page
+#else
+
+static void (*r4k_blast_dcache_user_page)(unsigned long addr);
+
+static void __cpuinit r4k_blast_dcache_user_page_setup(void)
+{
+ unsigned long dc_lsize = cpu_dcache_line_size();
+
+ if (dc_lsize == 0)
+ r4k_blast_dcache_user_page = (void *)cache_noop;
+ else if (dc_lsize == 16)
+ r4k_blast_dcache_user_page = blast_dcache16_user_page;
+ else if (dc_lsize == 32)
+ r4k_blast_dcache_user_page = blast_dcache32_user_page;
+ else if (dc_lsize == 64)
+ r4k_blast_dcache_user_page = blast_dcache64_user_page;
+}
+
+#endif
+
static void (* r4k_blast_dcache_page_indexed)(unsigned long addr);
static void __cpuinit r4k_blast_dcache_page_indexed_setup(void)
@@ -241,6 +327,27 @@
r4k_blast_icache_page = blast_icache64_page;
}
+#ifndef CONFIG_EVA
+#define r4k_blast_icache_user_page r4k_blast_icache_page
+#else
+
+static void (* r4k_blast_icache_user_page)(unsigned long addr);
+
+static void __cpuinit r4k_blast_icache_user_page_setup(void)
+{
+ unsigned long ic_lsize = cpu_icache_line_size();
+
+ if (ic_lsize == 0)
+ r4k_blast_icache_user_page = (void *)cache_noop;
+ else if (ic_lsize == 16)
+ r4k_blast_icache_user_page = blast_icache16_user_page;
+ else if (ic_lsize == 32)
+ r4k_blast_icache_user_page = blast_icache32_user_page;
+ else if (ic_lsize == 64)
+ r4k_blast_icache_user_page = blast_icache64_user_page;
+}
+
+#endif
static void (* r4k_blast_icache_page_indexed)(unsigned long addr);
@@ -349,6 +456,8 @@
return;
#endif
r4k_blast_dcache();
+ if (!cpu_has_ic_fills_f_dc)
+ mb();
r4k_blast_icache();
switch (current_cpu_type()) {
@@ -365,7 +474,7 @@
static void r4k___flush_cache_all(void)
{
- r4k_on_each_cpu(local_r4k___flush_cache_all, NULL);
+ r4k_indexop_on_each_cpu(local_r4k___flush_cache_all, NULL);
}
static inline int has_valid_asid(const struct mm_struct *mm)
@@ -383,16 +492,79 @@
#endif
}
-static void r4k__flush_cache_vmap(void)
+
+static inline void local_r4__flush_dcache(void *args)
{
r4k_blast_dcache();
}
-static void r4k__flush_cache_vunmap(void)
+struct vmap_args {
+ unsigned long start;
+ unsigned long end;
+};
+
+static inline void local_r4__flush_cache_vmap(void *args)
{
- r4k_blast_dcache();
+	blast_dcache_range(((struct vmap_args *)args)->start, ((struct vmap_args *)args)->end);
}
+static void r4k__flush_cache_vmap(unsigned long start, unsigned long end)
+{
+ unsigned long size = end - start;
+
+ if (cpu_has_cm3_inclusive_pcaches)
+ return;
+
+ if (cpu_has_safe_index_cacheops && size >= dcache_size) {
+ r4k_blast_dcache();
+ } else {
+/* Commented out until the bug in free_unmap_vmap_area() is fixed - it calls
+   us with an unmapped page, so an address cacheop takes a TLB refill exception
+ if (size >= (dcache_size * CACHE_CPU_LATENCY))
+ */
+ r4k_indexop_on_each_cpu(local_r4__flush_dcache, NULL);
+/* Commented out until the bug in free_unmap_vmap_area() is fixed - it calls
+   us with an unmapped page, so an address cacheop takes a TLB refill exception
+ else {
+ struct vmap_args args;
+
+ args.start = start;
+ args.end = end;
+ r4k_on_each_cpu(local_r4__flush_cache_vmap, (void *)&args);
+ }
+ */
+ }
+}
+
+static void r4k__flush_cache_vunmap(unsigned long start, unsigned long end)
+{
+ unsigned long size = end - start;
+
+ if (cpu_has_cm3_inclusive_pcaches)
+ return;
+
+ if (cpu_has_safe_index_cacheops && size >= dcache_size)
+ r4k_blast_dcache();
+ else {
+/* Commented out until the bug in free_unmap_vmap_area() is fixed - it calls
+   us with an unmapped page, so an address cacheop takes a TLB refill exception
+ if (size >= (dcache_size * CACHE_CPU_LATENCY))
+ */
+ r4k_indexop_on_each_cpu(local_r4__flush_dcache, NULL);
+/* Commented out until the bug in free_unmap_vmap_area() is fixed - it calls
+   us with an unmapped page, so an address cacheop takes a TLB refill exception
+ else {
+ struct vmap_args args;
+
+ args.start = start;
+ args.end = end;
+ r4k_on_each_cpu(local_r4__flush_cache_vmap, (void *)&args);
+ }
+ */
+ }
+}
+
+
static inline void local_r4k_flush_cache_range(void * args)
{
struct vm_area_struct *vma = args;
@@ -401,9 +573,13 @@
if (!(has_valid_asid(vma->vm_mm)))
return;
- r4k_blast_dcache();
- if (exec)
+ if (!cpu_has_cm3_inclusive_pcaches)
+ r4k_blast_dcache();
+ if (exec) {
+ if ((!cpu_has_cm3_inclusive_pcaches) && !cpu_has_ic_fills_f_dc)
+ mb();
r4k_blast_icache();
+ }
}
static void r4k_flush_cache_range(struct vm_area_struct *vma,
@@ -412,7 +588,7 @@
int exec = vma->vm_flags & VM_EXEC;
if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc))
- r4k_on_each_cpu(local_r4k_flush_cache_range, vma);
+ r4k_indexop_on_each_cpu(local_r4k_flush_cache_range, vma);
}
static inline void local_r4k_flush_cache_mm(void * args)
@@ -444,7 +620,7 @@
if (!cpu_has_dc_aliases)
return;
- r4k_on_each_cpu(local_r4k_flush_cache_mm, mm);
+ r4k_indexop_on_each_cpu(local_r4k_flush_cache_mm, mm);
}
struct flush_cache_page_args {
@@ -467,6 +643,7 @@
pmd_t *pmdp;
pte_t *ptep;
void *vaddr;
+	int dontflush = 0;
/*
* If ownes no valid ASID yet, cannot possibly have gotten
@@ -488,9 +665,26 @@
if (!(pte_present(*ptep)))
return;
- if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID))
- vaddr = NULL;
- else {
+	/* Fast path: with no exec mapping and no D-cache aliases there is
+	   nothing to flush, so skip the kmap_*()/kunmap_*() work below */
+ if ((!exec) && !cpu_has_dc_aliases)
+ return;
+
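+	/*
+	 * The page is mapped in the current ASID: flush it directly through
+	 * the user virtual address, avoiding the kmap path below.
+	 */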
+ if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID)) {
+ if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
+ if (!cpu_has_cm3_inclusive_pcaches) {
+ r4k_blast_dcache_user_page(addr);
+ if (exec && (!cpu_has_cm2) && !cpu_has_ic_fills_f_dc)
+ mb();
+ }
+ if (exec && !cpu_icache_snoops_remote_store)
+ r4k_blast_scache_page(addr);
+ }
+ if (exec) {
+ r4k_blast_icache_user_page(addr);
+ if (gcmp_present)
+ mb();
+ }
+ } else {
/*
* Use kmap_coherent or kmap_atomic to do flushes for
* another ASID than the current one.
@@ -502,28 +696,44 @@
else
vaddr = kmap_atomic(page);
addr = (unsigned long)vaddr;
- }
- if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
- r4k_blast_dcache_page(addr);
- if (exec && !cpu_icache_snoops_remote_store)
- r4k_blast_scache_page(addr);
- }
- if (exec) {
- if (vaddr && cpu_has_vtag_icache && mm == current->active_mm) {
- int cpu = smp_processor_id();
+ if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
+ if (!cpu_has_cm3_inclusive_pcaches) {
+ r4k_blast_dcache_page(addr);
+ if (exec && (!cpu_has_cm2) && !cpu_has_ic_fills_f_dc)
+ mb();
+ }
+ if (exec && !cpu_icache_snoops_remote_store)
+ r4k_blast_scache_page(addr);
+ }
+ if (exec) {
+ if (cpu_has_vtag_icache && mm == current->active_mm) {
+ int cpu = smp_processor_id();
- if (cpu_context(cpu, mm) != 0)
- drop_mmu_context(mm, cpu);
- } else
- r4k_blast_icache_page(addr);
- }
+ if (cpu_context(cpu, mm) != 0)
+ drop_mmu_context(mm, cpu);
+				dontflush = 1;
+ } else
+ if (map_coherent || !cpu_has_ic_aliases) {
+ r4k_blast_icache_page(addr);
+ if (gcmp_present)
+ mb();
+ }
+ }
- if (vaddr) {
if (map_coherent)
kunmap_coherent();
else
kunmap_atomic(vaddr);
+
+ /* in case of I-cache aliasing - blast it via coherent page */
+	if (exec && cpu_has_ic_aliases && (!dontflush) && !map_coherent) {
+ vaddr = kmap_coherent(page, addr);
+ r4k_blast_icache_page((unsigned long)vaddr);
+ if (gcmp_present)
+ mb();
+ kunmap_coherent();
+ }
}
}
@@ -537,6 +747,8 @@
args.pfn = pfn;
r4k_on_each_cpu(local_r4k_flush_cache_page, &args);
+ if (cpu_has_dc_aliases)
+ ClearPageDcacheDirty(pfn_to_page(pfn));
}
static inline void local_r4k_flush_data_cache_page(void * addr)
@@ -546,32 +758,82 @@
static void r4k_flush_data_cache_page(unsigned long addr)
{
+ if (cpu_has_cm3_inclusive_pcaches)
+ return;
+
if (in_atomic())
local_r4k_flush_data_cache_page((void *)addr);
else
r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr);
}
+
+struct mips_flush_data_cache_range_args {
+ struct vm_area_struct *vma;
+ unsigned long vaddr;
+ unsigned long start;
+ unsigned long len;
+};
+
+static inline void local_r4k_mips_flush_data_cache_range(void *args)
+{
+ struct mips_flush_data_cache_range_args *f_args = args;
+ unsigned long vaddr = f_args->vaddr;
+ unsigned long start = f_args->start;
+ unsigned long len = f_args->len;
+ struct vm_area_struct * vma = f_args->vma;
+
+ if (!cpu_has_cm3_inclusive_pcaches)
+ blast_dcache_range(start, start + len);
+
+ if ((vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc) {
+ if ((!cpu_has_cm3_inclusive_pcaches) && !cpu_has_cm2)
+ mb();
+
+		/* The vma is given for the exec check only; the mm is the
+		   current one, so only user or kernel pages are flushed -
+		   never a non-current vma's page */
+ protected_blast_icache_range(vaddr, vaddr + len);
+ if (gcmp_present)
+ mb();
+ }
+}
+
+/* Flush dirty kernel data and the corresponding user instructions (if needed).
+   Used in copy_to_user_page() */
+static void r4k_mips_flush_data_cache_range(struct vm_area_struct *vma,
+ unsigned long vaddr, struct page *page, unsigned long start,
+ unsigned long len)
+{
+ struct mips_flush_data_cache_range_args args;
+
+ if (cpu_has_cm3_inclusive_pcaches && (cpu_has_ic_fills_f_dc ||
+ !(vma->vm_flags & VM_EXEC)))
+ return;
+
+ args.vma = vma;
+ args.vaddr = vaddr;
+ args.start = start;
+ args.len = len;
+
+ r4k_on_each_cpu(local_r4k_mips_flush_data_cache_range, (void *)&args);
+}
+
+
struct flush_icache_range_args {
unsigned long start;
unsigned long end;
};
-static inline void local_r4k_flush_icache_range(unsigned long start, unsigned long end)
+static inline void local_r4k_flush_icache(void *args)
{
- if (!cpu_has_ic_fills_f_dc) {
- if (end - start >= dcache_size) {
- r4k_blast_dcache();
- } else {
- R4600_HIT_CACHEOP_WAR_IMPL;
- protected_blast_dcache_range(start, end);
- }
+ if ((!cpu_has_ic_fills_f_dc) && !cpu_has_cm3_inclusive_pcaches) {
+ r4k_blast_dcache();
+ mb();
}
- if (end - start > icache_size)
- r4k_blast_icache();
- else
- protected_blast_icache_range(start, end);
+ r4k_blast_icache();
+ if (gcmp_present)
+ mb();
}
static inline void local_r4k_flush_icache_range_ipi(void *args)
@@ -580,20 +842,73 @@
unsigned long start = fir_args->start;
unsigned long end = fir_args->end;
- local_r4k_flush_icache_range(start, end);
+ if ((!cpu_has_ic_fills_f_dc) && !cpu_has_cm3_inclusive_pcaches) {
+ R4600_HIT_CACHEOP_WAR_IMPL;
+ protected_blast_dcache_range(start, end);
+
+ if (!cpu_has_cm2)
+ mb();
+ }
+
+ protected_blast_icache_range(start, end);
+ if (gcmp_present)
+ mb();
}
+/* This function is used on the local CPU only, e.g. while booting */
+static inline void local_r4k_flush_icache_range(unsigned long start, unsigned long end)
+{
+ if ((!cpu_has_ic_fills_f_dc) && !cpu_has_cm3_inclusive_pcaches) {
+ if (end - start >= dcache_size) {
+ r4k_blast_dcache();
+ } else {
+ R4600_HIT_CACHEOP_WAR_IMPL;
+ blast_dcache_range(start, end);
+ }
+ mb();
+ }
+
+ if (end - start > icache_size)
+ r4k_blast_icache();
+ else
+ blast_icache_range(start, end);
+#ifdef CONFIG_EVA
+	/* This is here to smooth out the effect of any kind of address
+	   aliasing. It is used only during boot, so it has no impact on
+	   performance.  LY22 */
+ bc_wback_inv(start, (end - start));
+#endif
+ __sync();
+}
+
+/* This function can be called for kernel OR user addresses:
+ * kernel ones come from modules and *gdb*, user ones from binfmt_aout/flat.
+ * So take care - check get_fs() */
static void r4k_flush_icache_range(unsigned long start, unsigned long end)
{
struct flush_icache_range_args args;
+ unsigned long size = end - start;
args.start = start;
args.end = end;
- r4k_on_each_cpu(local_r4k_flush_icache_range_ipi, &args);
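+	/*
+	 * Pick the cheapest strategy: blast whole caches locally when the
+	 * range covers them and index cacheops are safe, flush by address on
+	 * each CPU for small ranges, or fall back to whole-cache index
+	 * cacheops on every CPU.
+	 */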
+ if (cpu_has_safe_index_cacheops &&
+ (((size >= icache_size) && !cpu_has_ic_fills_f_dc) ||
+ (size >= dcache_size)))
+ local_r4k_flush_icache((void *)&args);
+ else if (((size < (icache_size * CACHE_CPU_LATENCY)) && !cpu_has_ic_fills_f_dc) ||
+ (size < (dcache_size * CACHE_CPU_LATENCY))) {
+ r4k_on_each_cpu(local_r4k_flush_icache_range_ipi, (void *)&args);
+ } else
+ r4k_indexop_on_each_cpu(local_r4k_flush_icache, NULL);
instruction_hazard();
}
+
#ifdef CONFIG_DMA_NONCOHERENT
static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
@@ -601,11 +916,13 @@
/* Catch bad driver code */
BUG_ON(size == 0);
+ preempt_disable();
if (cpu_has_inclusive_pcaches) {
if (size >= scache_size)
r4k_blast_scache();
else
blast_scache_range(addr, addr + size);
+ preempt_enable();
__sync();
return;
}
@@ -615,15 +932,19 @@
* subset property so we have to flush the primary caches
* explicitly
*/
- if (cpu_has_safe_index_cacheops && size >= dcache_size) {
- r4k_blast_dcache();
- } else {
- R4600_HIT_CACHEOP_WAR_IMPL;
- blast_dcache_range(addr, addr + size);
+ if (!cpu_has_cm3_inclusive_pcaches) {
+ if (cpu_has_safe_index_cacheops && size >= dcache_size) {
+ r4k_blast_dcache();
+ } else {
+ R4600_HIT_CACHEOP_WAR_IMPL;
+ blast_dcache_range(addr, addr + size);
+ }
}
+ preempt_enable();
bc_wback_inv(addr, size);
- __sync();
+ if (!cpu_has_cm2_l2sync)
+ __sync();
}
static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
@@ -631,6 +952,7 @@
/* Catch bad driver code */
BUG_ON(size == 0);
+ preempt_disable();
if (cpu_has_inclusive_pcaches) {
if (size >= scache_size)
r4k_blast_scache();
@@ -645,16 +967,20 @@
*/
blast_inv_scache_range(addr, addr + size);
}
+ preempt_enable();
__sync();
return;
}
- if (cpu_has_safe_index_cacheops && size >= dcache_size) {
- r4k_blast_dcache();
- } else {
- R4600_HIT_CACHEOP_WAR_IMPL;
- blast_inv_dcache_range(addr, addr + size);
+ if (!cpu_has_cm3_inclusive_pcaches) {
+ if (cpu_has_safe_index_cacheops && size >= dcache_size) {
+ r4k_blast_dcache();
+ } else {
+ R4600_HIT_CACHEOP_WAR_IMPL;
+ blast_inv_dcache_range(addr, addr + size);
+ }
}
+ preempt_enable();
bc_inv(addr, size);
__sync();
@@ -666,6 +992,17 @@
* very much about what happens in that case. Usually a segmentation
* fault will dump the process later on anyway ...
*/
+#ifdef CONFIG_CPU_MIPSR6
+static void local_r4k_flush_cache_sigtramp(void * arg)
+{
+ register unsigned long addr = (unsigned long) arg;
+
+ __asm__ __volatile__(
+ "synci 0(%0) \n"
+ "sync 0x10 \n" /* SYNC MB */
+ ::"r"(addr):"memory");
+}
+#else
static void local_r4k_flush_cache_sigtramp(void * arg)
{
unsigned long ic_lsize = cpu_icache_line_size();
@@ -701,6 +1038,7 @@
if (MIPS_CACHE_SYNC_WAR)
__asm__ __volatile__ ("sync");
}
+#endif
static void r4k_flush_cache_sigtramp(unsigned long addr)
{
@@ -740,10 +1078,16 @@
{
struct flush_kernel_vmap_range_args args;
+ if (cpu_has_cm3_inclusive_pcaches)
+ return;
+
args.vaddr = (unsigned long) vaddr;
args.size = size;
- r4k_on_each_cpu(local_r4k_flush_kernel_vmap_range, &args);
+ if (cpu_has_safe_index_cacheops && size >= dcache_size)
+ r4k_indexop_on_each_cpu(local_r4k_flush_kernel_vmap_range, &args);
+ else
+ r4k_on_each_cpu(local_r4k_flush_kernel_vmap_range, &args);
}
static inline void rm7k_erratum31(void)
@@ -780,25 +1124,39 @@
static inline void alias_74k_erratum(struct cpuinfo_mips *c)
{
+ unsigned int imp = c->processor_id & 0xff00;
+ unsigned int rev = c->processor_id & PRID_REV_MASK;
+
/*
* Early versions of the 74K do not update the cache tags on a
* vtag miss/ptag hit which can occur in the case of KSEG0/KUSEG
* aliases. In this case it is better to treat the cache as always
* having aliases.
*/
- if ((c->processor_id & 0xff) <= PRID_REV_ENCODE_332(2, 4, 0))
- c->dcache.flags |= MIPS_CACHE_VTAG;
- if ((c->processor_id & 0xff) == PRID_REV_ENCODE_332(2, 4, 0))
- write_c0_config6(read_c0_config6() | MIPS_CONF6_SYND);
- if (((c->processor_id & 0xff00) == PRID_IMP_1074K) &&
- ((c->processor_id & 0xff) <= PRID_REV_ENCODE_332(1, 1, 0))) {
- c->dcache.flags |= MIPS_CACHE_VTAG;
- write_c0_config6(read_c0_config6() | MIPS_CONF6_SYND);
+ switch (imp) {
+ case PRID_IMP_74K:
+ if (rev <= PRID_REV_ENCODE_332(2, 4, 0))
+ c->dcache.flags |= MIPS_CACHE_VTAG;
+ if (rev == PRID_REV_ENCODE_332(2, 4, 0))
+ write_c0_config6(read_c0_config6() | MIPS_CONF6_SYND);
+ break;
+ case PRID_IMP_1074K:
+ if (rev <= PRID_REV_ENCODE_332(1, 1, 0)) {
+ c->dcache.flags |= MIPS_CACHE_VTAG;
+ write_c0_config6(read_c0_config6() | MIPS_CONF6_SYND);
+ }
+ break;
+ default:
+ BUG();
}
}
static char *way_string[] __cpuinitdata = { NULL, "direct mapped", "2-way",
- "3-way", "4-way", "5-way", "6-way", "7-way", "8-way"
+ "3-way", "4-way", "5-way", "6-way", "7-way", "8-way",
+ "9-way", "10-way", "11-way", "12-way", "13-way", "14-way", "15-way",
+ "16-way", "17-way", "18-way", "19-way", "20-way", "21-way", "22-way",
+ "23-way", "24-way", "25-way", "26-way", "27-way", "28-way", "29-way",
+ "30-way", "31-way", "32-way"
};
static void __cpuinit probe_pcache(void)
@@ -1065,9 +1423,18 @@
case CPU_34K:
case CPU_74K:
case CPU_1004K:
+ case CPU_PROAPTIV:
+ case CPU_INTERAPTIV:
+ case CPU_VIRTUOSO:
+ case CPU_P5600:
+ case CPU_SAMURAI:
if (c->cputype == CPU_74K)
alias_74k_erratum(c);
- if ((read_c0_config7() & (1 << 16))) {
+ if (!(read_c0_config7() & MIPS_CONF7_IAR)) {
+ if (c->icache.waysize > PAGE_SIZE)
+ c->icache.flags |= MIPS_CACHE_ALIASES;
+ }
+ if (read_c0_config7() & MIPS_CONF7_AR) {
/* effectively physically indexed dcache,
thus no virtual aliases. */
c->dcache.flags |= MIPS_CACHE_PINDEX;
@@ -1078,6 +1445,14 @@
c->dcache.flags |= MIPS_CACHE_ALIASES;
}
+#ifdef CONFIG_HIGHMEM
+ if (((c->dcache.flags & MIPS_CACHE_ALIASES) &&
+ ((c->dcache.waysize / PAGE_SIZE) > FIX_N_COLOURS)) ||
+ ((c->icache.flags & MIPS_CACHE_ALIASES) &&
+ ((c->icache.waysize / PAGE_SIZE) > FIX_N_COLOURS)))
+ panic("PAGE_SIZE*WAYS too small for L1 size, too many colors");
+#endif
+
switch (c->cputype) {
case CPU_20KC:
/*
@@ -1100,10 +1475,12 @@
c->icache.ways = 1;
#endif
- printk("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n",
- icache_size >> 10,
+ printk("Primary instruction cache %ldkB, %s, %s, %slinesize %d bytes.\n",
+ icache_size >> 10, way_string[c->icache.ways],
c->icache.flags & MIPS_CACHE_VTAG ? "VIVT" : "VIPT",
- way_string[c->icache.ways], c->icache.linesz);
+ (c->icache.flags & MIPS_CACHE_ALIASES) ?
+ "I-cache aliases, " : "",
+ c->icache.linesz);
printk("Primary data cache %ldkB, %s, %s, %s, linesize %d bytes\n",
dcache_size >> 10, way_string[c->dcache.ways],
@@ -1251,7 +1628,8 @@
default:
if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 |
- MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)) {
+ MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2 |
+ MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6)) {
#ifdef CONFIG_MIPS_CPU_SCACHE
if (mips_sc_init ()) {
scache_size = c->scache.ways * c->scache.sets * c->scache.linesz;
@@ -1329,11 +1707,11 @@
NXP_BARRIER();
}
-static int __cpuinitdata cca = -1;
+unsigned int mips_cca = INT_MIN | K_CALG_NONCOHERENT;
static int __init cca_setup(char *str)
{
- get_option(&str, &cca);
+ get_option(&str, &mips_cca);
return 0;
}
@@ -1342,12 +1720,12 @@
static void __cpuinit coherency_setup(void)
{
- if (cca < 0 || cca > 7)
- cca = read_c0_config() & CONF_CM_CMASK;
- _page_cachable_default = cca << _CACHE_SHIFT;
+ if (mips_cca < 0 || mips_cca > 7)
+ mips_cca = read_c0_config() & CONF_CM_CMASK;
+ _page_cachable_default = mips_cca << _CACHE_SHIFT;
- pr_debug("Using cache attribute %d\n", cca);
- change_c0_config(CONF_CM_CMASK, cca);
+ pr_debug("Using cache attribute %d\n", mips_cca);
+ change_c0_config(CONF_CM_CMASK, mips_cca);
/*
* c0_status.cu=0 specifies that updates by the sc instruction use
@@ -1416,6 +1794,10 @@
r4k_blast_scache_page_setup();
r4k_blast_scache_page_indexed_setup();
r4k_blast_scache_setup();
+#ifdef CONFIG_EVA
+ r4k_blast_dcache_user_page_setup();
+ r4k_blast_icache_user_page_setup();
+#endif
/*
* Some MIPS32 and MIPS64 processors have physically indexed caches.
@@ -1444,11 +1826,12 @@
flush_icache_all = r4k_flush_icache_all;
local_flush_data_cache_page = local_r4k_flush_data_cache_page;
flush_data_cache_page = r4k_flush_data_cache_page;
+ mips_flush_data_cache_range = r4k_mips_flush_data_cache_range;
flush_icache_range = r4k_flush_icache_range;
local_flush_icache_range = local_r4k_flush_icache_range;
#if defined(CONFIG_DMA_NONCOHERENT)
- if (coherentio) {
+ if (coherentio > 0) {
_dma_cache_wback_inv = (void *)cache_noop;
_dma_cache_wback = (void *)cache_noop;
_dma_cache_inv = (void *)cache_noop;
@@ -1468,6 +1851,13 @@
* or not to flush caches.
*/
local_r4k___flush_cache_all(NULL);
+#ifdef CONFIG_EVA
+	/* This is done just in case some address aliasing exists on the
+	   board, like the old Malta memory map. It doesn't hurt anyway. LY22 */
+ smp_wmb();
+ r4k_blast_scache();
+ smp_wmb();
+#endif
coherency_setup();
board_cache_error_setup = r4k_cache_error_setup;
diff --git a/arch/mips/mm/c-tx39.c b/arch/mips/mm/c-tx39.c
index ba9da27..99d497a 100644
--- a/arch/mips/mm/c-tx39.c
+++ b/arch/mips/mm/c-tx39.c
@@ -122,12 +122,12 @@
local_irq_restore(flags);
}
-static void tx39__flush_cache_vmap(void)
+static void tx39__flush_cache_vmap(unsigned long start, unsigned long end)
{
tx39_blast_dcache();
}
-static void tx39__flush_cache_vunmap(void)
+static void tx39__flush_cache_vunmap(unsigned long start, unsigned long end)
{
tx39_blast_dcache();
}
@@ -230,6 +230,13 @@
tx39_blast_dcache_page(addr);
}
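+/* Arbitrary ranges fall back to flushing the whole containing page */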
+static void local_flush_data_cache_range(struct vm_area_struct *vma,
+ unsigned long vaddr, struct page *page, unsigned long addr,
+ unsigned long size)
+{
+ flush_cache_page(vma, addr, page_to_pfn(page));
+}
+
static void tx39_flush_icache_range(unsigned long start, unsigned long end)
{
if (end - start > dcache_size)
@@ -371,6 +378,7 @@
flush_cache_sigtramp = (void *) tx39h_flush_icache_all;
local_flush_data_cache_page = (void *) tx39h_flush_icache_all;
+ mips_flush_data_cache_range = (void *) local_flush_data_cache_range;
flush_data_cache_page = (void *) tx39h_flush_icache_all;
_dma_cache_wback_inv = tx39h_dma_cache_wback_inv;
@@ -402,6 +410,7 @@
flush_cache_sigtramp = tx39_flush_cache_sigtramp;
local_flush_data_cache_page = local_tx39_flush_data_cache_page;
+ mips_flush_data_cache_range = (void *) local_flush_data_cache_range;
flush_data_cache_page = tx39_flush_data_cache_page;
_dma_cache_wback_inv = tx39_dma_cache_wback_inv;
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index 5aeb3eb..71a60ff 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -20,6 +20,7 @@
#include <asm/processor.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
+#include <linux/highmem.h>
/* Cache operations. */
void (*flush_cache_all)(void);
@@ -32,8 +33,8 @@
void (*flush_icache_range)(unsigned long start, unsigned long end);
void (*local_flush_icache_range)(unsigned long start, unsigned long end);
-void (*__flush_cache_vmap)(void);
-void (*__flush_cache_vunmap)(void);
+void (*__flush_cache_vmap)(unsigned long start, unsigned long end);
+void (*__flush_cache_vunmap)(unsigned long start, unsigned long end);
void (*__flush_kernel_vmap_range)(unsigned long vaddr, int size);
void (*__invalidate_kernel_vmap_range)(unsigned long vaddr, int size);
@@ -44,10 +45,14 @@
void (*flush_cache_sigtramp)(unsigned long addr);
void (*local_flush_data_cache_page)(void * addr);
void (*flush_data_cache_page)(unsigned long addr);
+void (*mips_flush_data_cache_range)(struct vm_area_struct *vma,
+ unsigned long vaddr, struct page *page, unsigned long addr,
+ unsigned long size);
void (*flush_icache_all)(void);
EXPORT_SYMBOL_GPL(local_flush_data_cache_page);
EXPORT_SYMBOL(flush_data_cache_page);
+EXPORT_SYMBOL(mips_flush_data_cache_range);
EXPORT_SYMBOL(flush_icache_all);
#ifdef CONFIG_DMA_NONCOHERENT
@@ -80,12 +85,9 @@
void __flush_dcache_page(struct page *page)
{
- struct address_space *mapping = page_mapping(page);
- unsigned long addr;
+ void *addr;
- if (PageHighMem(page))
- return;
- if (mapping && !mapping_mapped(mapping)) {
+ if (page_mapping(page) && !page_mapped(page)) {
SetPageDcacheDirty(page);
return;
}
@@ -95,25 +97,55 @@
* case is for exec env/arg pages and those are %99 certainly going to
* get faulted into the tlb (and thus flushed) anyways.
*/
- addr = (unsigned long) page_address(page);
- flush_data_cache_page(addr);
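+	/*
+	 * Highmem pages have no permanent kernel mapping; create a transient
+	 * one just for the flush.
+	 */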
+ if (PageHighMem(page)) {
+ addr = kmap_atomic(page);
+ flush_data_cache_page((unsigned long)addr);
+ kunmap_atomic(addr);
+ } else {
+ addr = (void *) page_address(page);
+ flush_data_cache_page((unsigned long)addr);
+ }
+ ClearPageDcacheDirty(page);
}
EXPORT_SYMBOL(__flush_dcache_page);
void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
- unsigned long addr = (unsigned long) page_address(page);
+ if (!PageHighMem(page)) {
+ unsigned long addr = (unsigned long) page_address(page);
- if (pages_do_alias(addr, vmaddr)) {
- if (page_mapped(page) && !Page_dcache_dirty(page)) {
- void *kaddr;
+ if (pages_do_alias(addr, vmaddr & PAGE_MASK)) {
+ if (page_mapped(page) && !Page_dcache_dirty(page)) {
+ void *kaddr;
- kaddr = kmap_coherent(page, vmaddr);
- flush_data_cache_page((unsigned long)kaddr);
- kunmap_coherent();
- } else
- flush_data_cache_page(addr);
+ kaddr = kmap_coherent(page, vmaddr);
+ flush_data_cache_page((unsigned long)kaddr);
+ kunmap_coherent();
+ } else {
+ flush_data_cache_page(addr);
+ ClearPageDcacheDirty(page);
+ }
+ }
+ } else {
+ void *laddr = lowmem_page_address(page);
+
+ if (pages_do_alias((unsigned long)laddr, vmaddr & PAGE_MASK)) {
+ if (page_mapped(page) && !Page_dcache_dirty(page)) {
+ void *kaddr;
+
+ kaddr = kmap_coherent(page, vmaddr);
+ flush_data_cache_page((unsigned long)kaddr);
+ kunmap_coherent();
+ } else {
+ void *kaddr;
+
+ kaddr = kmap_atomic(page);
+ flush_data_cache_page((unsigned long)kaddr);
+ kunmap_atomic(kaddr);
+ ClearPageDcacheDirty(page);
+ }
+ }
}
}
@@ -127,15 +159,28 @@
int exec = (vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc;
pfn = pte_pfn(pte);
- if (unlikely(!pfn_valid(pfn)))
+ if (unlikely(!pfn_valid(pfn))) {
+ wmb();
return;
- page = pfn_to_page(pfn);
- if (page_mapping(page) && Page_dcache_dirty(page)) {
- addr = (unsigned long) page_address(page);
- if (exec || pages_do_alias(addr, address & PAGE_MASK))
- flush_data_cache_page(addr);
- ClearPageDcacheDirty(page);
}
+ page = pfn_to_page(pfn);
+ if (page_mapped(page) && Page_dcache_dirty(page)) {
+ void *kaddr = NULL;
+ if (PageHighMem(page)) {
+ addr = (unsigned long)kmap_atomic(page);
+ kaddr = (void *)addr;
+ } else
+ addr = (unsigned long) page_address(page);
+ if (exec || (cpu_has_dc_aliases &&
+ pages_do_alias(addr, address & PAGE_MASK))) {
+ flush_data_cache_page(addr);
+ ClearPageDcacheDirty(page);
+ }
+
+ if (kaddr)
+ kunmap_atomic((void *)kaddr);
+ }
+ wmb(); /* finish any outstanding arch cache flushes before ret to user */
}
unsigned long _page_cachable_default;
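
The highmem handling added to __flush_dcache_page() above follows a
map/flush/unmap pattern that any cache operation needing a kernel virtual
address can reuse. A minimal sketch in C (flush_page_dcache() is an
illustrative wrapper, not part of this patch):

	#include <linux/highmem.h>

	/* Flush the D-cache lines of a page that may live in highmem. */
	static void flush_page_dcache(struct page *page)
	{
		void *addr;

		if (PageHighMem(page)) {
			/* No permanent kernel mapping - create a short-lived one. */
			addr = kmap_atomic(page);
			flush_data_cache_page((unsigned long)addr);
			kunmap_atomic(addr);
		} else {
			/* Lowmem pages are permanently mapped. */
			flush_data_cache_page((unsigned long)page_address(page));
		}
	}
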
diff --git a/arch/mips/mm/cm3-l2-init.S b/arch/mips/mm/cm3-l2-init.S
new file mode 100644
index 0000000..fa961b8
--- /dev/null
+++ b/arch/mips/mm/cm3-l2-init.S
@@ -0,0 +1,135 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2014 Imagination Technologies Limited. All rights reserved.
+ * Written by Leonid Yegoshin on base of Tom Berg code example
+ */
+/*
+ * CM3 L2 cache initialization (if it was skipped by uboot/zboot/*boot/etc.)
+ * Runs once, during boot.
+ *
+ * args:
+ * r4 - DCache linesize
+ * r5 - INDEX BASE for DCache flush
+ * r6 - DCache size
+ * r7 - GCR (GCMP) base VA
+ */
+
+#ifdef CONFIG_MIPS_CMP
+#include <asm/asm.h>
+#include <asm/asmmacro.h>
+#include <asm/regdef.h>
+
+
+LEAF(cm3_l2_init)
+ .set noreorder
+
+init_l2:
+ # L2 Cache initialization routine
+
+ # Check L2 cache size
+ lw $10, 0x0130($7) # load GCR_L2_CONFIG
+
+ # Isolate L2 Line Size
+ ext $11, $10, 11, 4 # line size
+
+ # Skip ahead if No L2
+ beq $11, $0, done_l2cache
+ move $2, $0
+ li $2, 1
+
+ li $14, 2
+ sllv $11, $14, $11 # Now have true L2 line size in bytes
+
+ # Isolate L2 Sets per Way
+ ext $12, $10, 12, 4 # sets per way
+ li $14, 64
+ sllv $12, $14, $12 # L2 Sets per way
+
+ # Isolate L2 Associativity
+ # L2 Assoc (-1)
+ ext $13, $10, 0, 8 # assoc
+ addiu $13, $13, 1
+
+ mul $12, $12, $13 # Get total number of sets
+ lui $14, 0x8000 # Get a KSeg0 address for cacheops
+
+ # Clear GCR Tag/Data registers
+ sw $0, 0x600($7) # GCR_TAG_ADDR = 0
+ sw $0, 0x604($7) # GCR_TAG_ADDR = 0
+ sw $0, 0x608($7) # GCR_TAG_STATE = 0
+ sw $0, 0x60c($7) # GCR_TAG_STATE = 0
+ sw $0, 0x610($7) # GCR_TAG_DATA = 0
+ sw $0, 0x614($7) # GCR_TAG_DATA = 0
+
+ li $8, 0xfcffffff
+ and $10, $8 # clear bits 25:24 to make sure ECC is calculated by HW
+ li $8, 0x04000000
+ or $10, $8 # set bit 26 of GCR_L2_CONFIG to make sure LRU is written
+ sw $10, 0x130($7) # write the GCR_L2_CONFIG
+
+ sync # make sure these are complete
+
+ move $15, $12
+
+ # L2 Index Store Tag Cache Op
+ # Will invalidate the tag entry
+1: cache 0xB, 0($14) # SCIndexStTag
+ addiu $15, $15, -1 # Decrement set counter
+
+ bne $15, $0, 1b
+ add $14, $11 # Get next line address
+
+done_l2cache:
+
+ # *** if the dcache has been enabled before this point,
+ # then it should be flushed at this point.
+ beqz $4, 3f
+ nop
+2:
+ cache 0x1, 0($5)
+ subu $6, $4
+ bgtz $6, 2b
+ add $5, $4
+ sync
+
+3:
+ # Enable coherence on this core
+ li $9, 0x01
+ sw $9, 0x2008($7) # GCR_CL_COHERENCE
+ sync
+
+/* The following is only needed in a multi-core system
+ * if you want to enable coherence on other cores from this core:
+ */
+
+ lw $8, 0x0($7) # GCR_CONFIG
+ ext $8, $8, 0, 8 # num of cores-1
+ beq $8, $0, cm3_init_done # if only single core system
+ nop
+
+next_coherent_core:
+ sll $11, $8, 8 # move to bits 13:8
+ sw $11, 0x2018($7) # GCR_CL_OTHER[Core] = core number
+ sync # make sure write is complete before moving on
+ sw $9, 0x4008($7) # GCR_CO_COHERENCE = 1
+ sync # make sure write is complete before moving on
+ addiu $8, $8, -1
+ bne $8, $9, next_coherent_core
+ nop
+
+cm3_init_done:
+ # turn off l2 bypass (may not be required if L1 caches are not enabled yet)
+ li $8, 20
+ sllv $25, $9, $8 # bit 20 = 1
+ or $10, $10, $25 # set bit 20 first ...
+ xor $10, $10, $25 # ... then toggle it: or+xor clears bit 20 of GCR_L2_CONFIG
+ sw $10, 0x0130($7) # set GCR_L2_CONFIG.L2_BYPASS = 0
+ sync
+ jr $31
+ nop
+
+END(cm3_l2_init)
+#endif
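
The geometry decode performed by the assembly above (a line-size field of 0
means no L2; otherwise line bytes = 2 << field, sets per way = 64 << field,
associativity = field + 1) can be written in C as follows. A sketch only -
the field positions mirror the ext instructions above and the names are
illustrative:

	/* Decode CM3 GCR_L2_CONFIG geometry the way cm3_l2_init does. */
	static void cm3_l2_geometry(unsigned int l2cfg)
	{
		unsigned int lsize = (l2cfg >> 11) & 0xf;	/* ext $11, $10, 11, 4 */
		unsigned int spw = (l2cfg >> 12) & 0xf;		/* ext $12, $10, 12, 4 */
		unsigned int assoc = (l2cfg & 0xff) + 1;	/* ext $13, $10, 0, 8; +1 */

		if (!lsize) {
			pr_info("no L2 cache\n");
			return;
		}
		pr_info("L2: %u-byte lines, %u total sets\n",
			2u << lsize, (64u << spw) * assoc);
	}
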
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index caf92ec..4ab1843 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -22,7 +22,7 @@
#include <dma-coherence.h>
-int coherentio = 0; /* User defined DMA coherency from command line. */
+int coherentio = -1; /* User-defined DMA coherency, not set yet. */
EXPORT_SYMBOL_GPL(coherentio);
int hw_coherentio = 0; /* Actual hardware supported DMA coherency setting. */
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index 0fead53..da8c5155 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -25,6 +25,7 @@
#include <asm/uaccess.h>
#include <asm/ptrace.h>
#include <asm/highmem.h> /* For VMALLOC_END */
+#include <asm/tlbmisc.h>
#include <linux/kdebug.h>
/*
@@ -125,7 +126,7 @@
#endif
goto bad_area;
}
- if (!(vma->vm_flags & VM_READ)) {
+ if ((!(vma->vm_flags & VM_READ)) && (regs->cp0_epc != address)) {
#if 0
pr_notice("Cpu%d[%s:%d:%0*lx:%ld:%0*lx] RI violation\n",
raw_smp_processor_id(),
@@ -135,6 +136,9 @@
#endif
goto bad_area;
}
+ if (((address & PAGE_MASK) == (unsigned long)(mm->context.vdso)) &&
+ install_vdso_tlb())
+ goto up_return;
} else {
if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
goto bad_area;
@@ -183,6 +187,7 @@
}
}
+up_return:
up_read(&mm->mmap_sem);
return;
diff --git a/arch/mips/mm/highmem.c b/arch/mips/mm/highmem.c
index da815d2..10fc041 100644
--- a/arch/mips/mm/highmem.c
+++ b/arch/mips/mm/highmem.c
@@ -9,6 +9,7 @@
static pte_t *kmap_pte;
unsigned long highstart_pfn, highend_pfn;
+unsigned int last_pkmap_nr_arr[FIX_N_COLOURS] = { 0, 1, 2, 3, 4, 5, 6, 7 };
void *kmap(struct page *page)
{
@@ -53,8 +54,12 @@
return page_address(page);
type = kmap_atomic_idx_push();
- idx = type + KM_TYPE_NR*smp_processor_id();
- vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+
+ idx = (((unsigned long)lowmem_page_address(page)) >> PAGE_SHIFT) & (FIX_N_COLOURS - 1);
+ idx = (FIX_N_COLOURS - idx);
+ idx = idx + FIX_N_COLOURS * (smp_processor_id() + NR_CPUS * type);
+ vaddr = __fix_to_virt(FIX_KMAP_BEGIN - 1 + idx); /* actually - FIX_CMAP_END */
+
#ifdef CONFIG_DEBUG_HIGHMEM
BUG_ON(!pte_none(*(kmap_pte - idx)));
#endif
@@ -75,12 +80,16 @@
return;
}
- type = kmap_atomic_idx();
#ifdef CONFIG_DEBUG_HIGHMEM
{
- int idx = type + KM_TYPE_NR * smp_processor_id();
+ int idx;
+ type = kmap_atomic_idx();
- BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+ idx = ((unsigned long)kvaddr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1);
+ idx = (FIX_N_COLOURS - idx);
+ idx = idx + FIX_N_COLOURS * (smp_processor_id() + NR_CPUS * type);
+
+ BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN - 1 + idx));
/*
* force other mappings to Oops if they'll try to access
@@ -95,26 +104,6 @@
}
EXPORT_SYMBOL(__kunmap_atomic);
-/*
- * This is the same as kmap_atomic() but can map memory that doesn't
- * have a struct page associated with it.
- */
-void *kmap_atomic_pfn(unsigned long pfn)
-{
- unsigned long vaddr;
- int idx, type;
-
- pagefault_disable();
-
- type = kmap_atomic_idx_push();
- idx = type + KM_TYPE_NR*smp_processor_id();
- vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
- set_pte(kmap_pte-idx, pfn_pte(pfn, PAGE_KERNEL));
- flush_tlb_one(vaddr);
-
- return (void*) vaddr;
-}
-
struct page *kmap_atomic_to_page(void *ptr)
{
unsigned long idx, vaddr = (unsigned long)ptr;
@@ -124,7 +113,7 @@
return virt_to_page(ptr);
idx = virt_to_fix(vaddr);
- pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
+ pte = kmap_pte - (idx - FIX_KMAP_BEGIN + 1);
return pte_page(*pte);
}
@@ -133,6 +122,6 @@
unsigned long kmap_vstart;
/* cache the first kmap pte */
- kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
+ kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN - 1); /* actually - FIX_CMAP_END */
kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
}
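
The kmap_atomic() change above stops using a flat (type, cpu) slot index and
instead picks a fixmap slot whose virtual colour matches the page's physical
colour, so a VIPT D-cache sees no alias between the temporary mapping and the
page's lowmem address. The slot computation, isolated as a sketch (the helper
name is illustrative):

	/* Colour-matched fixmap slot for kmap_atomic. */
	static inline int kmap_colour_idx(struct page *page, int type)
	{
		unsigned long paddr = (unsigned long)lowmem_page_address(page);
		int idx = (paddr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1);

		idx = FIX_N_COLOURS - idx;
		return idx + FIX_N_COLOURS * (smp_processor_id() + NR_CPUS * type);
	}
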
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 9b973e0..c662c83 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -43,6 +43,7 @@
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>
+#include <asm/tlbmisc.h>
/* Atomicity and interruptability */
#ifdef CONFIG_MIPS_MT_SMTC
@@ -116,14 +117,18 @@
void *kmap_coherent(struct page *page, unsigned long addr)
{
+#ifdef CONFIG_EVA
+ dump_stack();
+ panic("kmap_coherent");
+#else
+
enum fixed_addresses idx;
unsigned long vaddr, flags, entrylo;
unsigned long old_ctx;
pte_t pte;
int tlbidx;
- BUG_ON(Page_dcache_dirty(page));
-
+ /* BUG_ON(Page_dcache_dirty(page)); - removed for I-cache flush */
inc_preempt_count();
idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1);
#ifdef CONFIG_MIPS_MT_SMTC
@@ -158,20 +163,24 @@
else
tlb_write_indexed();
#else
- tlbidx = read_c0_wired();
+ tlbidx = read_c0_wired() & 0xffff;
write_c0_wired(tlbidx + 1);
write_c0_index(tlbidx);
mtc0_tlbw_hazard();
+ wired_push(vaddr & (PAGE_MASK << 1), entrylo, entrylo, PM_DEFAULT_MASK);
tlb_write_indexed();
#endif
tlbw_use_hazard();
write_c0_entryhi(old_ctx);
+ mtc0_tlbw_hazard();
EXIT_CRITICAL(flags);
return (void*) vaddr;
+#endif /* CONFIG_EVA */
}
-#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))
+#define UNIQUE_ENTRYHI(idx) (cpu_has_tlbinv ? ((CKSEG0 + ((idx) << (PAGE_SHIFT + 1))) | MIPS_EHINV) : \
+ (CKSEG0 + ((idx) << (PAGE_SHIFT + 1))))
void kunmap_coherent(void)
{
@@ -181,16 +190,18 @@
ENTER_CRITICAL(flags);
old_ctx = read_c0_entryhi();
- wired = read_c0_wired() - 1;
+ wired = (read_c0_wired() & 0xffff) - 1;
write_c0_wired(wired);
write_c0_index(wired);
write_c0_entryhi(UNIQUE_ENTRYHI(wired));
write_c0_entrylo0(0);
write_c0_entrylo1(0);
mtc0_tlbw_hazard();
+ wired_pop();
tlb_write_indexed();
tlbw_use_hazard();
write_c0_entryhi(old_ctx);
+ mtc0_tlbw_hazard();
EXIT_CRITICAL(flags);
#endif
dec_preempt_count();
@@ -213,9 +224,15 @@
copy_page(vto, vfrom);
kunmap_atomic(vfrom);
}
- if ((!cpu_has_ic_fills_f_dc) ||
- pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
+ if (cpu_has_dc_aliases)
+ SetPageDcacheDirty(to);
+ if (((vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc) ||
+ cpu_has_vtag_dcache || (cpu_has_dc_aliases &&
+ pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))) {
flush_data_cache_page((unsigned long)vto);
+ if (cpu_has_dc_aliases)
+ ClearPageDcacheDirty(to);
+ }
kunmap_atomic(vto);
/* Make sure this page is cleared on other CPU's too before using it */
smp_wmb();
@@ -225,18 +242,29 @@
struct page *page, unsigned long vaddr, void *dst, const void *src,
unsigned long len)
{
+ void *vto = NULL;
+
if (cpu_has_dc_aliases &&
page_mapped(page) && !Page_dcache_dirty(page)) {
- void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
+ vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
memcpy(vto, src, len);
- kunmap_coherent();
} else {
memcpy(dst, src, len);
if (cpu_has_dc_aliases)
SetPageDcacheDirty(page);
}
- if ((vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc)
- flush_cache_page(vma, vaddr, page_to_pfn(page));
+ if (((vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc) ||
+ (Page_dcache_dirty(page) &&
+ pages_do_alias((unsigned long)dst & PAGE_MASK,
+ vaddr & PAGE_MASK))) {
+ mips_flush_data_cache_range(vma, vaddr, page,
+ vto?(unsigned long)vto : (unsigned long)dst, len);
+
+ if (cpu_has_dc_aliases)
+ ClearPageDcacheDirty(page);
+ }
+ if (vto)
+ kunmap_coherent();
}
void copy_from_user_page(struct vm_area_struct *vma,
@@ -248,12 +276,10 @@
void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
memcpy(dst, vfrom, len);
kunmap_coherent();
- } else {
+ } else
memcpy(dst, src, len);
- if (cpu_has_dc_aliases)
- SetPageDcacheDirty(page);
- }
}
+EXPORT_SYMBOL_GPL(copy_from_user_page);
void __init fixrange_init(unsigned long start, unsigned long end,
pgd_t *pgd_base)
@@ -272,11 +298,11 @@
k = __pmd_offset(vaddr);
pgd = pgd_base + i;
- for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
+ for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
pud = (pud_t *)pgd;
- for ( ; (j < PTRS_PER_PUD) && (vaddr < end); pud++, j++) {
+ for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
pmd = (pmd_t *)pud;
- for (; (k < PTRS_PER_PMD) && (vaddr < end); pmd++, k++) {
+ for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
if (pmd_none(*pmd)) {
pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
set_pmd(pmd, __pmd((unsigned long)pte));
@@ -322,7 +348,7 @@
void __init paging_init(void)
{
unsigned long max_zone_pfns[MAX_NR_ZONES];
- unsigned long lastpfn __maybe_unused;
+ unsigned long lastpfn;
pagetable_init();
@@ -342,14 +368,6 @@
#ifdef CONFIG_HIGHMEM
max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
lastpfn = highend_pfn;
-
- if (cpu_has_dc_aliases && max_low_pfn != highend_pfn) {
- printk(KERN_WARNING "This processor doesn't support highmem."
- " %ldk highmem ignored\n",
- (highend_pfn - max_low_pfn) << (PAGE_SHIFT - 10));
- max_zone_pfns[ZONE_HIGHMEM] = max_low_pfn;
- lastpfn = max_low_pfn;
- }
#endif
free_area_init_nodes(max_zone_pfns);
@@ -447,11 +465,16 @@
void __init_refok free_initmem(void)
{
prom_free_prom_memory();
+#ifdef CONFIG_EVA
+ free_init_pages("unused memory", __pa_symbol(&__init_begin),
+ __pa_symbol(&__init_end));
+#else
free_initmem_default(POISON_FREE_INITMEM);
+#endif
}
#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
-unsigned long pgd_current[NR_CPUS];
+unsigned long pgd_current[NR_CPUS * 2];
#endif
/*
diff --git a/arch/mips/mm/page.c b/arch/mips/mm/page.c
index 4eb8dcf..b0b048d 100644
--- a/arch/mips/mm/page.c
+++ b/arch/mips/mm/page.c
@@ -99,7 +99,11 @@
uasm_i_addiu(buf, T9, ZERO, off);
uasm_i_daddu(buf, reg1, reg2, T9);
} else {
+#ifdef CONFIG_CPU_MIPSR6
+ if (off > 0xff) {
+#else
if (off > 0x7fff) {
+#endif
uasm_i_lui(buf, T9, uasm_rel_hi(off));
uasm_i_addiu(buf, T9, T9, uasm_rel_lo(off));
UASM_i_ADDU(buf, reg1, reg2, T9);
diff --git a/arch/mips/mm/pgtable-32.c b/arch/mips/mm/pgtable-32.c
index adc6911..a583004 100644
--- a/arch/mips/mm/pgtable-32.c
+++ b/arch/mips/mm/pgtable-32.c
@@ -32,8 +32,11 @@
void __init pagetable_init(void)
{
+#if defined(CONFIG_HIGHMEM) || defined(FIXADDR_START)
unsigned long vaddr;
+ unsigned long vend;
pgd_t *pgd_base;
+#endif
#ifdef CONFIG_HIGHMEM
pgd_t *pgd;
pud_t *pud;
@@ -46,13 +49,18 @@
pgd_init((unsigned long)swapper_pg_dir
+ sizeof(pgd_t) * USER_PTRS_PER_PGD);
+#ifdef FIXADDR_START
pgd_base = swapper_pg_dir;
/*
* Fixed mappings:
*/
- vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
- fixrange_init(vaddr, vaddr + FIXADDR_SIZE, pgd_base);
+ vaddr = __fix_to_virt(__end_of_fixed_addresses - 1);
+ /* Calculate real end before alignment. */
+ vend = vaddr + FIXADDR_SIZE;
+ vaddr = vaddr & PMD_MASK;
+ fixrange_init(vaddr, vend, pgd_base);
+#endif
#ifdef CONFIG_HIGHMEM
/*
diff --git a/arch/mips/mm/pgtable-64.c b/arch/mips/mm/pgtable-64.c
index e8adc00..a6ae0f1 100644
--- a/arch/mips/mm/pgtable-64.c
+++ b/arch/mips/mm/pgtable-64.c
@@ -107,5 +107,5 @@
* Fixed mappings:
*/
vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
- fixrange_init(vaddr, vaddr + FIXADDR_SIZE, pgd_base);
+ fixrange_init(vaddr, 0, pgd_base);
}
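
Passing 0 as the end address works together with the fixrange_init() change
above from "vaddr < end" to "vaddr != end": the 64-bit fixmap range ends at
the very top of the address space, so vaddr + FIXADDR_SIZE wraps to 0 and a
"<" comparison would never iterate. A sketch of the loop shape under that
assumption:

	unsigned long vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	unsigned long end = 0;	/* vaddr + FIXADDR_SIZE, wrapped around */

	for (; vaddr != end; vaddr += PMD_SIZE)
		;	/* populate one pmd's worth of fixmap ptes */
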
diff --git a/arch/mips/mm/sc-mips.c b/arch/mips/mm/sc-mips.c
index df96da7..eff7114 100644
--- a/arch/mips/mm/sc-mips.c
+++ b/arch/mips/mm/sc-mips.c
@@ -7,6 +7,8 @@
#include <linux/mm.h>
#include <asm/mipsregs.h>
+#include <asm/gcmpregs.h>
+#include <asm/gic.h>
#include <asm/bcache.h>
#include <asm/cacheops.h>
#include <asm/page.h>
@@ -18,12 +20,19 @@
* MIPS32/MIPS64 L2 cache handling
*/
+extern int cm3_l2_init(unsigned long lsize, unsigned long indexbase,
+ unsigned long dcache_size, unsigned long gcmpbase);
+
/*
* Writeback and invalidate the secondary cache before DMA.
*/
static void mips_sc_wback_inv(unsigned long addr, unsigned long size)
{
+ if (!cpu_has_cm2)
+ __sync();
blast_scache_range(addr, addr + size);
+ if (cpu_has_cm2_l2sync)
+ *(unsigned long *)(_gcmp_base + GCMP_L2SYNC_OFFSET) = 0;
}
/*
@@ -73,8 +82,11 @@
/* Check the bypass bit (L2B) */
switch (c->cputype) {
case CPU_34K:
- case CPU_74K:
case CPU_1004K:
+ case CPU_74K:
+ case CPU_PROAPTIV: /* proAptiv doesn't have L2B capability but ... */
+ case CPU_INTERAPTIV:
+ case CPU_P5600:
case CPU_BMIPS5000:
if (config2 & (1 << 12))
return 0;
@@ -88,6 +100,53 @@
return 1;
}
+#ifdef CONFIG_MIPS_CMP
+static inline int cm3_l2_setup(void)
+{
+ struct cpuinfo_mips *c = &current_cpu_data;
+ unsigned int tmp;
+ unsigned int l2config = 0;
+ unsigned int l2p;
+
+ if (gcmp3_present)
+ l2config = GCMPGCB(L2CONFIG);
+ if (!(l2config & MIPS_CONF_M))
+ return 0;
+
+ tmp = (l2config & GCMP_GCB_L2CONFIG_LSIZE_MASK) >>
+ GCMP_GCB_L2CONFIG_LSIZE_SHF;
+ if (!tmp)
+ return 0;
+
+ if (l2config & GCMP_GCB_L2CONFIG_BYPASS_MASK) {
+ if (!cm3_l2_init(c->dcache.linesz, INDEX_BASE,
+ c->dcache.sets * c->dcache.ways * c->dcache.linesz,
+ _gcmp_base))
+ return 0;
+ printk("GCR_L2_CONFIG now 0x%08x\n",GCMPGCB(L2CONFIG));
+ printk("CM3 L2 initialized\n");
+ }
+
+ c->scache.linesz = 2 << tmp;
+ tmp = (l2config & GCMP_GCB_L2CONFIG_ASSOC_MASK) >>
+ GCMP_GCB_L2CONFIG_ASSOC_SHF;
+ c->scache.ways = tmp + 1;
+ tmp = (l2config & GCMP_GCB_L2CONFIG_SSIZE_MASK) >>
+ GCMP_GCB_L2CONFIG_SSIZE_SHF;
+ c->scache.sets = 64 << tmp;
+
+ /* setup L2 prefetch */
+ l2p = GCMPGCB(GCML2P);
+ if (l2p & GCMP_GCB_GCML2P_NPFT) {
+ GCMPGCB(GCML2P) = (l2p & ~GCMP_GCB_GCML2P_PAGE_MASK) |
+ PAGE_MASK | GCMP_GCB_GCML2P_PFTEN;
+ GCMPGCB(GCML2PB) |= GCMP_GCB_GCML2PB_CODE_PFTEN;
+ }
+
+ return 1;
+}
+#endif
+
static inline int __init mips_sc_probe(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
@@ -99,7 +158,8 @@
/* Ignore anything but MIPSxx processors */
if (!(c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 |
- MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)))
+ MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2 |
+ MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6)))
return 0;
/* Does this MIPS32/MIPS64 CPU have a config2 register? */
@@ -109,20 +169,27 @@
config2 = read_c0_config2();
- if (!mips_sc_is_activated(c))
- return 0;
+ if (cpu_has_l2c || !(config2 & ~(MIPS_CONF_M|MIPS_CONF2_SU))) {
+#ifdef CONFIG_MIPS_CMP
+ if (!cm3_l2_setup())
+#endif
+ return 0;
+ } else {
+ if (!mips_sc_is_activated(c))
+ return 0;
- tmp = (config2 >> 8) & 0x0f;
- if (0 <= tmp && tmp <= 7)
- c->scache.sets = 64 << tmp;
- else
- return 0;
+ tmp = (config2 >> 8) & 0x0f;
+ if (0 <= tmp && tmp <= 7)
+ c->scache.sets = 64 << tmp;
+ else
+ return 0;
- tmp = (config2 >> 0) & 0x0f;
- if (0 <= tmp && tmp <= 7)
- c->scache.ways = tmp + 1;
- else
- return 0;
+ tmp = (config2 >> 0) & 0x0f;
+ if (0 <= tmp && tmp <= 7)
+ c->scache.ways = tmp + 1;
+ else
+ return 0;
+ }
c->scache.waysize = c->scache.sets * c->scache.linesz;
c->scache.waybit = __ffs(c->scache.waysize);
@@ -138,6 +205,7 @@
if (found) {
mips_sc_enable();
bcops = &mips_sc_ops;
- }
+ } else
+ cpu_data[0].options &= ~MIPS_CPU_CM2_L2SYNC;
return found;
}
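
Once mips_sc_probe() succeeds, bcops points at mips_sc_ops and the
noncoherent-DMA paths reach mips_sc_wback_inv() through it, picking up the
CM2 L2-sync write automatically. A minimal usage sketch
(prepare_buffer_for_dma() is illustrative, not part of this patch):

	#include <asm/bcache.h>

	/* Write back + invalidate L2 for a buffer before a device reads it. */
	static void prepare_buffer_for_dma(void *buf, unsigned long len)
	{
		unsigned long addr = (unsigned long)buf;

		if (bcops && bcops->bc_wback_inv)
			bcops->bc_wback_inv(addr, len);
	}
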
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index c643de4..ed18efd 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -27,7 +27,8 @@
* Make sure all entries differ. If they're not different
* MIPS32 will take revenge ...
*/
-#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))
+#define UNIQUE_ENTRYHI(idx) (cpu_has_tlbinv ? ((CKSEG0 + ((idx) << (PAGE_SHIFT + 1))) | MIPS_EHINV) : \
+ (CKSEG0 + ((idx) << (PAGE_SHIFT + 1))))
/* Atomicity and interruptability */
#ifdef CONFIG_MIPS_MT_SMTC
@@ -67,31 +68,80 @@
#endif
+int lowest_wired;
+int current_wired;
+static struct WiredEntry {
+ unsigned long EntryHi;
+ unsigned long EntryLo0;
+ unsigned long EntryLo1;
+ unsigned long PageMask;
+} wired_entry_array[64];
+
+
void local_flush_tlb_all(void)
{
unsigned long flags;
unsigned long old_ctx;
+ unsigned long old_pagemask;
int entry;
+ int ftlbhighset;
+ int wired;
ENTER_CRITICAL(flags);
+ htw_stop();
/* Save old context and create impossible VPN2 value */
old_ctx = read_c0_entryhi();
write_c0_entrylo0(0);
write_c0_entrylo1(0);
- entry = read_c0_wired();
+ entry = read_c0_wired() & 0xffff;
/* Blast 'em all away. */
- while (entry < current_cpu_data.tlbsize) {
- /* Make sure all entries differ. */
- write_c0_entryhi(UNIQUE_ENTRYHI(entry));
- write_c0_index(entry);
- mtc0_tlbw_hazard();
- tlb_write_indexed();
- entry++;
- }
+ if (cpu_has_tlbinv) {
+ old_pagemask = read_c0_pagemask();
+ if (cpu_has_tlbinv_full)
+ tlbinvf(); /* invalidate the whole VTLB/FTLB, index isn't used */
+ else {
+ if (current_cpu_data.tlbsizevtlb) {
+ write_c0_index(0);
+ mtc0_tlbw_hazard();
+ tlbinvf(); /* invalidate VTLB */
+ }
+ ftlbhighset = current_cpu_data.tlbsizevtlb + current_cpu_data.tlbsizeftlbsets;
+ for (entry=current_cpu_data.tlbsizevtlb;
+ entry < ftlbhighset;
+ entry++) {
+ write_c0_index(entry);
+ mtc0_tlbw_hazard();
+ tlbinvf(); /* invalidate one FTLB set */
+ }
+ }
+ /* restore wired entries */
+ for (wired = lowest_wired; wired < current_wired; wired++) {
+ write_c0_index(wired);
+ tlbw_use_hazard(); /* What is the hazard here? */
+ write_c0_pagemask(wired_entry_array[wired].PageMask);
+ write_c0_entryhi(wired_entry_array[wired].EntryHi);
+ write_c0_entrylo0(wired_entry_array[wired].EntryLo0);
+ write_c0_entrylo1(wired_entry_array[wired].EntryLo1);
+ mtc0_tlbw_hazard();
+ tlb_write_indexed();
+ tlbw_use_hazard();
+ }
+ write_c0_pagemask(old_pagemask);
+ } else
+ while (entry < current_cpu_data.tlbsize) {
+ /* Make sure all entries differ. */
+ write_c0_entryhi(UNIQUE_ENTRYHI(entry));
+ write_c0_index(entry);
+ mtc0_tlbw_hazard();
+ tlb_write_indexed();
+ entry++;
+ }
tlbw_use_hazard();
write_c0_entryhi(old_ctx);
+ mtc0_tlbw_hazard();
+ htw_start();
FLUSH_ITLB;
EXIT_CRITICAL(flags);
}
@@ -118,19 +168,23 @@
unsigned long end)
{
struct mm_struct *mm = vma->vm_mm;
- int cpu = smp_processor_id();
+ unsigned long size, flags;
+ int cpu;
+ ENTER_CRITICAL(flags);
+ cpu = smp_processor_id();
if (cpu_context(cpu, mm) != 0) {
- unsigned long size, flags;
-
- ENTER_CRITICAL(flags);
start = round_down(start, PAGE_SIZE << 1);
end = round_up(end, PAGE_SIZE << 1);
size = (end - start) >> (PAGE_SHIFT + 1);
- if (size <= current_cpu_data.tlbsize/2) {
- int oldpid = read_c0_entryhi();
- int newpid = cpu_asid(cpu, mm);
+ if ((current_cpu_data.tlbsizeftlbsets && (size <= current_cpu_data.tlbsize/8)) ||
+ ((!current_cpu_data.tlbsizeftlbsets) && (size <= current_cpu_data.tlbsize/2))) {
+ int oldpid;
+ int newpid;
+ htw_stop();
+ oldpid = read_c0_entryhi();
+ newpid = cpu_asid(cpu, mm);
while (start < end) {
int idx;
@@ -151,12 +205,14 @@
}
tlbw_use_hazard();
write_c0_entryhi(oldpid);
+ mtc0_tlbw_hazard();
+ htw_start();
} else {
drop_mmu_context(mm, cpu);
}
FLUSH_ITLB;
- EXIT_CRITICAL(flags);
}
+ EXIT_CRITICAL(flags);
}
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
@@ -166,8 +222,12 @@
ENTER_CRITICAL(flags);
size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
size = (size + 1) >> 1;
- if (size <= current_cpu_data.tlbsize / 2) {
- int pid = read_c0_entryhi();
+ if ((current_cpu_data.tlbsizeftlbsets && (size <= current_cpu_data.tlbsize/8)) ||
+ ((!current_cpu_data.tlbsizeftlbsets) && (size <= current_cpu_data.tlbsize/2))) {
+ int pid;
+
+ htw_stop();
+ pid = read_c0_entryhi();
start &= (PAGE_MASK << 1);
end += ((PAGE_SIZE << 1) - 1);
@@ -193,6 +253,8 @@
}
tlbw_use_hazard();
write_c0_entryhi(pid);
+ mtc0_tlbw_hazard();
+ htw_start();
} else {
local_flush_tlb_all();
}
@@ -202,15 +264,17 @@
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
- int cpu = smp_processor_id();
+ unsigned long flags;
+ int cpu;
+ ENTER_CRITICAL(flags);
+ cpu = smp_processor_id();
if (cpu_context(cpu, vma->vm_mm) != 0) {
- unsigned long flags;
int oldpid, newpid, idx;
newpid = cpu_asid(cpu, vma->vm_mm);
page &= (PAGE_MASK << 1);
- ENTER_CRITICAL(flags);
+ htw_stop();
oldpid = read_c0_entryhi();
write_c0_entryhi(page | newpid);
mtc0_tlbw_hazard();
@@ -229,9 +293,11 @@
finish:
write_c0_entryhi(oldpid);
+ mtc0_tlbw_hazard();
+ htw_start();
FLUSH_ITLB_VM(vma);
- EXIT_CRITICAL(flags);
}
+ EXIT_CRITICAL(flags);
}
/*
@@ -244,6 +310,7 @@
int oldpid, idx;
ENTER_CRITICAL(flags);
+ htw_stop();
oldpid = read_c0_entryhi();
page &= (PAGE_MASK << 1);
write_c0_entryhi(page);
@@ -261,6 +328,8 @@
tlbw_use_hazard();
}
write_c0_entryhi(oldpid);
+ mtc0_tlbw_hazard();
+ htw_start();
FLUSH_ITLB;
EXIT_CRITICAL(flags);
}
@@ -286,7 +355,9 @@
return;
ENTER_CRITICAL(flags);
+ htw_stop();
+ /* no need to save/restore EntryHi - ASID is preserved */
pid = read_c0_entryhi() & ASID_MASK;
address &= (PAGE_MASK << 1);
write_c0_entryhi(address | pid);
@@ -334,10 +405,82 @@
tlb_write_indexed();
}
tlbw_use_hazard();
+ htw_start();
FLUSH_ITLB_VM(vma);
EXIT_CRITICAL(flags);
}
+int wired_push(unsigned long entryhi, unsigned long entrylo0,
+ unsigned long entrylo1, unsigned long pagemask)
+{
+ if (current_wired >= current_cpu_data.tlbsizevtlb) {
+ printk("Attempt to push TLB into wired exceeding VTLV size\n");
+ BUG();
+ }
+
+ wired_entry_array[current_wired].EntryHi = entryhi;
+ wired_entry_array[current_wired].EntryLo0 = entrylo0;
+ wired_entry_array[current_wired].EntryLo1 = entrylo1;
+ wired_entry_array[current_wired].PageMask = pagemask;
+
+ return(current_wired++);
+}
+
+int wired_pop(void)
+{
+ if (current_wired <= lowest_wired) {
+ printk("Attempt to delete a not existed wired TLB\n");
+ BUG();
+ }
+
+ return(--current_wired);
+}
+
+int install_vdso_tlb(void)
+{
+ int tlbidx;
+ int cpu;
+ unsigned long flags;
+
+ if (!current_thread_info()->vdso_page)
+ return(0);
+
+ local_irq_save(flags);
+ cpu = smp_processor_id();
+ htw_stop();
+ /* no need to save/restore EntryHi - ASID is preserved */
+ write_c0_entryhi(((unsigned long)current->mm->context.vdso & (PAGE_MASK << 1)) |
+ cpu_asid(cpu, current->mm));
+
+ mtc0_tlbw_hazard();
+ tlb_probe();
+ tlb_probe_hazard();
+ tlbidx = read_c0_index();
+#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
+ write_c0_entrylo0(pte_val(pfn_pte(
+ page_to_pfn(current_thread_info()->vdso_page),
+ __pgprot(_page_cachable_default|_PAGE_VALID)))>>32);
+#else
+ write_c0_entrylo0(pte_to_entrylo(pte_val(pfn_pte(
+ page_to_pfn(current_thread_info()->vdso_page),
+ __pgprot(_page_cachable_default|_PAGE_VALID)))));
+#endif
+ write_c0_entrylo1(0);
+ mtc0_tlbw_hazard();
+ if (tlbidx < 0)
+ tlb_write_random();
+ else
+ tlb_write_indexed();
+ tlbw_use_hazard();
+
+ current->mm->context.vdso_asid[cpu] = cpu_asid(cpu, current->mm);
+ current->mm->context.vdso_page[cpu] = current_thread_info()->vdso_page;
+ htw_start();
+ local_irq_restore(flags);
+
+ return(1);
+}
+
void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
unsigned long entryhi, unsigned long pagemask)
{
@@ -348,9 +491,10 @@
ENTER_CRITICAL(flags);
/* Save old context and create impossible VPN2 value */
+ htw_stop();
old_ctx = read_c0_entryhi();
old_pagemask = read_c0_pagemask();
- wired = read_c0_wired();
+ wired = read_c0_wired() & 0xffff;
write_c0_wired(wired + 1);
write_c0_index(wired);
tlbw_use_hazard(); /* What is the hazard here? */
@@ -359,13 +503,45 @@
write_c0_entrylo0(entrylo0);
write_c0_entrylo1(entrylo1);
mtc0_tlbw_hazard();
+ wired_push(entryhi, entrylo0, entrylo1, PM_DEFAULT_MASK);
tlb_write_indexed();
tlbw_use_hazard();
write_c0_entryhi(old_ctx);
- tlbw_use_hazard(); /* What is the hazard here? */
write_c0_pagemask(old_pagemask);
- local_flush_tlb_all();
+ mtc0_tlbw_hazard();
+ htw_start();
+ EXIT_CRITICAL(flags);
+}
+
+void remove_wired_entry(void)
+{
+ unsigned long flags;
+ unsigned long wired;
+ unsigned long old_pagemask;
+ unsigned long old_ctx;
+
+ ENTER_CRITICAL(flags);
+ htw_stop();
+ /* Save old context and create impossible VPN2 value */
+ old_ctx = read_c0_entryhi();
+ old_pagemask = read_c0_pagemask();
+ wired = read_c0_wired() & 0xffff;
+ write_c0_index(wired);
+ tlbw_use_hazard(); /* What is the hazard here? */
+ write_c0_pagemask(PM_DEFAULT_MASK);
+ write_c0_entryhi(UNIQUE_ENTRYHI(wired));
+ write_c0_entrylo0(0);
+ write_c0_entrylo1(0);
+ mtc0_tlbw_hazard();
+ wired_pop();
+ tlb_write_indexed();
+ tlbw_use_hazard();
+
+ write_c0_entryhi(old_ctx);
+ write_c0_pagemask(old_pagemask);
+ mtc0_tlbw_hazard();
+ htw_start();
EXIT_CRITICAL(flags);
}
@@ -377,11 +553,14 @@
unsigned long flags;
ENTER_CRITICAL(flags);
+ htw_stop();
write_c0_pagemask(PM_HUGE_MASK);
back_to_back_c0_hazard();
mask = read_c0_pagemask();
write_c0_pagemask(PM_DEFAULT_MASK);
+ mtc0_tlbw_hazard();
+ htw_start();
EXIT_CRITICAL(flags);
return mask == PM_HUGE_MASK;
@@ -423,8 +602,9 @@
#ifdef CONFIG_64BIT
pg |= PG_ELPA;
#endif
- write_c0_pagegrain(pg);
+ write_c0_pagegrain(pg | read_c0_pagegrain());
}
+ mtc0_tlbw_hazard();
/* From this point on the ARC firmware is dead. */
local_flush_tlb_all();
@@ -436,7 +616,10 @@
int wired = current_cpu_data.tlbsize - ntlb;
write_c0_wired(wired);
write_c0_index(wired-1);
+ mtc0_tlbw_hazard();
printk("Restricting TLB to %d entries\n", ntlb);
+ current_wired = wired;
+ lowest_wired = wired;
} else
printk("Ignoring invalid argument ntlb=%d\n", ntlb);
}
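
wired_push()/wired_pop() keep a software shadow of the wired TLB entries
because the tlbinvf-based local_flush_tlb_all() wipes wired slots along with
everything else. The restore step, condensed from the flush path above into
a standalone sketch:

	/* Replay the shadowed wired entries after tlbinvf cleared the TLB. */
	static void restore_wired_entries(void)
	{
		int w;

		for (w = lowest_wired; w < current_wired; w++) {
			write_c0_index(w);
			write_c0_pagemask(wired_entry_array[w].PageMask);
			write_c0_entryhi(wired_entry_array[w].EntryHi);
			write_c0_entrylo0(wired_entry_array[w].EntryLo0);
			write_c0_entrylo1(wired_entry_array[w].EntryLo1);
			mtc0_tlbw_hazard();
			tlb_write_indexed();
			tlbw_use_hazard();
		}
	}
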
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index afeef93..4173095 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -55,7 +55,7 @@
unsigned long b;
} ____cacheline_aligned_in_smp;
-static struct tlb_reg_save handler_reg_save[NR_CPUS];
+static struct tlb_reg_save handler_reg_save[NR_CPUS * 2];
static inline int r45k_bvahwbug(void)
{
@@ -440,6 +440,7 @@
(unsigned int)(p - tlb_handler));
memcpy((void *)ebase, tlb_handler, 0x80);
+ local_flush_icache_range(ebase, ebase + 0x80);
dump_handler("r3000_tlb_refill", (u32 *)ebase, 32);
}
@@ -511,20 +512,28 @@
case tlb_indexed: tlbw = uasm_i_tlbwi; break;
}
- if (cpu_has_mips_r2) {
+ if (cpu_has_mips_r2 || cpu_has_mips_r6) {
/*
* The architecture spec says an ehb is required here,
* but a number of cores do not have the hazard and
* using an ehb causes an expensive pipeline stall.
*/
- switch (current_cpu_type()) {
- case CPU_M14KC:
- case CPU_74K:
- break;
+ if (cpu_has_mips_r2_exec_hazard) {
+ switch (current_cpu_type()) {
+ case CPU_M14KC:
+ case CPU_M14KEC:
+ case CPU_74K:
+ case CPU_PROAPTIV:
+ case CPU_INTERAPTIV:
+ case CPU_VIRTUOSO:
+ case CPU_P5600:
+ case CPU_SAMURAI:
+ break;
- default:
- uasm_i_ehb(p);
- break;
+ default:
+ uasm_i_ehb(p);
+ break;
+ }
}
tlbw(p);
return;
@@ -640,7 +649,12 @@
unsigned int reg)
{
if (cpu_has_rixi) {
- UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL));
+ if (!cpu_has_himem)
+ UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL));
+ else {
+ UASM_i_SRL(p, reg, reg, ilog2(_PAGE_NO_EXEC));
+ UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+ }
} else {
#ifdef CONFIG_64BIT_PHYS_ADDR
uasm_i_dsrl_safe(p, reg, reg, ilog2(_PAGE_GLOBAL));
@@ -973,9 +987,17 @@
#endif
uasm_i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */
uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr);
+
+ if (cpu_has_mips32r2 || cpu_has_mips32r6) {
+ uasm_i_ext(p, tmp, tmp, PGDIR_SHIFT, (32 - PGDIR_SHIFT));
+ uasm_i_ins(p, ptr, tmp, PGD_T_LOG2, (32 - PGDIR_SHIFT));
+ return;
+ }
+
uasm_i_srl(p, tmp, tmp, PGDIR_SHIFT); /* get pgd only bits */
uasm_i_sll(p, tmp, tmp, PGD_T_LOG2);
uasm_i_addu(p, ptr, ptr, tmp); /* add in pgd offset */
+
}
#endif /* !CONFIG_64BIT */
@@ -1008,6 +1030,17 @@
static void __cpuinit build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
{
+#ifndef CONFIG_64BIT
+ if (cpu_has_mips_r2 || cpu_has_mips_r6) {
+ /* For MIPS32 R2/R6, the PTE pointer offset is obtained from BadVAddr */
+ UASM_i_MFC0(p, tmp, C0_BADVADDR);
+ UASM_i_LW(p, ptr, 0, ptr);
+ uasm_i_ext(p, tmp, tmp, PAGE_SHIFT+1, PGDIR_SHIFT-PAGE_SHIFT-1);
+ uasm_i_ins(p, ptr, tmp, PTE_T_LOG2+1, PGDIR_SHIFT-PAGE_SHIFT-1);
+ return;
+ }
+#endif /* CONFIG_64BIT */
+
/*
* Bug workaround for the Nevada. It seems as if under certain
* circumstances the move from cp0_context might produce a
@@ -1043,9 +1076,17 @@
uasm_i_ld(p, tmp, 0, ptep); /* get even pte */
uasm_i_ld(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
if (cpu_has_rixi) {
- UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL));
- UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
- UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL));
+ if (!cpu_has_himem) {
+ UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL));
+ UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
+ UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL));
+ } else {
+ UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_NO_EXEC));
+ UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_NO_EXEC));
+ UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+ UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
+ UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+ }
} else {
uasm_i_dsrl_safe(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); /* convert to entrylo0 */
UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
@@ -1068,11 +1109,20 @@
if (r45k_bvahwbug())
build_tlb_probe_entry(p);
if (cpu_has_rixi) {
- UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL));
+ if (!cpu_has_himem)
+ UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL));
+ else {
+ UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_NO_EXEC));
+ UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_NO_EXEC));
+ UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
+ }
if (r4k_250MHZhwbug())
UASM_i_MTC0(p, 0, C0_ENTRYLO0);
UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
- UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL));
+ if (!cpu_has_himem)
+ UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL));
+ else
+ UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
} else {
UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); /* convert to entrylo0 */
if (r4k_250MHZhwbug())
@@ -1440,6 +1490,7 @@
final_len);
memcpy((void *)ebase, final_handler, 0x100);
+ local_flush_icache_range(ebase, ebase + 0x100);
dump_handler("r4000_tlb_refill", (u32 *)ebase, 64);
}
@@ -1867,8 +1918,16 @@
uasm_l_smp_pgtable_change(l, *p);
#endif
iPTE_LW(p, wr.r1, wr.r2); /* get even pte */
- if (!m4kc_tlbp_war())
+ if (!m4kc_tlbp_war()) {
build_tlb_probe_entry(p);
+ if (cpu_has_htw) {
+ /* a race condition may have occurred - leave */
+ uasm_i_ehb(p);
+ uasm_i_mfc0(p, wr.r3, C0_INDEX);
+ uasm_il_bltz(p, r, wr.r3, label_leave);
+ uasm_i_nop(p);
+ }
+ }
return wr;
}
@@ -1920,7 +1979,7 @@
if (m4kc_tlbp_war())
build_tlb_probe_entry(&p);
- if (cpu_has_rixi) {
+ if (cpu_has_rixi && !cpu_has_rixi_except) {
/*
* If the page is not _PAGE_VALID, RI or XI could not
* have triggered it. Skip the expensive test..
@@ -1935,6 +1994,19 @@
uasm_i_nop(&p);
uasm_i_tlbr(&p);
+
+ switch (current_cpu_type()) {
+ default:
+ if (cpu_has_mips_r2 || cpu_has_mips_r6) {
+ uasm_i_ehb(&p);
+
+ case CPU_CAVIUM_OCTEON:
+ case CPU_CAVIUM_OCTEON_PLUS:
+ case CPU_CAVIUM_OCTEON2:
+ break;
+ }
+ }
+
/* Examine entrylo 0 or 1 based on ptr. */
if (use_bbit_insns()) {
uasm_i_bbit0(&p, wr.r2, ilog2(sizeof(pte_t)), 8);
@@ -1974,7 +2046,7 @@
build_pte_present(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbl);
build_tlb_probe_entry(&p);
- if (cpu_has_rixi) {
+ if (cpu_has_rixi && !cpu_has_rixi_except) {
/*
* If the page is not _PAGE_VALID, RI or XI could not
* have triggered it. Skip the expensive test..
@@ -1989,6 +2061,19 @@
uasm_i_nop(&p);
uasm_i_tlbr(&p);
+
+ switch (current_cpu_type()) {
+ default:
+ if (cpu_has_mips_r2 || cpu_has_mips_r6) {
+ uasm_i_ehb(&p);
+
+ case CPU_CAVIUM_OCTEON:
+ case CPU_CAVIUM_OCTEON_PLUS:
+ case CPU_CAVIUM_OCTEON2:
+ break;
+ }
+ }
+
/* Examine entrylo 0 or 1 based on ptr. */
if (use_bbit_insns()) {
uasm_i_bbit0(&p, wr.r2, ilog2(sizeof(pte_t)), 8);
@@ -2155,6 +2240,108 @@
dump_handler("r4000_tlb_modify", handle_tlbm, ARRAY_SIZE(handle_tlbm));
}
+static void print_htw_config(void)
+{
+ unsigned long config;
+ unsigned int pwctl;
+ const int field = 2 * sizeof(unsigned long);
+
+ config = read_c0_pwfield();
+ pr_debug("PWField (0x%0*lx): GDI: 0x%02lx UDI: 0x%02lx MDI: 0x%02lx PTI: 0x%02lx PTEI: 0x%02lx\n",
+ field, config,
+ (config & MIPS_PWFIELD_GDI_MASK) >> MIPS_PWFIELD_GDI_SHIFT,
+ (config & MIPS_PWFIELD_UDI_MASK) >> MIPS_PWFIELD_UDI_SHIFT,
+ (config & MIPS_PWFIELD_MDI_MASK) >> MIPS_PWFIELD_MDI_SHIFT,
+ (config & MIPS_PWFIELD_PTI_MASK) >> MIPS_PWFIELD_PTI_SHIFT,
+ (config & MIPS_PWFIELD_PTEI_MASK) >> MIPS_PWFIELD_PTEI_SHIFT);
+
+ config = read_c0_pwsize();
+ pr_debug("PWSize (0x%0*lx): GDW: 0x%02lx UDW: 0x%02lx MDW: 0x%02lx PTW: 0x%02lx PTEW: 0x%02lx\n",
+ field, config,
+ (config & MIPS_PWSIZE_GDW_MASK) >> MIPS_PWSIZE_GDW_SHIFT,
+ (config & MIPS_PWSIZE_UDW_MASK) >> MIPS_PWSIZE_UDW_SHIFT,
+ (config & MIPS_PWSIZE_MDW_MASK) >> MIPS_PWSIZE_MDW_SHIFT,
+ (config & MIPS_PWSIZE_PTW_MASK) >> MIPS_PWSIZE_PTW_SHIFT,
+ (config & MIPS_PWSIZE_PTEW_MASK) >> MIPS_PWSIZE_PTEW_SHIFT);
+
+ pwctl = read_c0_pwctl();
+ pr_debug("PWCtl (0x%x): PWEn: 0x%x DPH: 0x%x HugePg: 0x%x Psn: 0x%x\n",
+ pwctl,
+ (pwctl & MIPS_PWCTL_PWEN_MASK) >> MIPS_PWCTL_PWEN_SHIFT,
+ (pwctl & MIPS_PWCTL_DPH_MASK) >> MIPS_PWCTL_DPH_SHIFT,
+ (pwctl & MIPS_PWCTL_HUGEPG_MASK) >> MIPS_PWCTL_HUGEPG_SHIFT,
+ (pwctl & MIPS_PWCTL_PSN_MASK) >> MIPS_PWCTL_PSN_SHIFT);
+}
+
+static void config_htw_params(void)
+{
+ unsigned long pwfield, pwsize, ptei;
+
+ /*
+ * We are using 2-level page tables, so we only need to
+ * set up GDW and PTW appropriately. UDW and MDW will remain 0.
+ * The default value of GDI/UDI/MDI/PTI is 0xc. It is illegal to
+ * write values less than 0xc in these fields because the entire
+ * write will be dropped. As a result, we must preserve the
+ * original reset values and overwrite only what we really want.
+ */
+
+ pwfield = read_c0_pwfield();
+ /* re-initialize the GDI field */
+ pwfield &= ~MIPS_PWFIELD_GDI_MASK;
+ pwfield |= PGDIR_SHIFT << MIPS_PWFIELD_GDI_SHIFT;
+#ifdef CONFIG_64BIT
+#if defined(CONFIG_48VMBITS) || !defined(CONFIG_PAGE_SIZE_64KB)
+ /* re-initialize the MDI field */
+ pwfield &= ~MIPS_PWFIELD_MDI_MASK;
+ pwfield |= PMD_SHIFT << MIPS_PWFIELD_MDI_SHIFT;
+ /* re-initialize the BDI field */
+ pwfield &= ~MIPS_PWFIELD_BDI_MASK;
+ pwfield |= MIPS_BASE_SHIFT << MIPS_PWFIELD_BDI_SHIFT;
+#endif
+#endif
+ /* re-initialize the PTI field including the even/odd bit */
+ pwfield &= ~MIPS_PWFIELD_PTI_MASK;
+ pwfield |= PAGE_SHIFT << MIPS_PWFIELD_PTI_SHIFT;
+ /* Set the PTEI right shift */
+ ptei = _PAGE_GLOBAL_SHIFT << MIPS_PWFIELD_PTEI_SHIFT;
+ pwfield |= ptei;
+ write_c0_pwfield(pwfield);
+ /* Check whether the PTEI value is supported */
+ back_to_back_c0_hazard();
+ pwfield = read_c0_pwfield();
+ if (((pwfield & MIPS_PWFIELD_PTEI_MASK) << MIPS_PWFIELD_PTEI_SHIFT)
+ != ptei) {
+ pr_warn("Unsupported PTEI field value: 0x%lx. HTW will not be enabled",
+ ptei);
+ /*
+ * Drop option to avoid HTW being enabled via another path
+ * (eg htw_reset())
+ */
+ current_cpu_data.options2 &= ~MIPS_CPU_HTW;
+ return;
+ }
+
+ pwsize = ilog2(PTRS_PER_PGD) << MIPS_PWSIZE_GDW_SHIFT;
+#ifdef CONFIG_64BIT
+ pwsize |= MIPS_BASE_SIZE;
+ pwsize |= MIPS_PWSIZE_PS;
+#if defined(CONFIG_48VMBITS) || !defined(CONFIG_PAGE_SIZE_64KB)
+ pwsize |= ilog2(PTRS_PER_PMD) << MIPS_PWSIZE_MDW_SHIFT;
+#endif
+#endif
+ pwsize |= ilog2(PTRS_PER_PTE) << MIPS_PWSIZE_PTW_SHIFT;
+ write_c0_pwsize(pwsize);
+
+ /* Make sure everything is set before we enable the HTW */
+ back_to_back_c0_hazard();
+
+ /* Don't enable HTW until PWBASE is set */
+ pr_info("Hardware Page Table Walker is configured\n");
+
+ print_htw_config();
+}
+
void __cpuinit build_tlb_refill_handler(void)
{
/*
@@ -2218,6 +2405,9 @@
}
if (cpu_has_local_ebase)
build_r4000_tlb_refill_handler();
+ if (cpu_has_htw)
+ config_htw_params();
+
}
}
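
config_htw_params() must read-modify-write PWField because writing a
GDI/UDI/MDI/PTI value below 0xc makes the hardware drop the whole write. The
pattern in isolation, as a sketch for a single field:

	unsigned long pwfield = read_c0_pwfield();

	/* Touch only the field we own; keep the rest at reset values. */
	pwfield &= ~MIPS_PWFIELD_GDI_MASK;
	pwfield |= PGDIR_SHIFT << MIPS_PWFIELD_GDI_SHIFT;
	write_c0_pwfield(pwfield);

	/* Read back to verify the hardware accepted the write. */
	back_to_back_c0_hazard();
	if ((read_c0_pwfield() & MIPS_PWFIELD_GDI_MASK) !=
	    (pwfield & MIPS_PWFIELD_GDI_MASK))
		pr_warn("PWField write rejected; leaving HTW disabled\n");
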
diff --git a/arch/mips/mm/uasm-mips.c b/arch/mips/mm/uasm-mips.c
index 5fcdd8f..a65c6cc1 100644
--- a/arch/mips/mm/uasm-mips.c
+++ b/arch/mips/mm/uasm-mips.c
@@ -39,6 +39,14 @@
| (e) << RE_SH \
| (f) << FUNC_SH)
+/* This macro sets the non-variable bits of an R6 instruction. */
+#define M6(a, b, c, d, f) \
+ ((a) << OP_SH \
+ | (b) << RS_SH \
+ | (c) << RT_SH \
+ | (d) << SIMM9_SH \
+ | (f) << FUNC_SH)
+
/* Define these when we are not the ISA the kernel is being compiled with. */
#ifdef CONFIG_CPU_MICROMIPS
#define CL_uasm_i_b(buf, off) ISAOPC(_beq)(buf, 0, 0, off)
@@ -54,8 +62,10 @@
{ insn_addu, M(spec_op, 0, 0, 0, 0, addu_op), RS | RT | RD },
{ insn_andi, M(andi_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
{ insn_and, M(spec_op, 0, 0, 0, 0, and_op), RS | RT | RD },
+#ifndef CONFIG_CPU_MIPSR6
{ insn_bbit0, M(lwc2_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
{ insn_bbit1, M(swc2_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
+#endif
{ insn_beql, M(beql_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
{ insn_beq, M(beq_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
{ insn_bgezl, M(bcond_op, 0, bgezl_op, 0, 0, 0), RS | BIMM },
@@ -63,7 +73,11 @@
{ insn_bltzl, M(bcond_op, 0, bltzl_op, 0, 0, 0), RS | BIMM },
{ insn_bltz, M(bcond_op, 0, bltz_op, 0, 0, 0), RS | BIMM },
{ insn_bne, M(bne_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
+#ifdef CONFIG_CPU_MIPSR6
+ { insn_cache, M6(cache_op, 0, 0, 0, cache6_op), RS | RT | SIMM9 },
+#else
{ insn_cache, M(cache_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
+#endif
{ insn_daddiu, M(daddiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
{ insn_daddu, M(spec_op, 0, 0, 0, 0, daddu_op), RS | RT | RD },
{ insn_dinsm, M(spec3_op, 0, 0, 0, 0, dinsm_op), RS | RT | RD | RE },
@@ -84,11 +98,20 @@
{ insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM },
{ insn_jal, M(jal_op, 0, 0, 0, 0, 0), JIMM },
+#ifdef CONFIG_CPU_MIPSR6
+ { insn_jr, M(spec_op, 0, 0, 0, 0, jalr_op), RS },
+#else
{ insn_jr, M(spec_op, 0, 0, 0, 0, jr_op), RS },
+#endif
{ insn_ld, M(ld_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
{ insn_ldx, M(spec3_op, 0, 0, 0, ldx_op, lx_op), RS | RT | RD },
+#ifdef CONFIG_CPU_MIPSR6
+ { insn_lld, M6(spec3_op, 0, 0, 0, lld6_op), RS | RT | SIMM9 },
+ { insn_ll, M6(spec3_op, 0, 0, 0, ll6_op), RS | RT | SIMM9 },
+#else
{ insn_lld, M(lld_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
{ insn_ll, M(ll_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
+#endif
{ insn_lui, M(lui_op, 0, 0, 0, 0, 0), RT | SIMM },
{ insn_lw, M(lw_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
{ insn_lwx, M(spec3_op, 0, 0, 0, lwx_op, lx_op), RS | RT | RD },
@@ -96,11 +119,20 @@
{ insn_mtc0, M(cop0_op, mtc_op, 0, 0, 0, 0), RT | RD | SET},
{ insn_ori, M(ori_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
{ insn_or, M(spec_op, 0, 0, 0, 0, or_op), RS | RT | RD },
+#ifdef CONFIG_CPU_MIPSR6
+ { insn_pref, M6(spec3_op, 0, 0, 0, pref6_op), RS | RT | SIMM9 },
+#else
{ insn_pref, M(pref_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
+#endif
{ insn_rfe, M(cop0_op, cop_op, 0, 0, 0, rfe_op), 0 },
{ insn_rotr, M(spec_op, 1, 0, 0, 0, srl_op), RT | RD | RE },
+#ifdef CONFIG_CPU_MIPSR6
+ { insn_scd, M6(spec3_op, 0, 0, 0, scd6_op), RS | RT | SIMM9 },
+ { insn_sc, M6(spec3_op, 0, 0, 0, sc6_op), RS | RT | SIMM9 },
+#else
{ insn_scd, M(scd_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
{ insn_sc, M(sc_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
+#endif
{ insn_sd, M(sd_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
{ insn_sll, M(spec_op, 0, 0, 0, 0, sll_op), RT | RD | RE },
{ insn_sra, M(spec_op, 0, 0, 0, 0, sra_op), RT | RD | RE },
@@ -137,6 +169,14 @@
return (arg >> 2) & JIMM_MASK;
}
+static inline __uasminit u32 build_simm9(s32 arg)
+{
+ WARN(((arg > 0xff) || (arg < -0x100)),
+ KERN_WARNING "Micro-assembler field overflow\n");
+
+ return (arg & SIMM9_MASK) << SIMM9_SH;
+}
+
/*
* The order of opcode arguments is implicitly left to right,
* starting with RS and ending with FUNC or IMM.
@@ -181,6 +221,8 @@
op |= build_set(va_arg(ap, u32));
if (ip->fields & SCIMM)
op |= build_scimm(va_arg(ap, u32));
+ if (ip->fields & SIMM9)
+ op |= build_simm9(va_arg(ap, u32));
va_end(ap);
**buf = op;
diff --git a/arch/mips/mm/uasm.c b/arch/mips/mm/uasm.c
index 7eb5e43..7125f64 100644
--- a/arch/mips/mm/uasm.c
+++ b/arch/mips/mm/uasm.c
@@ -24,7 +24,8 @@
JIMM = 0x080,
FUNC = 0x100,
SET = 0x200,
- SCIMM = 0x400
+ SCIMM = 0x400,
+ SIMM9 = 0x800
};
#define OP_MASK 0x3f
@@ -41,6 +42,8 @@
#define FUNC_SH 0
#define SET_MASK 0x7
#define SET_SH 0
+#define SIMM9_SH 7
+#define SIMM9_MASK 0x1ff
enum opcode {
insn_invalid,
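
SIMM9 operands occupy bits 15:7 (SIMM9_SH = 7, SIMM9_MASK = 0x1ff) and hold a
signed 9-bit value, which is why build_simm9() accepts -0x100..0xff. A
standalone encode/decode sketch of that field (helper names are illustrative):

	#define SIMM9_SH	7
	#define SIMM9_MASK	0x1ff

	/* Pack a signed 9-bit immediate (-256..255) into the instruction word. */
	static unsigned int encode_simm9(int arg)
	{
		return ((unsigned int)arg & SIMM9_MASK) << SIMM9_SH;
	}

	/* Extract it again, sign-extending from bit 8. */
	static int decode_simm9(unsigned int insn)
	{
		int v = (insn >> SIMM9_SH) & SIMM9_MASK;

		return (v ^ 0x100) - 0x100;
	}
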
diff --git a/arch/mips/mti-malta/malta-init.c b/arch/mips/mti-malta/malta-init.c
index ff8caff..845d92f 100644
--- a/arch/mips/mti-malta/malta-init.c
+++ b/arch/mips/mti-malta/malta-init.c
@@ -20,6 +20,7 @@
#include <asm/traps.h>
#include <asm/fw/fw.h>
#include <asm/gcmpregs.h>
+#include <asm/cpcregs.h>
#include <asm/mips-boards/generic.h>
#include <asm/mips-boards/malta.h>
@@ -84,10 +85,15 @@
extern char except_vec_nmi;
base = cpu_has_veic ?
+#ifndef CONFIG_EVA
(void *)(CAC_BASE + 0xa80) :
(void *)(CAC_BASE + 0x380);
+#else
+ (void *)(YAMON_BASE + 0xa80) :
+ (void *)(YAMON_BASE + 0x380);
+#endif
memcpy(base, &except_vec_nmi, 0x80);
- flush_icache_range((unsigned long)base, (unsigned long)base + 0x80);
+ local_flush_icache_range((unsigned long)base, (unsigned long)base + 0x80);
}
static void __init mips_ejtag_setup(void)
@@ -96,12 +102,18 @@
extern char except_vec_ejtag_debug;
base = cpu_has_veic ?
+#ifndef CONFIG_EVA
(void *)(CAC_BASE + 0xa00) :
(void *)(CAC_BASE + 0x300);
+#else
+ (void *)(YAMON_BASE + 0xa00) :
+ (void *)(YAMON_BASE + 0x300);
+#endif
memcpy(base, &except_vec_ejtag_debug, 0x80);
- flush_icache_range((unsigned long)base, (unsigned long)base + 0x80);
+ local_flush_icache_range((unsigned long)base, (unsigned long)base + 0x80);
}
+void __init prom_mem_check(int niocu);
extern struct plat_smp_ops msmtc_smp_ops;
void __init prom_init(void)
@@ -230,9 +242,39 @@
MSC01_PCI_SWAP_BYTESWAP << MSC01_PCI_SWAP_MEM_SHF |
MSC01_PCI_SWAP_BYTESWAP << MSC01_PCI_SWAP_BAR0_SHF);
#endif
- /* Fix up target memory mapping. */
+ /* Fix up target memory mapping. */
+#ifndef CONFIG_EVA
MSC_READ(MSC01_PCI_BAR0, mask);
MSC_WRITE(MSC01_PCI_P2SCMSKL, mask & MSC01_PCI_BAR0_SIZE_MSK);
+#else
+#ifdef CONFIG_EVA_OLD_MALTA_MAP
+ /* Classic (old) Malta memory map:
+ Setup the Malta max (2GB) memory for PCI DMA in host bridge
+ in transparent addressing mode, starting from 80000000.
+ Don't believe in registers content */
+ mask = 0x80000008;
+ MSC_WRITE(MSC01_PCI_BAR0, mask);
+
+ mask = 0x80000000;
+ MSC_WRITE(MSC01_PCI_HEAD4, mask);
+ MSC_WRITE(MSC01_PCI_P2SCMSKL, mask);
+ MSC_WRITE(MSC01_PCI_P2SCMAPL, mask);
+#else
+ /* New Malta memory map:
+ Set up the Malta max memory (2G) for PCI DMA in the host bridge
+ in transparent addressing mode, starting from 00000000.
+ Don't trust the current register contents */
+ mask = 0x80000008;
+ MSC_WRITE(MSC01_PCI_BAR0, mask);
+
+ mask = 0x00000000;
+ MSC_WRITE(MSC01_PCI_HEAD4, mask);
+ mask = 0x80000000;
+ MSC_WRITE(MSC01_PCI_P2SCMSKL, mask);
+ mask = 0x00000000;
+ MSC_WRITE(MSC01_PCI_P2SCMAPL, mask);
+#endif
+#endif
/* Don't handle target retries indefinitely. */
if ((data & MSC01_PCI_CFG_MAXRTRY_MSK) ==
@@ -267,10 +309,21 @@
#ifdef CONFIG_SERIAL_8250_CONSOLE
console_config();
#endif
+#ifdef CONFIG_MIPS_CMP
/* Early detection of CMP support */
- if (gcmp_probe(GCMP_BASE_ADDR, GCMP_ADDRSPACE_SZ))
+ if ((mips_revision_sconid != MIPS_REVISION_SCON_ROCIT) &&
+ (mips_revision_sconid != MIPS_REVISION_SCON_GT64120)) {
+ gcmp_present = 0;
+ printk("GCMP NOT present\n");
+ } else if (gcmp_probe(GCMP_BASE_ADDR_MALTA, GCMP_ADDRSPACE_SZ_MALTA)) {
+ cpc_probe(CPC_BASE_ADDR_MALTA, CPC_ADDRSPACE_SZ_MALTA);
+#if defined(CONFIG_EVA) && !defined(CONFIG_EVA_OLD_MALTA_MAP)
+ prom_mem_check(gcmp_niocu());
+#endif
if (!register_cmp_smp_ops())
return;
+ }
+#endif
if (!register_vsmp_smp_ops())
return;
diff --git a/arch/mips/mti-malta/malta-int.c b/arch/mips/mti-malta/malta-int.c
index 0a1339a..4a7fdaf 100644
--- a/arch/mips/mti-malta/malta-int.c
+++ b/arch/mips/mti-malta/malta-int.c
@@ -46,9 +46,7 @@
#include <asm/gcmpregs.h>
#include <asm/setup.h>
-int gcmp_present = -1;
static unsigned long _msc01_biu_base;
-static unsigned long _gcmp_base;
static unsigned int ipi_map[NR_CPUS];
static DEFINE_RAW_SPINLOCK(mips_irq_lock);
@@ -203,7 +201,11 @@
{
__asm__(
" .set push \n"
+#ifdef CONFIG_CPU_MIPSR6
+ " .set mips64r6 \n"
+#else
" .set mips32 \n"
+#endif
" clz %0, %1 \n"
" .set pop \n"
: "=r" (x)
@@ -417,44 +419,6 @@
};
#undef X
-/*
- * GCMP needs to be detected before any SMP initialisation
- */
-int __init gcmp_probe(unsigned long addr, unsigned long size)
-{
- if (mips_revision_sconid != MIPS_REVISION_SCON_ROCIT) {
- gcmp_present = 0;
- return gcmp_present;
- }
-
- if (gcmp_present >= 0)
- return gcmp_present;
-
- _gcmp_base = (unsigned long) ioremap_nocache(GCMP_BASE_ADDR, GCMP_ADDRSPACE_SZ);
- _msc01_biu_base = (unsigned long) ioremap_nocache(MSC01_BIU_REG_BASE, MSC01_BIU_ADDRSPACE_SZ);
- gcmp_present = (GCMPGCB(GCMPB) & GCMP_GCB_GCMPB_GCMPBASE_MSK) == GCMP_BASE_ADDR;
-
- if (gcmp_present)
- pr_debug("GCMP present\n");
- return gcmp_present;
-}
-
-/* Return the number of IOCU's present */
-int __init gcmp_niocu(void)
-{
- return gcmp_present ?
- (GCMPGCB(GC) & GCMP_GCB_GC_NUMIOCU_MSK) >> GCMP_GCB_GC_NUMIOCU_SHF :
- 0;
-}
-
-/* Set GCMP region attributes */
-void __init gcmp_setregion(int region, unsigned long base,
- unsigned long mask, int type)
-{
- GCMPGCBn(CMxBASE, region) = base;
- GCMPGCBn(CMxMASK, region) = mask | type;
-}
-
#if defined(CONFIG_MIPS_MT_SMP)
static void __init fill_ipi_map1(int baseintr, int cpu, int cpupin)
{
@@ -471,7 +435,7 @@
{
int cpu;
- for (cpu = 0; cpu < NR_CPUS; cpu++) {
+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
fill_ipi_map1(gic_resched_int_base, cpu, GIC_CPU_INT1);
fill_ipi_map1(gic_call_int_base, cpu, GIC_CPU_INT2);
}
@@ -486,14 +450,19 @@
void __init arch_init_irq(void)
{
+ unsigned long gicaddr = GIC_BASE_ADDR;
+
init_i8259_irqs();
if (!cpu_has_veic)
mips_cpu_irq_init();
- if (gcmp_present) {
- GCMPGCB(GICBA) = GIC_BASE_ADDR | GCMP_GCB_GICBA_EN_MSK;
- gic_present = 1;
+ if (gcmp_present && (GCMPGCB(GICST) & GCMP_GCB_GICST_EN_MSK)) {
+ GCMPGCBaddrWrite(GICBA, (gicaddr | GCMP_GCB_GICBA_EN_MSK));
+ if (GCMPGCBaddr(GICBA) & GCMP_GCB_GICBA_EN_MSK) {
+ gic_present = 1;
+ gicaddr = GCMPGCBaddr(GICBA) & ~GCMP_GCB_GICBA_EN_MSK;
+ }
} else {
if (mips_revision_sconid == MIPS_REVISION_SCON_ROCIT) {
_msc01_biu_base = (unsigned long)
@@ -572,12 +541,11 @@
/* FIXME */
int i;
#if defined(CONFIG_MIPS_MT_SMP)
- gic_call_int_base = GIC_NUM_INTRS - NR_CPUS;
- gic_resched_int_base = gic_call_int_base - NR_CPUS;
+ gic_call_int_base = GIC_NUM_INTRS -
+ (NR_CPUS - nr_cpu_ids) * 2 - nr_cpu_ids;
+ gic_resched_int_base = gic_call_int_base - nr_cpu_ids;
fill_ipi_map();
#endif
- gic_init(GIC_BASE_ADDR, GIC_ADDRSPACE_SZ, gic_intr_map,
- ARRAY_SIZE(gic_intr_map), MIPS_GIC_IRQ_BASE);
if (!gcmp_present) {
/* Enable the GIC */
i = REG(_msc01_biu_base, MSC01_SC_CFG);
@@ -585,24 +553,27 @@
(i | (0x1 << MSC01_SC_CFG_GICENA_SHF));
pr_debug("GIC Enabled\n");
}
+ gic_init(gicaddr, GIC_ADDRSPACE_SZ, gic_intr_map,
+ ARRAY_SIZE(gic_intr_map), MIPS_GIC_IRQ_BASE);
#if defined(CONFIG_MIPS_MT_SMP)
/* set up ipi interrupts */
if (cpu_has_vint) {
set_vi_handler(MIPSCPU_INT_IPI0, malta_ipi_irqdispatch);
set_vi_handler(MIPSCPU_INT_IPI1, malta_ipi_irqdispatch);
}
- /* Argh.. this really needs sorting out.. */
- printk("CPU%d: status register was %08x\n", smp_processor_id(), read_c0_status());
- write_c0_status(read_c0_status() | STATUSF_IP3 | STATUSF_IP4);
- printk("CPU%d: status register now %08x\n", smp_processor_id(), read_c0_status());
- write_c0_status(0x1100dc00);
- printk("CPU%d: status register frc %08x\n", smp_processor_id(), read_c0_status());
- for (i = 0; i < NR_CPUS; i++) {
+ for (i = 0; i < nr_cpu_ids; i++) {
arch_init_ipiirq(MIPS_GIC_IRQ_BASE +
GIC_RESCHED_INT(i), &irq_resched);
arch_init_ipiirq(MIPS_GIC_IRQ_BASE +
GIC_CALL_INT(i), &irq_call);
}
+ set_c0_status(mips_smp_c0_status_mask |
+ (0x100 << GIC_MIPS_CPU_IPI_RESCHED_IRQ) |
+ (0x100 << GIC_MIPS_CPU_IPI_CALL_IRQ));
+ back_to_back_c0_hazard();
+ printk("CPU%d: status register %08x\n", smp_processor_id(), read_c0_status());
+ mips_smp_c0_status_mask |= ((0x100 << GIC_MIPS_CPU_IPI_RESCHED_IRQ) |
+ (0x100 << GIC_MIPS_CPU_IPI_CALL_IRQ));
#endif
} else {
#if defined(CONFIG_MIPS_MT_SMP)
@@ -619,6 +590,8 @@
}
cpu_ipi_resched_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_RESCHED_IRQ;
cpu_ipi_call_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ;
+ mips_smp_c0_status_mask |= ((0x100 << MIPS_CPU_IPI_RESCHED_IRQ) |
+ (0x100 << MIPS_CPU_IPI_CALL_IRQ));
}
arch_init_ipiirq(cpu_ipi_resched_irq, &irq_resched);
arch_init_ipiirq(cpu_ipi_call_irq, &irq_call);
@@ -698,55 +671,63 @@
int retval = is_fixup ? MIPS_BE_FIXUP : MIPS_BE_FATAL;
if (gcmp_present) {
- unsigned long cm_error = GCMPGCB(GCMEC);
- unsigned long cm_addr = GCMPGCB(GCMEA);
+ unsigned long cm_error = GCMPGCBaddr(GCMEC);
+ unsigned long cm_addr = GCMPGCBaddr(GCMEA);
unsigned long cm_other = GCMPGCB(GCMEO);
unsigned long cause, ocause;
char buf[256];
- cause = (cm_error & GCMP_GCB_GMEC_ERROR_TYPE_MSK);
- if (cause != 0) {
- cause >>= GCMP_GCB_GMEC_ERROR_TYPE_SHF;
- if (cause < 16) {
- unsigned long cca_bits = (cm_error >> 15) & 7;
- unsigned long tr_bits = (cm_error >> 12) & 7;
- unsigned long mcmd_bits = (cm_error >> 7) & 0x1f;
- unsigned long stag_bits = (cm_error >> 3) & 15;
- unsigned long sport_bits = (cm_error >> 0) & 7;
+ if (gcmp3_present) {
+ /* no decoding of fields yet */
+ printk("BusError: CM_ERROR=%08x%08x, CM_ADDR=%08x%08x, CM_OTHER=%lx\n",
+ GCMPGCBhi(GCMEC), GCMPGCBlo(GCMEC), GCMPGCBhi(GCMEA), GCMPGCBlo(GCMEA),
+ cm_other);
+ GCMPGCBhi(GCMEC) = 0;
+ } else {
+ cause = (cm_error & GCMP_GCB_GMEC_ERROR_TYPE_MSK);
+ if (cause != 0) {
+ cause >>= GCMP_GCB_GMEC_ERROR_TYPE_SHF;
+ if (cause < 16) {
+ unsigned long cca_bits = (cm_error >> 15) & 7;
+ unsigned long tr_bits = (cm_error >> 12) & 7;
+ unsigned long mcmd_bits = (cm_error >> 7) & 0x1f;
+ unsigned long stag_bits = (cm_error >> 3) & 15;
+ unsigned long sport_bits = (cm_error >> 0) & 7;
- snprintf(buf, sizeof(buf),
- "CCA=%lu TR=%s MCmd=%s STag=%lu "
- "SPort=%lu\n",
- cca_bits, tr[tr_bits], mcmd[mcmd_bits],
- stag_bits, sport_bits);
- } else {
- /* glob state & sresp together */
- unsigned long c3_bits = (cm_error >> 18) & 7;
- unsigned long c2_bits = (cm_error >> 15) & 7;
- unsigned long c1_bits = (cm_error >> 12) & 7;
- unsigned long c0_bits = (cm_error >> 9) & 7;
- unsigned long sc_bit = (cm_error >> 8) & 1;
- unsigned long mcmd_bits = (cm_error >> 3) & 0x1f;
- unsigned long sport_bits = (cm_error >> 0) & 7;
- snprintf(buf, sizeof(buf),
- "C3=%s C2=%s C1=%s C0=%s SC=%s "
- "MCmd=%s SPort=%lu\n",
- core[c3_bits], core[c2_bits],
- core[c1_bits], core[c0_bits],
- sc_bit ? "True" : "False",
- mcmd[mcmd_bits], sport_bits);
+ snprintf(buf, sizeof(buf),
+ "CCA=%lu TR=%s MCmd=%s STag=%lu "
+ "SPort=%lu\n",
+ cca_bits, tr[tr_bits], mcmd[mcmd_bits],
+ stag_bits, sport_bits);
+ } else {
+ /* glob state & sresp together */
+ unsigned long c3_bits = (cm_error >> 18) & 7;
+ unsigned long c2_bits = (cm_error >> 15) & 7;
+ unsigned long c1_bits = (cm_error >> 12) & 7;
+ unsigned long c0_bits = (cm_error >> 9) & 7;
+ unsigned long sc_bit = (cm_error >> 8) & 1;
+ unsigned long mcmd_bits = (cm_error >> 3) & 0x1f;
+ unsigned long sport_bits = (cm_error >> 0) & 7;
+ snprintf(buf, sizeof(buf),
+ "C3=%s C2=%s C1=%s C0=%s SC=%s "
+ "MCmd=%s SPort=%lu\n",
+ core[c3_bits], core[c2_bits],
+ core[c1_bits], core[c0_bits],
+ sc_bit ? "True" : "False",
+ mcmd[mcmd_bits], sport_bits);
+ }
+
+ ocause = (cm_other & GCMP_GCB_GMEO_ERROR_2ND_MSK) >>
+ GCMP_GCB_GMEO_ERROR_2ND_SHF;
+
+ printk("CM_ERROR=%08lx %s <%s>\n", cm_error,
+ causes[cause], buf);
+ printk("CM_ADDR =%08lx\n", cm_addr);
+ printk("CM_OTHER=%08lx %s\n", cm_other, causes[ocause]);
+
+ /* reprime cause register */
+ GCMPGCBaddrWrite(GCMEC, 0UL);
}
-
- ocause = (cm_other & GCMP_GCB_GMEO_ERROR_2ND_MSK) >>
- GCMP_GCB_GMEO_ERROR_2ND_SHF;
-
- printk("CM_ERROR=%08lx %s <%s>\n", cm_error,
- causes[cause], buf);
- printk("CM_ADDR =%08lx\n", cm_addr);
- printk("CM_OTHER=%08lx %s\n", cm_other, causes[ocause]);
-
- /* reprime cause register */
- GCMPGCB(GCMEC) = 0;
}
}
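
The legacy CM_ERROR decode above repeatedly slices fixed-width bitfields out
of the register; the same arithmetic as a small helper (illustrative, not
part of this patch):

	/* Extract bits [pos + bits - 1 : pos] of a CM register value. */
	static inline unsigned long cm_field(unsigned long val, int pos, int bits)
	{
		return (val >> pos) & ((1UL << bits) - 1);
	}

	/* e.g. for the "cause < 16" layout: */
	unsigned long cca_bits  = cm_field(cm_error, 15, 3);
	unsigned long tr_bits   = cm_field(cm_error, 12, 3);
	unsigned long mcmd_bits = cm_field(cm_error, 7, 5);
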
diff --git a/arch/mips/mti-malta/malta-memory.c b/arch/mips/mti-malta/malta-memory.c
index 1f73d63..05ffdcf 100644
--- a/arch/mips/mti-malta/malta-memory.c
+++ b/arch/mips/mti-malta/malta-memory.c
@@ -21,25 +21,50 @@
static fw_memblock_t mdesc[FW_MAX_MEMBLOCKS];
-/* determined physical memory size, not overridden by command line args */
-unsigned long physical_memsize = 0L;
+#ifdef DEBUG
+static char *mtypes[4] = {
+ "Don't use memory",
+ "YAMON PROM memory",
+ "Free memory",
+ "Memory in use",
+};
+#endif
-fw_memblock_t * __init fw_getmdesc(void)
+/* determined physical memory size, not overridden by command line args */
+unsigned long physical_memsize = 0L;
+static unsigned newMapType;
+
+static inline fw_memblock_t * __init prom_getmdesc(void)
{
- char *memsize_str, *ptr;
- unsigned int memsize;
+ char *memsize_str;
+ char *ememsize_str;
+ unsigned long memsize = 0;
+ unsigned long ememsize = 0;
+ char *ptr;
static char cmdline[COMMAND_LINE_SIZE] __initdata;
- long val;
- int tmp;
/* otherwise look in the environment */
memsize_str = fw_getenv("memsize");
- if (!memsize_str) {
- pr_warn("memsize not set in YAMON, set to default (32Mb)\n");
+#ifdef DEBUG
+ pr_debug("prom_memsize = %s\n", memsize_str);
+#endif
+ if (memsize_str)
+ memsize = simple_strtol(memsize_str, NULL, 0);
+ ememsize_str = fw_getenv("ememsize");
+#ifdef DEBUG
+ pr_debug("fw_ememsize = %s\n", ememsize_str);
+#endif
+ if (ememsize_str)
+ ememsize = simple_strtol(ememsize_str, NULL, 0);
+
+	if (!memsize && !ememsize) {
+		printk(KERN_WARNING
+		       "memsize not set in boot prom, set to default (32MB)\n");
physical_memsize = 0x02000000;
} else {
- tmp = kstrtol(memsize_str, 0, &val);
- physical_memsize = (unsigned long)val;
+ physical_memsize = ememsize;
+ if (!physical_memsize)
+ physical_memsize = memsize;
}
#ifdef CONFIG_CPU_BIG_ENDIAN
@@ -48,26 +73,54 @@
physical_memsize -= PAGE_SIZE;
#endif
+ memsize = 0;
/* Check the command line for a memsize directive that overrides
the physical/default amount */
strcpy(cmdline, arcs_cmdline);
- ptr = strstr(cmdline, "memsize=");
- if (ptr && (ptr != cmdline) && (*(ptr - 1) != ' '))
- ptr = strstr(ptr, " memsize=");
-
- if (ptr)
- memsize = memparse(ptr + 8, &ptr);
- else
+ ptr = strstr(cmdline, " memsize=");
+ if (ptr && (ptr != cmdline))
+ memsize = memparse(ptr + 9, &ptr);
+ ptr = strstr(cmdline, " ememsize=");
+ if (ptr && (ptr != cmdline))
+ memsize = memparse(ptr + 10, &ptr);
+ if (!memsize) {
+ ptr = strstr(cmdline, "memsize=");
+ if (ptr && (ptr != cmdline))
+ memsize = memparse(ptr + 8, &ptr);
+ ptr = strstr(cmdline, "ememsize=");
+ if (ptr && (ptr != cmdline))
+ memsize = memparse(ptr + 9, &ptr);
+ }
+ if (!memsize)
memsize = physical_memsize;
+ if ((memsize == 0x10000000) && !ememsize)
+		if (!ptr || (ptr == cmdline)) {
+			printk("YAMON reports memsize=256M but doesn't report the ememsize option\n");
+			printk("If you install more than 256MB of memory, upgrade YAMON or use the boot option memsize=XXXM\n");
+ }
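+	/* read the system controller register (uncached CKSEG1 access) to find out which memory map layout the board uses */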
+ newMapType = *((unsigned int *)CKSEG1ADDR(0xbf403004));
+	printk("System Controller register = %x\n", newMapType);
+ newMapType &= 0x100; /* extract map type bit */
+#ifdef CONFIG_EVA_OLD_MALTA_MAP
+ if (newMapType)
+ panic("Malta board has new memory map layout but kernel is configured for legacy map\n");
+#endif
+#if (!defined(CONFIG_PHYS_ADDR_T_64BIT)) && !defined(CONFIG_HIGHMEM)
+	/* Don't use the last 64KB - it only guards against overflow in address arithmetic macros */
+	/* The HIGHMEM lowmem map is assumed to follow this rule too */
+ if (((unsigned long long)PHYS_OFFSET + (unsigned long long)memsize) > 0xffff0000ULL)
+ memsize = 0xffff0000ULL - (unsigned long long)PHYS_OFFSET;
+#endif
+
memset(mdesc, 0, sizeof(mdesc));
mdesc[0].type = fw_dontuse;
- mdesc[0].base = 0x00000000;
+ mdesc[0].base = PHYS_OFFSET;
mdesc[0].size = 0x00001000;
mdesc[1].type = fw_code;
- mdesc[1].base = 0x00001000;
+ mdesc[1].base = mdesc[0].base + 0x00001000UL;
mdesc[1].size = 0x000ef000;
/*
@@ -78,21 +131,53 @@
* devices.
*/
mdesc[2].type = fw_dontuse;
- mdesc[2].base = 0x000f0000;
+ mdesc[2].base = mdesc[0].base + 0x000f0000UL;
mdesc[2].size = 0x00010000;
- mdesc[3].type = fw_dontuse;
- mdesc[3].base = 0x00100000;
- mdesc[3].size = CPHYSADDR(PFN_ALIGN((unsigned long)&_end)) -
- mdesc[3].base;
+ mdesc[3].type = fw_inuse;
+ mdesc[3].base = mdesc[0].base + 0x00100000UL;
+ mdesc[3].size = CPHYSADDR(PFN_ALIGN((unsigned long)&_end)) - 0x00100000UL;
- mdesc[4].type = fw_free;
- mdesc[4].base = CPHYSADDR(PFN_ALIGN(&_end));
- mdesc[4].size = memsize - mdesc[4].base;
+ /* this code assumes that PAGE_OFFSET == 0 and PHYS_OFFSET == n*512MB */
+ if ((memsize > 0x20000000) && !PHYS_OFFSET) {
+ /* first 256MB */
+ mdesc[4].type = fw_free;
+ mdesc[4].base = mdesc[0].base + CPHYSADDR(PFN_ALIGN(&_end));
+ mdesc[4].size = mdesc[0].base + 0x10000000 - CPHYSADDR(mdesc[4].base);
+
+ /* I/O hole ... */
+
+		/* the rest of memory (the 256MB behind the I/O hole is lost) */
+ mdesc[5].type = fw_free;
+ mdesc[5].base = mdesc[0].base + 0x20000000;
+ mdesc[5].size = memsize - 0x20000000;
+ } else {
+ /* limit to 256MB, exclude I/O hole */
+ if (!PHYS_OFFSET)
+			memsize = (memsize > 0x10000000) ? 0x10000000 : memsize;
+
+ mdesc[4].type = fw_free;
+ mdesc[4].base = mdesc[0].base + CPHYSADDR(PFN_ALIGN(&_end));
+ mdesc[4].size = memsize - CPHYSADDR(mdesc[4].base);
+ }
return &mdesc[0];
}
+#ifdef CONFIG_EVA
+#ifndef CONFIG_EVA_OLD_MALTA_MAP
+void __init prom_mem_check(int niocu)
+{
+ if (!newMapType) {
+ if (niocu && mdesc[5].size) {
+			printk(KERN_WARNING "Malta board has legacy memory map + IOCU, but kernel is configured for new map layout; restricting memsize to 256MB\n");
+ boot_mem_map.nr_map--;
+ }
+ }
+}
+#endif /* !CONFIG_EVA_OLD_MALTA_MAP */
+#endif /* CONFIG_EVA */
+
static int __init fw_memtype_classify(unsigned int type)
{
switch (type) {
@@ -100,15 +185,37 @@
return BOOT_MEM_RAM;
case fw_code:
return BOOT_MEM_ROM_DATA;
+ case fw_inuse:
+ return BOOT_MEM_INUSE;
default:
return BOOT_MEM_RESERVED;
}
}
+fw_memblock_t __init *fw_getmdesc(void)
+{
+ fw_memblock_t *p;
+
+ p = prom_getmdesc();
+ return p;
+}
+
void __init fw_meminit(void)
{
fw_memblock_t *p;
+#ifdef DEBUG
+	int i = 0;	/* index of the descriptor being dumped */
+
+	pr_debug("YAMON MEMORY DESCRIPTOR dump:\n");
+	p = fw_getmdesc();
+
+	while (p->size) {
+		pr_debug("[%d,%p]: base<%08lx> size<%x> type<%s>\n",
+			 i, p, p->base, p->size, mtypes[p->type]);
+		p++;
+		i++;
+	}
+#endif
p = fw_getmdesc();
while (p->size) {
diff --git a/arch/mips/mti-malta/malta-pci.c b/arch/mips/mti-malta/malta-pci.c
index 37134dd..e0c9b59 100644
--- a/arch/mips/mti-malta/malta-pci.c
+++ b/arch/mips/mti-malta/malta-pci.c
@@ -198,11 +198,17 @@
MSC_READ(MSC01_PCI_SC2PMBASL, start);
MSC_READ(MSC01_PCI_SC2PMMSKL, mask);
MSC_READ(MSC01_PCI_SC2PMMAPL, map);
+#if defined(CONFIG_EVA) && !defined(CONFIG_EVA_OLD_MALTA_MAP)
+	/* shift PCI devices to the upper 2GB to prevent a PCI bridge address loop */
+ map |= 0xa0000000;
+ MSC_WRITE(MSC01_PCI_SC2PMMAPL, map);
+ MSC_READ(MSC01_PCI_SC2PMMAPL, map);
+#endif
msc_mem_resource.start = start & mask;
msc_mem_resource.end = (start & mask) | ~mask;
msc_controller.mem_offset = (start & mask) - (map & mask);
#ifdef CONFIG_MIPS_CMP
- if (gcmp_niocu())
+ if ((!gcmp3_present) && gcmp_niocu())
gcmp_setregion(0, start, mask,
GCMP_GCB_GCMPB_CMDEFTGT_IOCU1);
#endif
@@ -214,7 +220,7 @@
msc_controller.io_offset = 0;
ioport_resource.end = ~mask;
#ifdef CONFIG_MIPS_CMP
- if (gcmp_niocu())
+ if ((!gcmp3_present) && gcmp_niocu())
gcmp_setregion(1, start, mask,
GCMP_GCB_GCMPB_CMDEFTGT_IOCU1);
#endif
diff --git a/arch/mips/mti-malta/malta-platform.c b/arch/mips/mti-malta/malta-platform.c
index 132f866..e1dd1c1 100644
--- a/arch/mips/mti-malta/malta-platform.c
+++ b/arch/mips/mti-malta/malta-platform.c
@@ -47,6 +47,7 @@
static struct plat_serial8250_port uart8250_data[] = {
SMC_PORT(0x3F8, 4),
SMC_PORT(0x2F8, 3),
+#ifndef CONFIG_MIPS_CMP
{
.mapbase = 0x1f000900, /* The CBUS UART */
.irq = MIPS_CPU_IRQ_BASE + MIPSCPU_INT_MB2,
@@ -55,6 +56,7 @@
.flags = CBUS_UART_FLAGS,
.regshift = 3,
},
+#endif
{ },
};
diff --git a/arch/mips/mti-malta/malta-reset.c b/arch/mips/mti-malta/malta-reset.c
index 3294205..d627d4b 100644
--- a/arch/mips/mti-malta/malta-reset.c
+++ b/arch/mips/mti-malta/malta-reset.c
@@ -1,33 +1,18 @@
/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
* Carsten Langgaard, carstenl@mips.com
* Copyright (C) 1999,2000 MIPS Technologies, Inc. All rights reserved.
- *
- * ########################################################################
- *
- * This program is free software; you can distribute it and/or modify it
- * under the terms of the GNU General Public License (Version 2) as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- *
- * ########################################################################
- *
- * Reset the MIPS boards.
- *
*/
-#include <linux/init.h>
+#include <linux/io.h>
#include <linux/pm.h>
-#include <asm/io.h>
#include <asm/reboot.h>
-#include <asm/mips-boards/generic.h>
+
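+/* Malta software reset: writing GORESET to the SOFTRES register reboots the board */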
+#define SOFTRES_REG 0x1f000500
+#define GORESET 0x42
static void mips_machine_restart(char *command)
{
@@ -45,7 +30,6 @@
__raw_writel(GORESET, softres_reg);
}
-
static int __init mips_reboot_setup(void)
{
_machine_restart = mips_machine_restart;
@@ -54,5 +38,4 @@
return 0;
}
-
arch_initcall(mips_reboot_setup);
diff --git a/arch/mips/mti-malta/malta-setup.c b/arch/mips/mti-malta/malta-setup.c
index c72a069..fc34ef5 100644
--- a/arch/mips/mti-malta/malta-setup.c
+++ b/arch/mips/mti-malta/malta-setup.c
@@ -130,8 +130,9 @@
} else if (gcmp_niocu() != 0) {
/* Nothing special needs to be done to enable coherency */
pr_info("CMP IOCU detected\n");
- if ((*(unsigned int *)0xbf403000 & 0x81) != 0x81) {
- pr_crit("IOCU OPERATION DISABLED BY SWITCH - DEFAULTING TO SW IO COHERENCY\n");
+ if ((*(unsigned int *)CKSEG1ADDR(0xbf403000) & 0x81) != 0x81) {
+			pr_crit("IOCU OPERATION DISABLED BY SWITCH - DEFAULTING TO SW IO COHERENCY\n");
return 0;
}
supported = 1;
@@ -151,13 +152,17 @@
if (plat_enable_iocoherency()) {
if (coherentio == 0)
pr_info("Hardware DMA cache coherency disabled\n");
- else
+ else {
+ coherentio = 1;
pr_info("Hardware DMA cache coherency enabled\n");
+ }
} else {
if (coherentio == 1)
pr_info("Hardware DMA cache coherency unsupported, but enabled from command line!\n");
- else
+ else {
+ coherentio = 0;
pr_info("Software DMA cache coherency enabled\n");
+ }
}
#else
if (!plat_enable_iocoherency())
@@ -243,10 +248,147 @@
#endif
}
+#ifdef CONFIG_EVA
+extern unsigned int mips_cca;
+
+void __init plat_eva_setup(void)
+{
+ unsigned int val;
+
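+	/*
+	 * Program CP0 SegCtl0..SegCtl2 below to lay out the EVA segments for
+	 * the selected Malta memory map, then enable EVA in Config5.
+	 */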
+#ifdef CONFIG_EVA_OLD_MALTA_MAP
+
+#ifdef CONFIG_EVA_3GB
+ val = ((MIPS_SEGCFG_UK << MIPS_SEGCFG_AM_SHIFT) |
+ (0 << MIPS_SEGCFG_PA_SHIFT) | (2 << MIPS_SEGCFG_C_SHIFT) |
+ (1 << MIPS_SEGCFG_EU_SHIFT));
+ val |= (((MIPS_SEGCFG_MK << MIPS_SEGCFG_AM_SHIFT) |
+ (0 << MIPS_SEGCFG_PA_SHIFT) | (mips_cca << MIPS_SEGCFG_C_SHIFT) |
+ (1 << MIPS_SEGCFG_EU_SHIFT)) << 16);
+ write_c0_segctl0(val);
+
+ val = ((MIPS_SEGCFG_MUSUK << MIPS_SEGCFG_AM_SHIFT) |
+ (0 << MIPS_SEGCFG_PA_SHIFT) | (mips_cca << MIPS_SEGCFG_C_SHIFT) |
+ (1 << MIPS_SEGCFG_EU_SHIFT));
+#else /* !CONFIG_EVA_3GB */
+ val = ((MIPS_SEGCFG_MK << MIPS_SEGCFG_AM_SHIFT) |
+ (0 << MIPS_SEGCFG_PA_SHIFT) | (mips_cca << MIPS_SEGCFG_C_SHIFT) |
+ (1 << MIPS_SEGCFG_EU_SHIFT));
+ val |= (((MIPS_SEGCFG_MK << MIPS_SEGCFG_AM_SHIFT) |
+ (0 << MIPS_SEGCFG_PA_SHIFT) | (mips_cca << MIPS_SEGCFG_C_SHIFT) |
+ (1 << MIPS_SEGCFG_EU_SHIFT)) << 16);
+ write_c0_segctl0(val);
+
+ val = ((MIPS_SEGCFG_MUSUK << MIPS_SEGCFG_AM_SHIFT) |
+ (0 << MIPS_SEGCFG_PA_SHIFT) | (2 << MIPS_SEGCFG_C_SHIFT) |
+ (1 << MIPS_SEGCFG_EU_SHIFT));
+#endif /* CONFIG_EVA_3GB */
+#ifdef CONFIG_SMP
+ val |= (((MIPS_SEGCFG_MUSUK << MIPS_SEGCFG_AM_SHIFT) |
+ (0 << MIPS_SEGCFG_PA_SHIFT) | (mips_cca << MIPS_SEGCFG_C_SHIFT) |
+ (1 << MIPS_SEGCFG_EU_SHIFT)) << 16);
+#else
+ val |= (((MIPS_SEGCFG_MUSUK << MIPS_SEGCFG_AM_SHIFT) |
+ (4 << MIPS_SEGCFG_PA_SHIFT) | (mips_cca << MIPS_SEGCFG_C_SHIFT) |
+ (1 << MIPS_SEGCFG_EU_SHIFT)) << 16);
+#endif
+ write_c0_segctl1(val);
+
+ val = ((MIPS_SEGCFG_MUSUK << MIPS_SEGCFG_AM_SHIFT) |
+ (6 << MIPS_SEGCFG_PA_SHIFT) | (mips_cca << MIPS_SEGCFG_C_SHIFT) |
+ (1 << MIPS_SEGCFG_EU_SHIFT));
+ val |= (((MIPS_SEGCFG_MUSUK << MIPS_SEGCFG_AM_SHIFT) |
+ (4 << MIPS_SEGCFG_PA_SHIFT) | (mips_cca << MIPS_SEGCFG_C_SHIFT) |
+ (1 << MIPS_SEGCFG_EU_SHIFT)) << 16);
+
+#else /* !CONFIG_EVA_OLD_MALTA_MAP */
+
+#ifdef CONFIG_EVA_3GB
+ val = ((MIPS_SEGCFG_UK << MIPS_SEGCFG_AM_SHIFT) |
+ (0 << MIPS_SEGCFG_PA_SHIFT) | (2 << MIPS_SEGCFG_C_SHIFT) |
+ (1 << MIPS_SEGCFG_EU_SHIFT));
+ val |= (((MIPS_SEGCFG_MK << MIPS_SEGCFG_AM_SHIFT) |
+ (0 << MIPS_SEGCFG_PA_SHIFT) | (mips_cca << MIPS_SEGCFG_C_SHIFT) |
+ (1 << MIPS_SEGCFG_EU_SHIFT)) << 16);
+ write_c0_segctl0(val);
+
+ val = ((MIPS_SEGCFG_MUSUK << MIPS_SEGCFG_AM_SHIFT) |
+ (6 << MIPS_SEGCFG_PA_SHIFT) | (mips_cca << MIPS_SEGCFG_C_SHIFT) |
+ (1 << MIPS_SEGCFG_EU_SHIFT));
+ val |= (((MIPS_SEGCFG_MUSUK << MIPS_SEGCFG_AM_SHIFT) |
+ (5 << MIPS_SEGCFG_PA_SHIFT) | (mips_cca << MIPS_SEGCFG_C_SHIFT) |
+ (1 << MIPS_SEGCFG_EU_SHIFT)) << 16);
+ write_c0_segctl1(val);
+
+ val = ((MIPS_SEGCFG_MUSUK << MIPS_SEGCFG_AM_SHIFT) |
+ (3 << MIPS_SEGCFG_PA_SHIFT) | (mips_cca << MIPS_SEGCFG_C_SHIFT) |
+ (1 << MIPS_SEGCFG_EU_SHIFT));
+ val |= (((MIPS_SEGCFG_MUSUK << MIPS_SEGCFG_AM_SHIFT) |
+ (1 << MIPS_SEGCFG_PA_SHIFT) | (mips_cca << MIPS_SEGCFG_C_SHIFT) |
+ (1 << MIPS_SEGCFG_EU_SHIFT)) << 16);
+#else /* !CONFIG_EVA_3GB */
+ val = ((MIPS_SEGCFG_MK << MIPS_SEGCFG_AM_SHIFT) |
+ (0 << MIPS_SEGCFG_PA_SHIFT) | (mips_cca << MIPS_SEGCFG_C_SHIFT) |
+ (1 << MIPS_SEGCFG_EU_SHIFT));
+ val |= (((MIPS_SEGCFG_MK << MIPS_SEGCFG_AM_SHIFT) |
+ (0 << MIPS_SEGCFG_PA_SHIFT) | (mips_cca << MIPS_SEGCFG_C_SHIFT) |
+ (1 << MIPS_SEGCFG_EU_SHIFT)) << 16);
+ write_c0_segctl0(val);
+
+ val = ((MIPS_SEGCFG_MUSUK << MIPS_SEGCFG_AM_SHIFT) |
+ (0 << MIPS_SEGCFG_PA_SHIFT) | (2 << MIPS_SEGCFG_C_SHIFT) |
+ (1 << MIPS_SEGCFG_EU_SHIFT));
+ val |= (((MIPS_SEGCFG_MUSUK << MIPS_SEGCFG_AM_SHIFT) |
+ (0 << MIPS_SEGCFG_PA_SHIFT) | (mips_cca << MIPS_SEGCFG_C_SHIFT) |
+ (1 << MIPS_SEGCFG_EU_SHIFT)) << 16);
+ write_c0_segctl1(val);
+
+ val = ((MIPS_SEGCFG_MUSUK << MIPS_SEGCFG_AM_SHIFT) |
+ (2 << MIPS_SEGCFG_PA_SHIFT) | (mips_cca << MIPS_SEGCFG_C_SHIFT) |
+ (1 << MIPS_SEGCFG_EU_SHIFT));
+ val |= (((MIPS_SEGCFG_MUSUK << MIPS_SEGCFG_AM_SHIFT) |
+ (0 << MIPS_SEGCFG_PA_SHIFT) | (mips_cca << MIPS_SEGCFG_C_SHIFT) |
+ (1 << MIPS_SEGCFG_EU_SHIFT)) << 16);
+#endif /* CONFIG_EVA_3GB */
+
+#endif /* CONFIG_EVA_OLD_MALTA_MAP */
+
+ write_c0_segctl2(val);
+ back_to_back_c0_hazard();
+
+ val = read_c0_config5();
+ write_c0_config5(val|MIPS_CONF5_K|MIPS_CONF5_CV|MIPS_CONF5_EVA);
+ back_to_back_c0_hazard();
+
+ printk("Enhanced Virtual Addressing (EVA) active\n");
+}
+
+extern int gcmp_present;
+void BEV_overlay_segment(void);
+#endif
+
void __init plat_mem_setup(void)
{
unsigned int i;
+#ifdef CONFIG_CPU_MIPSR6
+ pr_info("BEVVA = %lx\n", read_c0_bevva());
+#endif
+
+#ifdef CONFIG_EVA
+#ifdef CONFIG_MIPS_CMP
+ if (gcmp_present)
+ BEV_overlay_segment();
+#endif
+
+	if (cpu_has_segments && cpu_has_eva) {
+		plat_eva_setup();
+	} else {
+		printk("cpu_has_segments=%ld cpu_has_eva=%ld\n",
+		       cpu_has_segments, cpu_has_eva);
+		printk("Kernel is built for EVA support but the EVA/segment control registers are not found\n");
+		panic("EVA absent");
+	}
+#endif
+
mips_pcibios_init();
/* Request I/O space for devices used on the Malta board. */
diff --git a/arch/mips/mti-sead3/sead3.dts b/arch/mips/mti-sead3/sead3.dts
index 658f437..e4b317d 100644
--- a/arch/mips/mti-sead3/sead3.dts
+++ b/arch/mips/mti-sead3/sead3.dts
@@ -15,10 +15,6 @@
};
};
- chosen {
- bootargs = "console=ttyS1,38400 rootdelay=10 root=/dev/sda3";
- };
-
memory {
device_type = "memory";
reg = <0x0 0x08000000>;
diff --git a/arch/mips/oprofile/common.c b/arch/mips/oprofile/common.c
index af763e8..aa14b66 100644
--- a/arch/mips/oprofile/common.c
+++ b/arch/mips/oprofile/common.c
@@ -86,12 +86,15 @@
case CPU_1004K:
case CPU_74K:
case CPU_LOONGSON1:
+ case CPU_INTERAPTIV:
case CPU_SB1:
case CPU_SB1A:
case CPU_R10000:
case CPU_R12000:
case CPU_R14000:
case CPU_XLR:
+ case CPU_VIRTUOSO:
+ case CPU_P5600:
lmodel = &op_model_mipsxx_ops;
break;
diff --git a/arch/mips/oprofile/op_model_mipsxx.c b/arch/mips/oprofile/op_model_mipsxx.c
index e4b1140..aaf642a 100644
--- a/arch/mips/oprofile/op_model_mipsxx.c
+++ b/arch/mips/oprofile/op_model_mipsxx.c
@@ -244,7 +244,7 @@
unsigned int counter;
int handled = IRQ_NONE;
- if (cpu_has_mips_r2 && !(read_c0_cause() & (1 << 26)))
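+	/* Cause bit 26 (PCI) flags a pending performance counter interrupt on R2/R6 cores */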
+ if ((cpu_has_mips_r2 || cpu_has_mips_r6) && !(read_c0_cause() & (1 << 26)))
return handled;
switch (counters) {
@@ -376,6 +376,22 @@
op_model_mipsxx_ops.cpu_type = "mips/74K";
break;
+ case CPU_PROAPTIV:
+ op_model_mipsxx_ops.cpu_type = "mips/proAptiv";
+ break;
+
+ case CPU_INTERAPTIV:
+ op_model_mipsxx_ops.cpu_type = "mips/interAptiv";
+ break;
+
+ case CPU_P5600:
+ op_model_mipsxx_ops.cpu_type = "mips/P5600";
+ break;
+
+ case CPU_VIRTUOSO:
+ op_model_mipsxx_ops.cpu_type = "mips/Virtuoso";
+ break;
+
case CPU_5KC:
op_model_mipsxx_ops.cpu_type = "mips/5K";
break;
diff --git a/arch/openrisc/kernel/prom.c b/arch/openrisc/kernel/prom.c
index 5869e3f..150215a 100644
--- a/arch/openrisc/kernel/prom.c
+++ b/arch/openrisc/kernel/prom.c
@@ -96,8 +96,7 @@
}
#ifdef CONFIG_BLK_DEV_INITRD
-void __init early_init_dt_setup_initrd_arch(unsigned long start,
- unsigned long end)
+void __init early_init_dt_setup_initrd_arch(u64 start, u64 end)
{
initrd_start = (unsigned long)__va(start);
initrd_end = (unsigned long)__va(end);
diff --git a/arch/powerpc/include/asm/prom.h b/arch/powerpc/include/asm/prom.h
index bc2da15..ac204e0 100644
--- a/arch/powerpc/include/asm/prom.h
+++ b/arch/powerpc/include/asm/prom.h
@@ -43,9 +43,6 @@
extern void kdump_move_device_tree(void);
-/* CPU OF node matching */
-struct device_node *of_get_cpu_node(int cpu, unsigned int *thread);
-
/* cache lookup */
struct device_node *of_find_next_cache_node(struct device_node *np);
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 8b6f7a9..eb4de78 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -550,8 +550,7 @@
}
#ifdef CONFIG_BLK_DEV_INITRD
-void __init early_init_dt_setup_initrd_arch(unsigned long start,
- unsigned long end)
+void __init early_init_dt_setup_initrd_arch(u64 start, u64 end)
{
initrd_start = (unsigned long)__va(start);
initrd_end = (unsigned long)__va(end);
@@ -827,49 +826,10 @@
__initcall(prom_reconfig_setup);
#endif
-/* Find the device node for a given logical cpu number, also returns the cpu
- * local thread number (index in ibm,interrupt-server#s) if relevant and
- * asked for (non NULL)
- */
-struct device_node *of_get_cpu_node(int cpu, unsigned int *thread)
+bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
{
- int hardid;
- struct device_node *np;
-
- hardid = get_hard_smp_processor_id(cpu);
-
- for_each_node_by_type(np, "cpu") {
- const u32 *intserv;
- unsigned int plen, t;
-
- /* Check for ibm,ppc-interrupt-server#s. If it doesn't exist
- * fallback to "reg" property and assume no threads
- */
- intserv = of_get_property(np, "ibm,ppc-interrupt-server#s",
- &plen);
- if (intserv == NULL) {
- const u32 *reg = of_get_property(np, "reg", NULL);
- if (reg == NULL)
- continue;
- if (*reg == hardid) {
- if (thread)
- *thread = 0;
- return np;
- }
- } else {
- plen /= sizeof(u32);
- for (t = 0; t < plen; t++) {
- if (hardid == intserv[t]) {
- if (thread)
- *thread = t;
- return np;
- }
- }
- }
- }
- return NULL;
+ return (int)phys_id == get_hard_smp_processor_id(cpu);
}
-EXPORT_SYMBOL(of_get_cpu_node);
#if defined(CONFIG_DEBUG_FS) && defined(DEBUG)
static struct debugfs_blob_wrapper flat_dt_blob;
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index fe120da..552edaf 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -37,7 +37,7 @@
select ARCH_WANT_OPTIONAL_GPIOLIB
select ARCH_WANT_FRAME_POINTERS
select HAVE_DMA_ATTRS
- select HAVE_DMA_CONTIGUOUS if !SWIOTLB
+ select HAVE_DMA_CONTIGUOUS
select HAVE_KRETPROBES
select HAVE_OPTPROBES
select HAVE_KPROBES_ON_FTRACE
@@ -207,6 +207,12 @@
config ARCH_SUSPEND_POSSIBLE
def_bool y
+config ARCH_WANT_HUGE_PMD_SHARE
+ def_bool y
+
+config ARCH_WANT_GENERAL_HUGETLB
+ def_bool y
+
config ZONE_DMA32
bool
default X86_64
@@ -321,10 +327,6 @@
---help---
This option is needed for the systems that have more than 8 CPUs
-config GOLDFISH
- def_bool y
- depends on X86_GOLDFISH
-
if X86_32
config X86_EXTENDED_PLATFORM
bool "Support for extended (non-PC) x86 platforms"
@@ -407,14 +409,6 @@
# Following is an alphabetically sorted list of 32 bit extended platforms
# Please maintain the alphabetic order if and when there are additions
-config X86_GOLDFISH
- bool "Goldfish (Virtual Platform)"
- depends on X86_32
- ---help---
- Enable support for the Goldfish virtual platform used primarily
- for Android development. Unless you are building for the Android
- Goldfish emulator say N here.
-
config X86_INTEL_CE
bool "CE4100 TV platform"
depends on PCI
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index 5c47726..01acb4a 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -58,7 +58,9 @@
KBUILD_CFLAGS += -m64
# Use -mpreferred-stack-boundary=3 if supported.
- KBUILD_CFLAGS += $(call cc-option,-mno-sse -mpreferred-stack-boundary=3)
+ KBUILD_CFLAGS += $(call cc-option,-mpreferred-stack-boundary=3)
+
+ KBUILD_CFLAGS += -fno-pic
# FIXME - should be integrated in Makefile.cpu (Makefile_32.cpu)
cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8)
@@ -141,7 +143,7 @@
#
KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
# prevent gcc from generating any FP code by mistake
-KBUILD_CFLAGS += $(call cc-option,-mno-sse -mno-mmx -mno-sse2 -mno-3dnow,)
+KBUILD_CFLAGS += -mno-sse -mno-mmx -mno-sse2 -mno-3dnow
KBUILD_CFLAGS += $(call cc-option,-mno-avx,)
KBUILD_CFLAGS += $(mflags-y)
diff --git a/arch/x86/Makefile_32.cpu b/arch/x86/Makefile_32.cpu
index 6647ed4..ea4c7eb 100644
--- a/arch/x86/Makefile_32.cpu
+++ b/arch/x86/Makefile_32.cpu
@@ -45,6 +45,9 @@
# cpu entries
cflags-$(CONFIG_X86_GENERIC) += $(call tune,generic,$(call tune,i686))
+# Android qemu support
+cflags-$(CONFIG_X86_GOLDFISH) += -mfpmath=387
+
# Work around the pentium-mmx code generator madness of gcc4.4.x which
# does stack alignment by generating horrible code _before_ the mcount
# prologue (push %ebp, mov %esp, %ebp) which breaks the function graph
diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
index 379814b..6cf0111 100644
--- a/arch/x86/boot/Makefile
+++ b/arch/x86/boot/Makefile
@@ -53,18 +53,18 @@
# How to compile the 16-bit code. Note we always compile for -march=i386,
# that way we can complain to the user if the CPU is insufficient.
-KBUILD_CFLAGS := $(USERINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
+KBUILD_CFLAGS := $(USERINCLUDE) -m32 -g -Os -D_SETUP -D__KERNEL__ \
-DDISABLE_BRANCH_PROFILING \
-Wall -Wstrict-prototypes \
-march=i386 -mregparm=3 \
-include $(srctree)/$(src)/code16gcc.h \
-fno-strict-aliasing -fomit-frame-pointer -fno-pic \
+ -mno-mmx -mno-sse \
$(call cc-option, -ffreestanding) \
$(call cc-option, -fno-toplevel-reorder,\
- $(call cc-option, -fno-unit-at-a-time)) \
+ $(call cc-option, -fno-unit-at-a-time)) \
$(call cc-option, -fno-stack-protector) \
$(call cc-option, -mpreferred-stack-boundary=2)
-KBUILD_CFLAGS += $(call cc-option, -m32)
KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
GCOV_PROFILE := n
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index 5ef205c..7194d9f 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -12,6 +12,7 @@
cflags-$(CONFIG_X86_32) := -march=i386
cflags-$(CONFIG_X86_64) := -mcmodel=small
KBUILD_CFLAGS += $(cflags-y)
+KBUILD_CFLAGS += -mno-mmx -mno-sse
KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
diff --git a/arch/x86/configs/i386_emu_defconfig b/arch/x86/configs/i386_emu_defconfig
new file mode 100644
index 0000000..72e95ba
--- /dev/null
+++ b/arch/x86/configs/i386_emu_defconfig
@@ -0,0 +1,370 @@
+# CONFIG_64BIT is not set
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_CGROUPS=y
+CONFIG_CGROUP_DEBUG=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_RESOURCE_COUNTERS=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_EMBEDDED=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_OSF_PARTITION=y
+CONFIG_AMIGA_PARTITION=y
+CONFIG_MAC_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+CONFIG_SGI_PARTITION=y
+CONFIG_SUN_PARTITION=y
+CONFIG_KARMA_PARTITION=y
+CONFIG_MCORE2=y
+CONFIG_X86_GENERIC=y
+CONFIG_HPET_TIMER=y
+CONFIG_PREEMPT=y
+# CONFIG_X86_MCE is not set
+CONFIG_X86_REBOOTFIXUPS=y
+CONFIG_X86_MSR=y
+CONFIG_X86_CPUID=y
+CONFIG_NOHIGHMEM=y
+CONFIG_CMA=y
+# CONFIG_MTRR_SANITIZER is not set
+CONFIG_HZ_100=y
+CONFIG_PHYSICAL_START=0x100000
+# CONFIG_RELOCATABLE is not set
+CONFIG_PHYSICAL_ALIGN=0x200000
+# CONFIG_COMPAT_VDSO is not set
+CONFIG_PM_AUTOSLEEP=y
+CONFIG_PM_WAKELOCKS=y
+CONFIG_CPU_FREQ=y
+# CONFIG_CPU_FREQ_STAT is not set
+CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_PCIEPORTBUS=y
+# CONFIG_PCIEASPM is not set
+CONFIG_PCCARD=y
+CONFIG_YENTA=y
+CONFIG_HOTPLUG_PCI=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_BINFMT_MISC=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_ESP=y
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+# CONFIG_INET_DIAG is not set
+CONFIG_IPV6_PRIVACY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_NETLABEL=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CT_PROTO_DCCP=y
+CONFIG_NF_CT_PROTO_SCTP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SANE=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NETFILTER_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFLOG=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_TARGET_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_TRACE=y
+CONFIG_NETFILTER_XT_TARGET_SECMARK=y
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_QTAGUID=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+CONFIG_NETFILTER_XT_MATCH_SOCKET=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETFILTER_XT_MATCH_STRING=y
+CONFIG_NETFILTER_XT_MATCH_TIME=y
+CONFIG_NETFILTER_XT_MATCH_U32=y
+CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_TARGET_REJECT_SKERR=y
+CONFIG_IP_NF_TARGET_ULOG=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_SECURITY=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_NF_CONNTRACK_IPV6=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_TARGET_REJECT_SKERR=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NET_CLS_U32=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_U32=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_CFG80211=y
+CONFIG_MAC80211=y
+CONFIG_MAC80211_LEDS=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DMA_CMA=y
+CONFIG_CONNECTOR=y
+CONFIG_MTD=y
+CONFIG_MTD_BLOCK=y
+CONFIG_NFTL=y
+CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_PLATFORM=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=16384
+CONFIG_BLK_DEV_SD=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_BLK_DEV_SR_VENDOR=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_SPI_ATTRS=y
+CONFIG_SCSI_ISCSI_ATTRS=y
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_ATA=y
+CONFIG_SATA_AHCI=y
+CONFIG_ATA_PIIX=y
+CONFIG_PATA_AMD=y
+CONFIG_PATA_OLDPIIX=y
+CONFIG_PATA_SCH=y
+CONFIG_PATA_MPIIX=y
+CONFIG_ATA_GENERIC=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_DEBUG=y
+CONFIG_DM_CRYPT=y
+CONFIG_DM_MIRROR=y
+CONFIG_DM_ZERO=y
+CONFIG_DM_UEVENT=y
+CONFIG_DM_VERITY=y
+CONFIG_NETDEVICES=y
+CONFIG_NETCONSOLE=y
+CONFIG_TUN=y
+CONFIG_BNX2=y
+CONFIG_TIGON3=y
+CONFIG_NET_TULIP=y
+CONFIG_E100=y
+CONFIG_E1000=y
+CONFIG_E1000E=y
+CONFIG_SKY2=y
+CONFIG_NE2K_PCI=y
+CONFIG_FORCEDETH=y
+CONFIG_8139TOO=y
+# CONFIG_8139TOO_PIO is not set
+CONFIG_R8169=y
+CONFIG_FDDI=y
+CONFIG_PPP=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_MPPE=y
+CONFIG_PPPOLAC=y
+CONFIG_PPPOPNS=y
+CONFIG_INPUT_POLLDEV=y
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+CONFIG_INPUT_EVDEV=y
+# CONFIG_KEYBOARD_ATKBD is not set
+CONFIG_KEYBOARD_GOLDFISH_EVENTS=y
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_JOYSTICK=y
+CONFIG_INPUT_TABLET=y
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_INPUT_MISC=y
+# CONFIG_SERIO is not set
+CONFIG_VT_HW_CONSOLE_BINDING=y
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_NONSTANDARD=y
+CONFIG_GOLDFISH_TTY=y
+# CONFIG_DEVMEM is not set
+# CONFIG_DEVKMEM is not set
+CONFIG_NVRAM=y
+CONFIG_I2C_I801=y
+CONFIG_BATTERY_GOLDFISH=y
+CONFIG_WATCHDOG=y
+CONFIG_AGP=y
+CONFIG_AGP_AMD64=y
+CONFIG_AGP_INTEL=y
+CONFIG_DRM=y
+CONFIG_FB=y
+CONFIG_FB_MODE_HELPERS=y
+CONFIG_FB_TILEBLITTING=y
+CONFIG_FB_GOLDFISH=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+# CONFIG_LCD_CLASS_DEVICE is not set
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_HIDRAW=y
+CONFIG_HID_A4TECH=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_BELKIN=y
+CONFIG_HID_CHERRY=y
+CONFIG_HID_CHICONY=y
+CONFIG_HID_CYPRESS=y
+CONFIG_HID_EZKEY=y
+CONFIG_HID_GYRATION=y
+CONFIG_HID_LOGITECH=y
+CONFIG_LOGITECH_FF=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MONTEREY=y
+CONFIG_HID_NTRIG=y
+CONFIG_HID_PANTHERLORD=y
+CONFIG_PANTHERLORD_FF=y
+CONFIG_HID_PETALYNX=y
+CONFIG_HID_SAMSUNG=y
+CONFIG_HID_SONY=y
+CONFIG_HID_SUNPLUS=y
+CONFIG_HID_TOPSEED=y
+CONFIG_HID_PID=y
+CONFIG_USB_HIDDEV=y
+CONFIG_USB=y
+CONFIG_USB_DEBUG=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_MON=y
+CONFIG_USB_EHCI_HCD=y
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_UHCI_HCD=y
+CONFIG_USB_PRINTER=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_OTG_WAKELOCK=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_G_ANDROID=y
+CONFIG_MMC=y
+CONFIG_MMC_GOLDFISH=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_SWITCH=y
+CONFIG_EDAC=y
+CONFIG_RTC_CLASS=y
+# CONFIG_RTC_HCTOSYS is not set
+CONFIG_DMADEVICES=y
+CONFIG_STAGING=y
+CONFIG_ANDROID=y
+CONFIG_ASHMEM=y
+CONFIG_ANDROID_LOW_MEMORY_KILLER=y
+CONFIG_SYNC=y
+CONFIG_GOLDFISH_AUDIO=y
+CONFIG_MTD_GOLDFISH_NAND=y
+CONFIG_GOLDFISH=y
+CONFIG_GOLDFISH_BUS=y
+CONFIG_GOLDFISH_PIPE=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_ISCSI_IBFT_FIND=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_QUOTA=y
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+# CONFIG_PRINT_QUOTA_WARNING is not set
+CONFIG_FUSE_FS=y
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_HUGETLBFS=y
+# CONFIG_NETWORK_FILESYSTEMS is not set
+CONFIG_NLS_DEFAULT="utf8"
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_UTF8=y
+CONFIG_PRINTK_TIME=y
+# CONFIG_ENABLE_WARN_DEPRECATED is not set
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_FRAME_WARN=2048
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_UNUSED_SYMBOLS is not set
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_SCHED_TRACER=y
+CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_PROVIDE_OHCI1394_DMA_INIT=y
+CONFIG_KEYS=y
+CONFIG_KEYS_DEBUG_PROC_KEYS=y
+CONFIG_SECURITY=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_CRYPTO_SHA256=y
+CONFIG_CRYPTO_AES_586=y
+CONFIG_CRYPTO_TWOFISH=y
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+# CONFIG_VIRTUALIZATION is not set
+CONFIG_CRC_T10DIF=y
diff --git a/arch/x86/configs/i386_ranchu_defconfig b/arch/x86/configs/i386_ranchu_defconfig
new file mode 100644
index 0000000..2101ce7
--- /dev/null
+++ b/arch/x86/configs/i386_ranchu_defconfig
@@ -0,0 +1,371 @@
+# CONFIG_64BIT is not set
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_CGROUPS=y
+CONFIG_CGROUP_DEBUG=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_RESOURCE_COUNTERS=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_EMBEDDED=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_OSF_PARTITION=y
+CONFIG_AMIGA_PARTITION=y
+CONFIG_MAC_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+CONFIG_SGI_PARTITION=y
+CONFIG_SUN_PARTITION=y
+CONFIG_KARMA_PARTITION=y
+CONFIG_SMP=y
+CONFIG_X86_BIGSMP=y
+CONFIG_MCORE2=y
+CONFIG_X86_GENERIC=y
+CONFIG_HPET_TIMER=y
+CONFIG_NR_CPUS=512
+CONFIG_PREEMPT=y
+# CONFIG_X86_MCE is not set
+CONFIG_X86_REBOOTFIXUPS=y
+CONFIG_X86_MSR=y
+CONFIG_X86_CPUID=y
+CONFIG_NOHIGHMEM=y
+CONFIG_CMA=y
+# CONFIG_MTRR_SANITIZER is not set
+CONFIG_EFI=y
+CONFIG_EFI_STUB=y
+CONFIG_HZ_100=y
+CONFIG_PHYSICAL_START=0x100000
+CONFIG_PHYSICAL_ALIGN=0x200000
+# CONFIG_COMPAT_VDSO is not set
+CONFIG_PM_AUTOSLEEP=y
+CONFIG_PM_WAKELOCKS=y
+CONFIG_CPU_FREQ=y
+# CONFIG_CPU_FREQ_STAT is not set
+CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_PCIEPORTBUS=y
+# CONFIG_PCIEASPM is not set
+CONFIG_PCCARD=y
+CONFIG_YENTA=y
+CONFIG_HOTPLUG_PCI=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_BINFMT_MISC=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_ESP=y
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+# CONFIG_INET_DIAG is not set
+CONFIG_IPV6_PRIVACY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_NETLABEL=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CT_PROTO_DCCP=y
+CONFIG_NF_CT_PROTO_SCTP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SANE=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NETFILTER_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFLOG=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_TARGET_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_TRACE=y
+CONFIG_NETFILTER_XT_TARGET_SECMARK=y
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_QTAGUID=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+CONFIG_NETFILTER_XT_MATCH_SOCKET=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETFILTER_XT_MATCH_STRING=y
+CONFIG_NETFILTER_XT_MATCH_TIME=y
+CONFIG_NETFILTER_XT_MATCH_U32=y
+CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_TARGET_REJECT_SKERR=y
+CONFIG_IP_NF_TARGET_ULOG=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_SECURITY=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_NF_CONNTRACK_IPV6=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_TARGET_REJECT_SKERR=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NET_CLS_U32=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_U32=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_CFG80211=y
+CONFIG_MAC80211=y
+CONFIG_MAC80211_LEDS=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DMA_CMA=y
+CONFIG_CONNECTOR=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=16384
+CONFIG_VIRTIO_BLK=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_BLK_DEV_SR_VENDOR=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_SPI_ATTRS=y
+CONFIG_SCSI_ISCSI_ATTRS=y
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_ATA=y
+CONFIG_SATA_AHCI=y
+CONFIG_ATA_PIIX=y
+CONFIG_PATA_AMD=y
+CONFIG_PATA_OLDPIIX=y
+CONFIG_PATA_SCH=y
+CONFIG_PATA_MPIIX=y
+CONFIG_ATA_GENERIC=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_DEBUG=y
+CONFIG_DM_CRYPT=y
+CONFIG_DM_MIRROR=y
+CONFIG_DM_ZERO=y
+CONFIG_DM_UEVENT=y
+CONFIG_DM_VERITY=y
+CONFIG_NETDEVICES=y
+CONFIG_NETCONSOLE=y
+CONFIG_TUN=y
+CONFIG_VIRTIO_NET=y
+CONFIG_BNX2=y
+CONFIG_TIGON3=y
+CONFIG_NET_TULIP=y
+CONFIG_E100=y
+CONFIG_E1000=y
+CONFIG_E1000E=y
+CONFIG_SKY2=y
+CONFIG_NE2K_PCI=y
+CONFIG_FORCEDETH=y
+CONFIG_8139TOO=y
+# CONFIG_8139TOO_PIO is not set
+CONFIG_R8169=y
+CONFIG_FDDI=y
+CONFIG_PPP=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_MPPE=y
+CONFIG_PPPOLAC=y
+CONFIG_PPPOPNS=y
+CONFIG_INPUT_POLLDEV=y
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+CONFIG_INPUT_EVDEV=y
+# CONFIG_KEYBOARD_ATKBD is not set
+CONFIG_KEYBOARD_GOLDFISH_EVENTS=y
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_JOYSTICK=y
+CONFIG_INPUT_TABLET=y
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_INPUT_MISC=y
+# CONFIG_SERIO is not set
+CONFIG_VT_HW_CONSOLE_BINDING=y
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_NONSTANDARD=y
+# CONFIG_DEVMEM is not set
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_VIRTIO_CONSOLE=y
+CONFIG_NVRAM=y
+CONFIG_I2C_I801=y
+CONFIG_BATTERY_GOLDFISH=y
+CONFIG_WATCHDOG=y
+CONFIG_AGP=y
+CONFIG_AGP_AMD64=y
+CONFIG_AGP_INTEL=y
+CONFIG_DRM=y
+CONFIG_FB=y
+CONFIG_FB_MODE_HELPERS=y
+CONFIG_FB_TILEBLITTING=y
+CONFIG_FB_EFI=y
+CONFIG_FB_GOLDFISH=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+# CONFIG_LCD_CLASS_DEVICE is not set
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_HIDRAW=y
+CONFIG_HID_A4TECH=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_BELKIN=y
+CONFIG_HID_CHERRY=y
+CONFIG_HID_CHICONY=y
+CONFIG_HID_CYPRESS=y
+CONFIG_HID_EZKEY=y
+CONFIG_HID_GYRATION=y
+CONFIG_HID_LOGITECH=y
+CONFIG_LOGITECH_FF=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MONTEREY=y
+CONFIG_HID_NTRIG=y
+CONFIG_HID_PANTHERLORD=y
+CONFIG_PANTHERLORD_FF=y
+CONFIG_HID_PETALYNX=y
+CONFIG_HID_SAMSUNG=y
+CONFIG_HID_SONY=y
+CONFIG_HID_SUNPLUS=y
+CONFIG_HID_TOPSEED=y
+CONFIG_HID_PID=y
+CONFIG_USB_HIDDEV=y
+CONFIG_USB=y
+CONFIG_USB_DEBUG=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_MON=y
+CONFIG_USB_EHCI_HCD=y
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_UHCI_HCD=y
+CONFIG_USB_PRINTER=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_OTG_WAKELOCK=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_G_ANDROID=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_SWITCH=y
+CONFIG_EDAC=y
+CONFIG_RTC_CLASS=y
+# CONFIG_RTC_HCTOSYS is not set
+CONFIG_DMADEVICES=y
+CONFIG_VIRTIO_PCI=y
+CONFIG_STAGING=y
+CONFIG_ANDROID=y
+CONFIG_ASHMEM=y
+CONFIG_ANDROID_LOW_MEMORY_KILLER=y
+CONFIG_SYNC=y
+CONFIG_GOLDFISH_AUDIO=y
+CONFIG_GOLDFISH=y
+CONFIG_GOLDFISH_PIPE=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_ISCSI_IBFT_FIND=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_QUOTA=y
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+# CONFIG_PRINT_QUOTA_WARNING is not set
+CONFIG_FUSE_FS=y
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_HUGETLBFS=y
+# CONFIG_NETWORK_FILESYSTEMS is not set
+CONFIG_NLS_DEFAULT="utf8"
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_UTF8=y
+CONFIG_PRINTK_TIME=y
+# CONFIG_ENABLE_WARN_DEPRECATED is not set
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_FRAME_WARN=2048
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_UNUSED_SYMBOLS is not set
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_SCHED_TRACER=y
+CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_PROVIDE_OHCI1394_DMA_INIT=y
+CONFIG_KEYS=y
+CONFIG_KEYS_DEBUG_PROC_KEYS=y
+CONFIG_SECURITY=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_CRYPTO_SHA256=y
+CONFIG_CRYPTO_AES_586=y
+CONFIG_CRYPTO_TWOFISH=y
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+# CONFIG_VIRTUALIZATION is not set
+CONFIG_CRC_T10DIF=y
diff --git a/arch/x86/configs/x86_64_emu_defconfig b/arch/x86/configs/x86_64_emu_defconfig
new file mode 100644
index 0000000..e1415cf
--- /dev/null
+++ b/arch/x86/configs/x86_64_emu_defconfig
@@ -0,0 +1,366 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_RCU_FANOUT=32
+CONFIG_CGROUPS=y
+CONFIG_CGROUP_DEBUG=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_RESOURCE_COUNTERS=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_EMBEDDED=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_OSF_PARTITION=y
+CONFIG_AMIGA_PARTITION=y
+CONFIG_MAC_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+CONFIG_SGI_PARTITION=y
+CONFIG_SUN_PARTITION=y
+CONFIG_KARMA_PARTITION=y
+CONFIG_MCORE2=y
+CONFIG_PREEMPT=y
+# CONFIG_X86_MCE is not set
+CONFIG_X86_MSR=y
+CONFIG_X86_CPUID=y
+CONFIG_CMA=y
+# CONFIG_MTRR_SANITIZER is not set
+CONFIG_HZ_100=y
+CONFIG_PHYSICAL_START=0x100000
+# CONFIG_RELOCATABLE is not set
+CONFIG_PM_AUTOSLEEP=y
+CONFIG_PM_WAKELOCKS=y
+CONFIG_CPU_FREQ=y
+# CONFIG_CPU_FREQ_STAT is not set
+CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_PCI_MMCONFIG=y
+CONFIG_PCIEPORTBUS=y
+# CONFIG_PCIEASPM is not set
+CONFIG_PCCARD=y
+CONFIG_YENTA=y
+CONFIG_HOTPLUG_PCI=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_BINFMT_MISC=y
+CONFIG_IA32_EMULATION=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_ESP=y
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+# CONFIG_INET_DIAG is not set
+CONFIG_IPV6_PRIVACY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_NETLABEL=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CT_PROTO_DCCP=y
+CONFIG_NF_CT_PROTO_SCTP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SANE=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NETFILTER_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFLOG=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_TARGET_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_TRACE=y
+CONFIG_NETFILTER_XT_TARGET_SECMARK=y
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_QTAGUID=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+CONFIG_NETFILTER_XT_MATCH_SOCKET=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETFILTER_XT_MATCH_STRING=y
+CONFIG_NETFILTER_XT_MATCH_TIME=y
+CONFIG_NETFILTER_XT_MATCH_U32=y
+CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_TARGET_REJECT_SKERR=y
+CONFIG_IP_NF_TARGET_ULOG=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_SECURITY=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_NF_CONNTRACK_IPV6=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_TARGET_REJECT_SKERR=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NET_CLS_U32=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_U32=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_CFG80211=y
+CONFIG_MAC80211=y
+CONFIG_MAC80211_LEDS=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DMA_CMA=y
+CONFIG_CONNECTOR=y
+CONFIG_MTD=y
+CONFIG_MTD_BLOCK=y
+CONFIG_NFTL=y
+CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_PLATFORM=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=16384
+CONFIG_BLK_DEV_SD=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_BLK_DEV_SR_VENDOR=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_SPI_ATTRS=y
+CONFIG_SCSI_ISCSI_ATTRS=y
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_ATA=y
+CONFIG_SATA_AHCI=y
+CONFIG_ATA_PIIX=y
+CONFIG_PATA_AMD=y
+CONFIG_PATA_OLDPIIX=y
+CONFIG_PATA_SCH=y
+CONFIG_PATA_MPIIX=y
+CONFIG_ATA_GENERIC=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_DEBUG=y
+CONFIG_DM_CRYPT=y
+CONFIG_DM_MIRROR=y
+CONFIG_DM_ZERO=y
+CONFIG_DM_UEVENT=y
+CONFIG_DM_VERITY=y
+CONFIG_NETDEVICES=y
+CONFIG_NETCONSOLE=y
+CONFIG_TUN=y
+CONFIG_BNX2=y
+CONFIG_TIGON3=y
+CONFIG_NET_TULIP=y
+CONFIG_E100=y
+CONFIG_E1000=y
+CONFIG_E1000E=y
+CONFIG_SKY2=y
+CONFIG_NE2K_PCI=y
+CONFIG_FORCEDETH=y
+CONFIG_8139TOO=y
+# CONFIG_8139TOO_PIO is not set
+CONFIG_R8169=y
+CONFIG_FDDI=y
+CONFIG_PPP=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_MPPE=y
+CONFIG_PPPOLAC=y
+CONFIG_PPPOPNS=y
+CONFIG_INPUT_POLLDEV=y
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+CONFIG_INPUT_EVDEV=y
+# CONFIG_KEYBOARD_ATKBD is not set
+CONFIG_KEYBOARD_GOLDFISH_EVENTS=y
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_JOYSTICK=y
+CONFIG_INPUT_TABLET=y
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_INPUT_MISC=y
+# CONFIG_SERIO is not set
+CONFIG_VT_HW_CONSOLE_BINDING=y
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_NONSTANDARD=y
+CONFIG_GOLDFISH_TTY=y
+# CONFIG_DEVMEM is not set
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_NVRAM=y
+CONFIG_I2C_I801=y
+CONFIG_BATTERY_GOLDFISH=y
+CONFIG_WATCHDOG=y
+CONFIG_AGP=y
+CONFIG_AGP_AMD64=y
+CONFIG_AGP_INTEL=y
+CONFIG_DRM=y
+CONFIG_FB=y
+CONFIG_FB_MODE_HELPERS=y
+CONFIG_FB_TILEBLITTING=y
+CONFIG_FB_GOLDFISH=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+# CONFIG_LCD_CLASS_DEVICE is not set
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_HIDRAW=y
+CONFIG_HID_A4TECH=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_BELKIN=y
+CONFIG_HID_CHERRY=y
+CONFIG_HID_CHICONY=y
+CONFIG_HID_CYPRESS=y
+CONFIG_HID_EZKEY=y
+CONFIG_HID_GYRATION=y
+CONFIG_HID_LOGITECH=y
+CONFIG_LOGITECH_FF=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MONTEREY=y
+CONFIG_HID_NTRIG=y
+CONFIG_HID_PANTHERLORD=y
+CONFIG_PANTHERLORD_FF=y
+CONFIG_HID_PETALYNX=y
+CONFIG_HID_SAMSUNG=y
+CONFIG_HID_SONY=y
+CONFIG_HID_SUNPLUS=y
+CONFIG_HID_TOPSEED=y
+CONFIG_HID_PID=y
+CONFIG_USB_HIDDEV=y
+CONFIG_USB=y
+CONFIG_USB_DEBUG=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_MON=y
+CONFIG_USB_EHCI_HCD=y
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_UHCI_HCD=y
+CONFIG_USB_PRINTER=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_OTG_WAKELOCK=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_G_ANDROID=y
+CONFIG_MMC=y
+CONFIG_MMC_GOLDFISH=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_SWITCH=y
+CONFIG_EDAC=y
+CONFIG_RTC_CLASS=y
+# CONFIG_RTC_HCTOSYS is not set
+CONFIG_DMADEVICES=y
+CONFIG_STAGING=y
+CONFIG_ANDROID=y
+CONFIG_ASHMEM=y
+CONFIG_ANDROID_LOW_MEMORY_KILLER=y
+CONFIG_SYNC=y
+CONFIG_GOLDFISH_AUDIO=y
+CONFIG_MTD_GOLDFISH_NAND=y
+CONFIG_GOLDFISH=y
+CONFIG_GOLDFISH_BUS=y
+CONFIG_GOLDFISH_PIPE=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_ISCSI_IBFT_FIND=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_QUOTA=y
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+# CONFIG_PRINT_QUOTA_WARNING is not set
+CONFIG_FUSE_FS=y
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_HUGETLBFS=y
+# CONFIG_NETWORK_FILESYSTEMS is not set
+CONFIG_NLS_DEFAULT="utf8"
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_UTF8=y
+CONFIG_PRINTK_TIME=y
+# CONFIG_ENABLE_WARN_DEPRECATED is not set
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_UNUSED_SYMBOLS is not set
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_SCHED_TRACER=y
+CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_PROVIDE_OHCI1394_DMA_INIT=y
+CONFIG_KEYS=y
+CONFIG_KEYS_DEBUG_PROC_KEYS=y
+CONFIG_SECURITY=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_CRYPTO_SHA256=y
+CONFIG_CRYPTO_TWOFISH=y
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+# CONFIG_VIRTUALIZATION is not set
+CONFIG_CRC_T10DIF=y
diff --git a/arch/x86/configs/x86_64_ranchu_defconfig b/arch/x86/configs/x86_64_ranchu_defconfig
new file mode 100644
index 0000000..0a430c7
--- /dev/null
+++ b/arch/x86/configs/x86_64_ranchu_defconfig
@@ -0,0 +1,364 @@
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_RCU_FANOUT=32
+CONFIG_CGROUPS=y
+CONFIG_CGROUP_DEBUG=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_RESOURCE_COUNTERS=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_RT_GROUP_SCHED=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_EMBEDDED=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_OSF_PARTITION=y
+CONFIG_AMIGA_PARTITION=y
+CONFIG_MAC_PARTITION=y
+CONFIG_BSD_DISKLABEL=y
+CONFIG_MINIX_SUBPARTITION=y
+CONFIG_SOLARIS_X86_PARTITION=y
+CONFIG_UNIXWARE_DISKLABEL=y
+CONFIG_SGI_PARTITION=y
+CONFIG_SUN_PARTITION=y
+CONFIG_KARMA_PARTITION=y
+CONFIG_SMP=y
+CONFIG_MCORE2=y
+CONFIG_MAXSMP=y
+CONFIG_PREEMPT=y
+# CONFIG_X86_MCE is not set
+CONFIG_X86_MSR=y
+CONFIG_X86_CPUID=y
+CONFIG_CMA=y
+# CONFIG_MTRR_SANITIZER is not set
+CONFIG_EFI=y
+CONFIG_EFI_STUB=y
+CONFIG_HZ_100=y
+CONFIG_PHYSICAL_START=0x100000
+CONFIG_PM_AUTOSLEEP=y
+CONFIG_PM_WAKELOCKS=y
+CONFIG_CPU_FREQ=y
+# CONFIG_CPU_FREQ_STAT is not set
+CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+CONFIG_PCI_MMCONFIG=y
+CONFIG_PCIEPORTBUS=y
+# CONFIG_PCIEASPM is not set
+CONFIG_PCCARD=y
+CONFIG_YENTA=y
+CONFIG_HOTPLUG_PCI=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_BINFMT_MISC=y
+CONFIG_IA32_EMULATION=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=y
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_ESP=y
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+# CONFIG_INET_DIAG is not set
+CONFIG_IPV6_PRIVACY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_NETLABEL=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CT_PROTO_DCCP=y
+CONFIG_NF_CT_PROTO_SCTP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SANE=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NETFILTER_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFLOG=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_TARGET_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_TRACE=y
+CONFIG_NETFILTER_XT_TARGET_SECMARK=y
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_QTAGUID=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+CONFIG_NETFILTER_XT_MATCH_SOCKET=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETFILTER_XT_MATCH_STRING=y
+CONFIG_NETFILTER_XT_MATCH_TIME=y
+CONFIG_NETFILTER_XT_MATCH_U32=y
+CONFIG_NF_CONNTRACK_IPV4=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_AH=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_TARGET_REJECT_SKERR=y
+CONFIG_IP_NF_TARGET_ULOG=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_SECURITY=y
+CONFIG_IP_NF_ARPTABLES=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_NF_CONNTRACK_IPV6=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_TARGET_REJECT_SKERR=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NET_CLS_U32=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_U32=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_CFG80211=y
+CONFIG_MAC80211=y
+CONFIG_MAC80211_LEDS=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DMA_CMA=y
+CONFIG_CONNECTOR=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=16384
+CONFIG_VIRTIO_BLK=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_BLK_DEV_SR_VENDOR=y
+CONFIG_CHR_DEV_SG=y
+CONFIG_SCSI_CONSTANTS=y
+CONFIG_SCSI_SPI_ATTRS=y
+CONFIG_SCSI_ISCSI_ATTRS=y
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_ATA=y
+CONFIG_SATA_AHCI=y
+CONFIG_ATA_PIIX=y
+CONFIG_PATA_AMD=y
+CONFIG_PATA_OLDPIIX=y
+CONFIG_PATA_SCH=y
+CONFIG_PATA_MPIIX=y
+CONFIG_ATA_GENERIC=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_MD=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_DEBUG=y
+CONFIG_DM_CRYPT=y
+CONFIG_DM_MIRROR=y
+CONFIG_DM_ZERO=y
+CONFIG_DM_UEVENT=y
+CONFIG_DM_VERITY=y
+CONFIG_NETDEVICES=y
+CONFIG_NETCONSOLE=y
+CONFIG_TUN=y
+CONFIG_VIRTIO_NET=y
+CONFIG_BNX2=y
+CONFIG_TIGON3=y
+CONFIG_NET_TULIP=y
+CONFIG_E100=y
+CONFIG_E1000=y
+CONFIG_E1000E=y
+CONFIG_SKY2=y
+CONFIG_NE2K_PCI=y
+CONFIG_FORCEDETH=y
+CONFIG_8139TOO=y
+# CONFIG_8139TOO_PIO is not set
+CONFIG_R8169=y
+CONFIG_FDDI=y
+CONFIG_PPP=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_MPPE=y
+CONFIG_PPPOLAC=y
+CONFIG_PPPOPNS=y
+CONFIG_INPUT_POLLDEV=y
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+CONFIG_INPUT_EVDEV=y
+# CONFIG_KEYBOARD_ATKBD is not set
+CONFIG_KEYBOARD_GOLDFISH_EVENTS=y
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_JOYSTICK=y
+CONFIG_INPUT_TABLET=y
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_INPUT_MISC=y
+# CONFIG_SERIO is not set
+CONFIG_VT_HW_CONSOLE_BINDING=y
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_NONSTANDARD=y
+# CONFIG_DEVMEM is not set
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_VIRTIO_CONSOLE=y
+CONFIG_NVRAM=y
+CONFIG_I2C_I801=y
+CONFIG_BATTERY_GOLDFISH=y
+CONFIG_WATCHDOG=y
+CONFIG_AGP=y
+CONFIG_AGP_AMD64=y
+CONFIG_AGP_INTEL=y
+CONFIG_DRM=y
+CONFIG_FB=y
+CONFIG_FB_MODE_HELPERS=y
+CONFIG_FB_TILEBLITTING=y
+CONFIG_FB_EFI=y
+CONFIG_FB_GOLDFISH=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+# CONFIG_LCD_CLASS_DEVICE is not set
+# CONFIG_VGA_CONSOLE is not set
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_HIDRAW=y
+CONFIG_HID_A4TECH=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_BELKIN=y
+CONFIG_HID_CHERRY=y
+CONFIG_HID_CHICONY=y
+CONFIG_HID_CYPRESS=y
+CONFIG_HID_EZKEY=y
+CONFIG_HID_GYRATION=y
+CONFIG_HID_LOGITECH=y
+CONFIG_LOGITECH_FF=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MONTEREY=y
+CONFIG_HID_NTRIG=y
+CONFIG_HID_PANTHERLORD=y
+CONFIG_PANTHERLORD_FF=y
+CONFIG_HID_PETALYNX=y
+CONFIG_HID_SAMSUNG=y
+CONFIG_HID_SONY=y
+CONFIG_HID_SUNPLUS=y
+CONFIG_HID_TOPSEED=y
+CONFIG_HID_PID=y
+CONFIG_USB_HIDDEV=y
+CONFIG_USB=y
+CONFIG_USB_DEBUG=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_MON=y
+CONFIG_USB_EHCI_HCD=y
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_UHCI_HCD=y
+CONFIG_USB_PRINTER=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_OTG_WAKELOCK=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_G_ANDROID=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_SWITCH=y
+CONFIG_EDAC=y
+CONFIG_RTC_CLASS=y
+# CONFIG_RTC_HCTOSYS is not set
+CONFIG_DMADEVICES=y
+CONFIG_VIRTIO_PCI=y
+CONFIG_STAGING=y
+CONFIG_ANDROID=y
+CONFIG_ASHMEM=y
+CONFIG_ANDROID_LOW_MEMORY_KILLER=y
+CONFIG_SYNC=y
+CONFIG_GOLDFISH_AUDIO=y
+CONFIG_GOLDFISH=y
+CONFIG_GOLDFISH_PIPE=y
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_ISCSI_IBFT_FIND=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_QUOTA=y
+CONFIG_QUOTA_NETLINK_INTERFACE=y
+# CONFIG_PRINT_QUOTA_WARNING is not set
+CONFIG_FUSE_FS=y
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_HUGETLBFS=y
+# CONFIG_NETWORK_FILESYSTEMS is not set
+CONFIG_NLS_DEFAULT="utf8"
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_UTF8=y
+CONFIG_PRINTK_TIME=y
+# CONFIG_ENABLE_WARN_DEPRECATED is not set
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_MAGIC_SYSRQ=y
+# CONFIG_UNUSED_SYMBOLS is not set
+CONFIG_DEBUG_INFO=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_SCHED_TRACER=y
+CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_PROVIDE_OHCI1394_DMA_INIT=y
+CONFIG_KEYS=y
+CONFIG_KEYS_DEBUG_PROC_KEYS=y
+CONFIG_SECURITY=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_SECURITY_SELINUX=y
+CONFIG_CRYPTO_SHA256=y
+CONFIG_CRYPTO_TWOFISH=y
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+# CONFIG_VIRTUALIZATION is not set
+CONFIG_CRC_T10DIF=y
diff --git a/arch/x86/include/asm/dma-contiguous.h b/arch/x86/include/asm/dma-contiguous.h
index c092416..b4b38ba 100644
--- a/arch/x86/include/asm/dma-contiguous.h
+++ b/arch/x86/include/asm/dma-contiguous.h
@@ -4,7 +4,6 @@
#ifdef __KERNEL__
#include <linux/types.h>
-#include <asm-generic/dma-contiguous.h>
static inline void
dma_contiguous_early_fixup(phys_addr_t base, unsigned long size) { }
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index 0dc7d9e..847b35b 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -175,64 +175,7 @@
}
#endif
-#define set_fixmap(idx, phys) \
- __set_fixmap(idx, phys, PAGE_KERNEL)
-
-/*
- * Some hardware wants to get fixmapped without caching.
- */
-#define set_fixmap_nocache(idx, phys) \
- __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE)
-
-#define clear_fixmap(idx) \
- __set_fixmap(idx, 0, __pgprot(0))
-
-#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
-#define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT)
-
-extern void __this_fixmap_does_not_exist(void);
-
-/*
- * 'index to address' translation. If anyone tries to use the idx
- * directly without translation, we catch the bug with a NULL-deference
- * kernel oops. Illegal ranges of incoming indices are caught too.
- */
-static __always_inline unsigned long fix_to_virt(const unsigned int idx)
-{
- /*
- * this branch gets completely eliminated after inlining,
- * except when someone tries to use fixaddr indices in an
- * illegal way. (such as mixing up address types or using
- * out-of-range indices).
- *
- * If it doesn't get removed, the linker will complain
- * loudly with a reasonably clear error message..
- */
- if (idx >= __end_of_fixed_addresses)
- __this_fixmap_does_not_exist();
-
- return __fix_to_virt(idx);
-}
-
-static inline unsigned long virt_to_fix(const unsigned long vaddr)
-{
- BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
- return __virt_to_fix(vaddr);
-}
-
-/* Return an pointer with offset calculated */
-static __always_inline unsigned long
-__set_fixmap_offset(enum fixed_addresses idx, phys_addr_t phys, pgprot_t flags)
-{
- __set_fixmap(idx, phys, flags);
- return fix_to_virt(idx) + (phys & (PAGE_SIZE - 1));
-}
-
-#define set_fixmap_offset(idx, phys) \
- __set_fixmap_offset(idx, phys, PAGE_KERNEL)
-
-#define set_fixmap_offset_nocache(idx, phys) \
- __set_fixmap_offset(idx, phys, PAGE_KERNEL_NOCACHE)
+#include <asm-generic/fixmap.h>
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_X86_FIXMAP_H */
diff --git a/arch/x86/include/asm/swiotlb.h b/arch/x86/include/asm/swiotlb.h
index 977f176..ab05d73 100644
--- a/arch/x86/include/asm/swiotlb.h
+++ b/arch/x86/include/asm/swiotlb.h
@@ -29,4 +29,11 @@
static inline void dma_mark_clean(void *addr, size_t size) {}
+extern void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flags,
+ struct dma_attrs *attrs);
+extern void x86_swiotlb_free_coherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_addr,
+ struct dma_attrs *attrs);
+
#endif /* _ASM_X86_SWIOTLB_H */
diff --git a/arch/x86/kernel/amd_gart_64.c b/arch/x86/kernel/amd_gart_64.c
index b574b29..8e3842f 100644
--- a/arch/x86/kernel/amd_gart_64.c
+++ b/arch/x86/kernel/amd_gart_64.c
@@ -512,7 +512,7 @@
dma_addr_t dma_addr, struct dma_attrs *attrs)
{
gart_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL, NULL);
- free_pages((unsigned long)vaddr, get_order(size));
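+	/* Free through the generic path so buffers that came from other
+	 * allocators (e.g. CMA) are handled as well. */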
+ dma_generic_free_coherent(dev, size, vaddr, dma_addr, attrs);
}
static int gart_mapping_error(struct device *dev, dma_addr_t dma_addr)
diff --git a/arch/x86/kernel/devicetree.c b/arch/x86/kernel/devicetree.c
index b158152..2fbad6b 100644
--- a/arch/x86/kernel/devicetree.c
+++ b/arch/x86/kernel/devicetree.c
@@ -52,8 +52,7 @@
}
#ifdef CONFIG_BLK_DEV_INITRD
-void __init early_init_dt_setup_initrd_arch(unsigned long start,
- unsigned long end)
+void __init early_init_dt_setup_initrd_arch(u64 start, u64 end)
{
initrd_start = (unsigned long)__va(start);
initrd_end = (unsigned long)__va(end);
diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
index 6c483ba..77dd0ad 100644
--- a/arch/x86/kernel/pci-swiotlb.c
+++ b/arch/x86/kernel/pci-swiotlb.c
@@ -14,7 +14,7 @@
#include <asm/iommu_table.h>
int swiotlb __read_mostly;
-static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
+void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
dma_addr_t *dma_handle, gfp_t flags,
struct dma_attrs *attrs)
{
@@ -28,11 +28,14 @@
return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
}
-static void x86_swiotlb_free_coherent(struct device *dev, size_t size,
+void x86_swiotlb_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_addr,
struct dma_attrs *attrs)
{
- swiotlb_free_coherent(dev, size, vaddr, dma_addr);
+ if (is_swiotlb_buffer(dma_to_phys(dev, dma_addr)))
+ swiotlb_free_coherent(dev, size, vaddr, dma_addr);
+ else
+ dma_generic_free_coherent(dev, size, vaddr, dma_addr, attrs);
}
static struct dma_map_ops swiotlb_dma_ops = {
diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
index ae1aa71..7e73e8c 100644
--- a/arch/x86/mm/hugetlbpage.c
+++ b/arch/x86/mm/hugetlbpage.c
@@ -16,169 +16,6 @@
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
-static unsigned long page_table_shareable(struct vm_area_struct *svma,
- struct vm_area_struct *vma,
- unsigned long addr, pgoff_t idx)
-{
- unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
- svma->vm_start;
- unsigned long sbase = saddr & PUD_MASK;
- unsigned long s_end = sbase + PUD_SIZE;
-
- /* Allow segments to share if only one is marked locked */
- unsigned long vm_flags = vma->vm_flags & ~VM_LOCKED;
- unsigned long svm_flags = svma->vm_flags & ~VM_LOCKED;
-
- /*
- * match the virtual addresses, permission and the alignment of the
- * page table page.
- */
- if (pmd_index(addr) != pmd_index(saddr) ||
- vm_flags != svm_flags ||
- sbase < svma->vm_start || svma->vm_end < s_end)
- return 0;
-
- return saddr;
-}
-
-static int vma_shareable(struct vm_area_struct *vma, unsigned long addr)
-{
- unsigned long base = addr & PUD_MASK;
- unsigned long end = base + PUD_SIZE;
-
- /*
- * check on proper vm_flags and page table alignment
- */
- if (vma->vm_flags & VM_MAYSHARE &&
- vma->vm_start <= base && end <= vma->vm_end)
- return 1;
- return 0;
-}
-
-/*
- * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
- * and returns the corresponding pte. While this is not necessary for the
- * !shared pmd case because we can allocate the pmd later as well, it makes the
- * code much cleaner. pmd allocation is essential for the shared case because
- * pud has to be populated inside the same i_mmap_mutex section - otherwise
- * racing tasks could either miss the sharing (see huge_pte_offset) or select a
- * bad pmd for sharing.
- */
-static pte_t *
-huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
-{
- struct vm_area_struct *vma = find_vma(mm, addr);
- struct address_space *mapping = vma->vm_file->f_mapping;
- pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
- vma->vm_pgoff;
- struct vm_area_struct *svma;
- unsigned long saddr;
- pte_t *spte = NULL;
- pte_t *pte;
-
- if (!vma_shareable(vma, addr))
- return (pte_t *)pmd_alloc(mm, pud, addr);
-
- mutex_lock(&mapping->i_mmap_mutex);
- vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
- if (svma == vma)
- continue;
-
- saddr = page_table_shareable(svma, vma, addr, idx);
- if (saddr) {
- spte = huge_pte_offset(svma->vm_mm, saddr);
- if (spte) {
- get_page(virt_to_page(spte));
- break;
- }
- }
- }
-
- if (!spte)
- goto out;
-
- spin_lock(&mm->page_table_lock);
- if (pud_none(*pud))
- pud_populate(mm, pud, (pmd_t *)((unsigned long)spte & PAGE_MASK));
- else
- put_page(virt_to_page(spte));
- spin_unlock(&mm->page_table_lock);
-out:
- pte = (pte_t *)pmd_alloc(mm, pud, addr);
- mutex_unlock(&mapping->i_mmap_mutex);
- return pte;
-}
-
-/*
- * unmap huge page backed by shared pte.
- *
- * Hugetlb pte page is ref counted at the time of mapping. If pte is shared
- * indicated by page_count > 1, unmap is achieved by clearing pud and
- * decrementing the ref count. If count == 1, the pte page is not shared.
- *
- * called with vma->vm_mm->page_table_lock held.
- *
- * returns: 1 successfully unmapped a shared pte page
- * 0 the underlying pte page is not shared, or it is the last user
- */
-int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
-{
- pgd_t *pgd = pgd_offset(mm, *addr);
- pud_t *pud = pud_offset(pgd, *addr);
-
- BUG_ON(page_count(virt_to_page(ptep)) == 0);
- if (page_count(virt_to_page(ptep)) == 1)
- return 0;
-
- pud_clear(pud);
- put_page(virt_to_page(ptep));
- *addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
- return 1;
-}
-
-pte_t *huge_pte_alloc(struct mm_struct *mm,
- unsigned long addr, unsigned long sz)
-{
- pgd_t *pgd;
- pud_t *pud;
- pte_t *pte = NULL;
-
- pgd = pgd_offset(mm, addr);
- pud = pud_alloc(mm, pgd, addr);
- if (pud) {
- if (sz == PUD_SIZE) {
- pte = (pte_t *)pud;
- } else {
- BUG_ON(sz != PMD_SIZE);
- if (pud_none(*pud))
- pte = huge_pmd_share(mm, addr, pud);
- else
- pte = (pte_t *)pmd_alloc(mm, pud, addr);
- }
- }
- BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));
-
- return pte;
-}
-
-pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
-{
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd = NULL;
-
- pgd = pgd_offset(mm, addr);
- if (pgd_present(*pgd)) {
- pud = pud_offset(pgd, addr);
- if (pud_present(*pud)) {
- if (pud_large(*pud))
- return (pte_t *)pud;
- pmd = pmd_offset(pud, addr);
- }
- }
- return (pte_t *) pmd;
-}
-
#if 0 /* This is just for testing */
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
@@ -240,30 +77,6 @@
return !!(pud_val(pud) & _PAGE_PSE);
}
-struct page *
-follow_huge_pmd(struct mm_struct *mm, unsigned long address,
- pmd_t *pmd, int write)
-{
- struct page *page;
-
- page = pte_page(*(pte_t *)pmd);
- if (page)
- page += ((address & ~PMD_MASK) >> PAGE_SHIFT);
- return page;
-}
-
-struct page *
-follow_huge_pud(struct mm_struct *mm, unsigned long address,
- pud_t *pud, int write)
-{
- struct page *page;
-
- page = pte_page(*(pte_t *)pud);
- if (page)
- page += ((address & ~PUD_MASK) >> PAGE_SHIFT);
- return page;
-}
-
#endif
/* x86_64 also uses this file */
diff --git a/arch/x86/pci/sta2x11-fixup.c b/arch/x86/pci/sta2x11-fixup.c
index 9d8a509..5ceda85 100644
--- a/arch/x86/pci/sta2x11-fixup.c
+++ b/arch/x86/pci/sta2x11-fixup.c
@@ -173,9 +173,7 @@
{
void *vaddr;
- vaddr = dma_generic_alloc_coherent(dev, size, dma_handle, flags, attrs);
- if (!vaddr)
- vaddr = swiotlb_alloc_coherent(dev, size, dma_handle, flags);
+ vaddr = x86_swiotlb_alloc_coherent(dev, size, dma_handle, flags, attrs);
*dma_handle = p2a(*dma_handle, to_pci_dev(dev));
return vaddr;
}
@@ -183,7 +181,7 @@
/* We have our own dma_ops: the same as swiotlb but from alloc (above) */
static struct dma_map_ops sta2x11_dma_ops = {
.alloc = sta2x11_swiotlb_alloc_coherent,
- .free = swiotlb_free_coherent,
+ .free = x86_swiotlb_free_coherent,
.map_page = swiotlb_map_page,
.unmap_page = swiotlb_unmap_page,
.map_sg = swiotlb_map_sg_attrs,
diff --git a/arch/x86/platform/goldfish/Makefile b/arch/x86/platform/goldfish/Makefile
index f030b53..1ce5bb2 100644
--- a/arch/x86/platform/goldfish/Makefile
+++ b/arch/x86/platform/goldfish/Makefile
@@ -1 +1 @@
-obj-$(CONFIG_GOLDFISH) += goldfish.o
+obj-$(CONFIG_GOLDFISH_BUS) += goldfish.o
diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
index 8869287..9cac825 100644
--- a/arch/x86/realmode/rm/Makefile
+++ b/arch/x86/realmode/rm/Makefile
@@ -73,9 +73,10 @@
-march=i386 -mregparm=3 \
-include $(srctree)/$(src)/../../boot/code16gcc.h \
-fno-strict-aliasing -fomit-frame-pointer -fno-pic \
+ -mno-mmx -mno-sse \
$(call cc-option, -ffreestanding) \
$(call cc-option, -fno-toplevel-reorder,\
- $(call cc-option, -fno-unit-at-a-time)) \
+ $(call cc-option, -fno-unit-at-a-time)) \
$(call cc-option, -fno-stack-protector) \
$(call cc-option, -mpreferred-stack-boundary=2)
KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
index 6dd25ec..d45e602 100644
--- a/arch/xtensa/kernel/setup.c
+++ b/arch/xtensa/kernel/setup.c
@@ -170,8 +170,7 @@
__tagtable(BP_TAG_FDT, parse_tag_fdt);
-void __init early_init_dt_setup_initrd_arch(unsigned long start,
- unsigned long end)
+void __init early_init_dt_setup_initrd_arch(u64 start, u64 end)
{
initrd_start = (void *)__va(start);
initrd_end = (void *)__va(end);
diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c
index fafec5d..33b5dec 100644
--- a/drivers/acpi/acpi_platform.c
+++ b/drivers/acpi/acpi_platform.c
@@ -16,6 +16,7 @@
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include "internal.h"
@@ -30,6 +31,28 @@
{ "PNP0D40" },
+ /* Android-specific virtual hardware (goldfish devices)
+ *
+ * Note 1: The sole purpose of adding the following ACPI IDs is to
+ * ensure that these goldfish devices are enumerated to the kernel as
+ * platform devices.
+ *
+ * Note 2: Once the Android emulator has adopted Linux kernel 3.16 or
+ * newer (it still uses 3.10 at the time of writing), these changes will
+ * become obsolete (in fact, this acpi_platform_device_ids array will
+ * disappear), since the new kernel will recognize devices enumerated
+ * via ACPI as platform devices by default.
+ *
+ * Note 3: See external/qemu-android/hw/i386/acpi-dsdt-goldfish.dsl in
+ * the Android source tree for the original definitions of these ACPI
+ * IDs in ASL.
+ */
+ { "GFSH0001" }, /* goldfish battery */
+ { "GFSH0002" }, /* goldfish events */
+ { "GFSH0003" }, /* android pipe */
+ { "GFSH0004" }, /* goldfish framebuffer */
+ { "GFSH0005" }, /* goldfish audio */
+
{ }
};
@@ -103,6 +126,7 @@
pdevinfo.res = resources;
pdevinfo.num_res = count;
pdevinfo.acpi_node.handle = adev->handle;
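+	/* Give the enumerated platform devices a default 32-bit DMA mask */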
+ pdevinfo.dma_mask = DMA_BIT_MASK(32);
pdev = platform_device_register_full(&pdevinfo);
if (IS_ERR(pdev)) {
dev_err(&adev->dev, "platform device creation failed: %ld\n",
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index 9634800..4fe77b8 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -210,11 +210,9 @@
APIs extension; the file's descriptor can then be passed on to other
driver.
-config CMA
- bool "Contiguous Memory Allocator"
- depends on HAVE_DMA_CONTIGUOUS && HAVE_MEMBLOCK
- select MIGRATION
- select MEMORY_ISOLATION
+config DMA_CMA
+ bool "DMA Contiguous Memory Allocator"
+ depends on HAVE_DMA_CONTIGUOUS && CMA
help
This enables the Contiguous Memory Allocator which allows drivers
to allocate big physically-contiguous blocks of memory for use with
@@ -223,17 +221,7 @@
For more information see <include/linux/dma-contiguous.h>.
If unsure, say "n".
-if CMA
-
-config CMA_DEBUG
- bool "CMA debug messages (DEVELOPMENT)"
- depends on DEBUG_KERNEL
- help
- Turns on debug messages in CMA. This produces KERN_DEBUG
- messages for every CMA call as well as various messages while
- processing calls such as dma_alloc_from_contiguous().
- This option does not affect warning and error messages.
-
+if DMA_CMA
comment "Default contiguous memory area size:"
config CMA_SIZE_MBYTES
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index 4e22ce3..5d93bb5 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -6,7 +6,7 @@
attribute_container.o transport_class.o \
topology.o
obj-$(CONFIG_DEVTMPFS) += devtmpfs.o
-obj-$(CONFIG_CMA) += dma-contiguous.o
+obj-$(CONFIG_DMA_CMA) += dma-contiguous.o
obj-y += power/
obj-$(CONFIG_HAS_DMA) += dma-mapping.o
obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o
diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
index 0ca5442..99802d6f 100644
--- a/drivers/base/dma-contiguous.c
+++ b/drivers/base/dma-contiguous.c
@@ -96,7 +96,7 @@
#endif
/**
- * dma_contiguous_reserve() - reserve area for contiguous memory handling
+ * dma_contiguous_reserve() - reserve area(s) for contiguous memory handling
* @limit: End address of the reserved memory (optional, 0 for any).
*
* This function reserves memory from early allocator. It should be
@@ -124,22 +124,29 @@
#endif
}
- if (selected_size) {
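+	/* Reserve a global area only if one has not been set up already */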
+ if (selected_size && !dma_contiguous_default_area) {
pr_debug("%s: reserving %ld MiB for global area\n", __func__,
(unsigned long)selected_size / SZ_1M);
- dma_declare_contiguous(NULL, selected_size, 0, limit);
+ dma_contiguous_reserve_area(selected_size, 0, limit,
+ &dma_contiguous_default_area);
}
};
static DEFINE_MUTEX(cma_mutex);
-static __init int cma_activate_area(unsigned long base_pfn, unsigned long count)
+static int __init cma_activate_area(struct cma *cma)
{
- unsigned long pfn = base_pfn;
- unsigned i = count >> pageblock_order;
+ int bitmap_size = BITS_TO_LONGS(cma->count) * sizeof(long);
+ unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
+ unsigned i = cma->count >> pageblock_order;
struct zone *zone;
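+	/* One bit per page in the area, used to track CMA allocations */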
+ cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+
+ if (!cma->bitmap)
+ return -ENOMEM;
+
WARN_ON_ONCE(!pfn_valid(pfn));
zone = page_zone(pfn_to_page(pfn));
@@ -153,92 +160,53 @@
}
init_cma_reserved_pageblock(pfn_to_page(base_pfn));
} while (--i);
+
return 0;
}
-static __init struct cma *cma_create_area(unsigned long base_pfn,
- unsigned long count)
-{
- int bitmap_size = BITS_TO_LONGS(count) * sizeof(long);
- struct cma *cma;
- int ret = -ENOMEM;
-
- pr_debug("%s(base %08lx, count %lx)\n", __func__, base_pfn, count);
-
- cma = kmalloc(sizeof *cma, GFP_KERNEL);
- if (!cma)
- return ERR_PTR(-ENOMEM);
-
- cma->base_pfn = base_pfn;
- cma->count = count;
- cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
-
- if (!cma->bitmap)
- goto no_mem;
-
- ret = cma_activate_area(base_pfn, count);
- if (ret)
- goto error;
-
- pr_debug("%s: returned %p\n", __func__, (void *)cma);
- return cma;
-
-error:
- kfree(cma->bitmap);
-no_mem:
- kfree(cma);
- return ERR_PTR(ret);
-}
-
-static struct cma_reserved {
- phys_addr_t start;
- unsigned long size;
- struct device *dev;
-} cma_reserved[MAX_CMA_AREAS] __initdata;
-static unsigned cma_reserved_count __initdata;
+static struct cma cma_areas[MAX_CMA_AREAS];
+static unsigned cma_area_count;
static int __init cma_init_reserved_areas(void)
{
- struct cma_reserved *r = cma_reserved;
- unsigned i = cma_reserved_count;
+ int i;
- pr_debug("%s()\n", __func__);
-
- for (; i; --i, ++r) {
- struct cma *cma;
- cma = cma_create_area(PFN_DOWN(r->start),
- r->size >> PAGE_SHIFT);
- if (!IS_ERR(cma))
- dev_set_cma_area(r->dev, cma);
+ for (i = 0; i < cma_area_count; i++) {
+ int ret = cma_activate_area(&cma_areas[i]);
+ if (ret)
+ return ret;
}
+
return 0;
}
core_initcall(cma_init_reserved_areas);
/**
- * dma_declare_contiguous() - reserve area for contiguous memory handling
- * for particular device
- * @dev: Pointer to device structure.
- * @size: Size of the reserved memory.
- * @base: Start address of the reserved memory (optional, 0 for any).
+ * dma_contiguous_reserve_area() - reserve custom contiguous area
+ * @size: Size of the reserved area (in bytes).
+ * @base: Base address of the reserved area (optional, use 0 for any).
* @limit: End address of the reserved memory (optional, 0 for any).
+ * @res_cma: Pointer to store the created cma region.
*
- * This function reserves memory for specified device. It should be
- * called by board specific code when early allocator (memblock or bootmem)
- * is still activate.
+ * This function reserves memory from the early allocator. It should be
+ * called by arch specific code once the early allocator (memblock or bootmem)
+ * has been activated and all other subsystems have already allocated/reserved
+ * memory. It allows the creation of custom reserved areas for specific
+ * devices.
*/
-int __init dma_declare_contiguous(struct device *dev, phys_addr_t size,
- phys_addr_t base, phys_addr_t limit)
+int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
+ phys_addr_t limit, struct cma **res_cma)
{
- struct cma_reserved *r = &cma_reserved[cma_reserved_count];
+ struct cma *cma = &cma_areas[cma_area_count];
phys_addr_t alignment;
+ int ret = 0;
pr_debug("%s(size %lx, base %08lx, limit %08lx)\n", __func__,
(unsigned long)size, (unsigned long)base,
(unsigned long)limit);
/* Sanity checks */
- if (cma_reserved_count == ARRAY_SIZE(cma_reserved)) {
+ if (cma_area_count == ARRAY_SIZE(cma_areas)) {
pr_err("Not enough slots for CMA reserved regions!\n");
return -ENOSPC;
}
@@ -256,7 +224,7 @@
if (base) {
if (memblock_is_region_reserved(base, size) ||
memblock_reserve(base, size) < 0) {
- base = -EBUSY;
+ ret = -EBUSY;
goto err;
}
} else {
@@ -266,7 +234,7 @@
*/
phys_addr_t addr = __memblock_alloc_base(size, alignment, limit);
if (!addr) {
- base = -ENOMEM;
+ ret = -ENOMEM;
goto err;
} else {
base = addr;
@@ -277,10 +245,11 @@
* Each reserved area must be initialised later, when more kernel
* subsystems (like slab allocator) are available.
*/
- r->start = base;
- r->size = size;
- r->dev = dev;
- cma_reserved_count++;
+ cma->base_pfn = PFN_DOWN(base);
+ cma->count = size >> PAGE_SHIFT;
+ *res_cma = cma;
+ cma_area_count++;
+
pr_info("CMA: reserved %ld MiB at %08lx\n", (unsigned long)size / SZ_1M,
(unsigned long)base);
@@ -289,7 +258,7 @@
return 0;
err:
pr_err("CMA: failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
- return base;
+ return ret;
}
/**
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 0357ac44..ea17c9a 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -42,7 +42,7 @@
config COMMON_CLK_VERSATILE
bool "Clock driver for ARM Reference designs"
- depends on ARCH_INTEGRATOR || ARCH_REALVIEW || ARCH_VEXPRESS
+ depends on ARCH_INTEGRATOR || ARCH_REALVIEW || ARCH_VEXPRESS || ARCH_LIONHEAD
---help---
Supports clocking on ARM Reference designs:
- Integrator/AP and Integrator/CP
diff --git a/drivers/clk/versatile/Makefile b/drivers/clk/versatile/Makefile
index c16ca78..96b95c1 100644
--- a/drivers/clk/versatile/Makefile
+++ b/drivers/clk/versatile/Makefile
@@ -2,6 +2,7 @@
obj-$(CONFIG_ICST) += clk-icst.o
obj-$(CONFIG_ARCH_INTEGRATOR) += clk-integrator.o
obj-$(CONFIG_INTEGRATOR_IMPD1) += clk-impd1.o
+obj-$(CONFIG_ARCH_LIONHEAD) += clk-lionhead.o clk-sp810.o
obj-$(CONFIG_ARCH_REALVIEW) += clk-realview.o
obj-$(CONFIG_ARCH_VEXPRESS) += clk-vexpress.o clk-sp810.o
obj-$(CONFIG_VEXPRESS_CONFIG) += clk-vexpress-osc.o
diff --git a/drivers/clk/versatile/clk-lionhead.c b/drivers/clk/versatile/clk-lionhead.c
new file mode 100644
index 0000000..4a4647a
--- /dev/null
+++ b/drivers/clk/versatile/clk-lionhead.c
@@ -0,0 +1,88 @@
+/*
+ * modified from clk-vexpress.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Copyright (C) 2012 ARM Limited
+ */
+
+#include <linux/amba/sp810.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/vexpress.h>
+
+static struct clk *lionhead_sp810_timerclken[4];
+static DEFINE_SPINLOCK(vexpress_sp810_lock);
+
+static void __init lionhead_sp810_init(void __iomem *base)
+{
+ int i;
+
+ if (WARN_ON(!base))
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(lionhead_sp810_timerclken); i++) {
+ char name[12];
+ const char *parents[] = {
+ "v2m:refclk32khz", /* REFCLK */
+ "v2m:refclk1mhz" /* TIMCLK */
+ };
+
+ snprintf(name, ARRAY_SIZE(name), "timerclken%d", i);
+
+ lionhead_sp810_timerclken[i] = clk_register_mux(NULL, name,
+ parents, 2, 0, base + SCCTRL,
+ SCCTRL_TIMERENnSEL_SHIFT(i), 1,
+ 0, &vexpress_sp810_lock);
+
+ if (WARN_ON(IS_ERR(lionhead_sp810_timerclken[i])))
+ break;
+ }
+}
+
+
+static const char * const lionhead_clk_24mhz_periphs[] __initconst = {
+ "mb:uart0", "mb:uart1", "mb:uart2", "mb:uart3",
+ "mb:kmi0", "mb:kmi1"
+};
+
+void __init lionhead_clk_init(void __iomem *sp810_base)
+{
+ struct clk *clk;
+ int i;
+
+ clk = clk_register_fixed_rate(NULL, "dummy_apb_pclk", NULL,
+ CLK_IS_ROOT, 0);
+ WARN_ON(clk_register_clkdev(clk, "apb_pclk", NULL));
+
+ clk = clk_register_fixed_rate(NULL, "v2m:clk_24mhz", NULL,
+ CLK_IS_ROOT, 24000000);
+ for (i = 0; i < ARRAY_SIZE(lionhead_clk_24mhz_periphs); i++)
+ WARN_ON(clk_register_clkdev(clk, NULL,
+ lionhead_clk_24mhz_periphs[i]));
+
+ clk = clk_register_fixed_rate(NULL, "v2m:refclk32khz", NULL,
+ CLK_IS_ROOT, 32768);
+ WARN_ON(clk_register_clkdev(clk, NULL, "v2m:wdt"));
+
+ clk = clk_register_fixed_rate(NULL, "v2m:refclk1mhz", NULL,
+ CLK_IS_ROOT, 1000000);
+
+ lionhead_sp810_init(sp810_base);
+
+ for (i = 0; i < ARRAY_SIZE(lionhead_sp810_timerclken); i++)
+ WARN_ON(clk_set_parent(lionhead_sp810_timerclken[i], clk));
+
+ WARN_ON(clk_register_clkdev(lionhead_sp810_timerclken[0],
+ "v2m-timer0", "sp804"));
+ WARN_ON(clk_register_clkdev(lionhead_sp810_timerclken[1],
+ "v2m-timer1", "sp804"));
+}
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index f151c6c..593485b0 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -67,6 +67,21 @@
bool
select CLKSRC_OF if OF
+config ARM_ARCH_TIMER_EVTSTREAM
+ bool "Support for ARM architected timer event stream generation"
+ default y if ARM_ARCH_TIMER
+ help
+ This option enables support for event stream generation based on
+ the ARM architected timer. It is used for waking up CPUs executing
+ the wfe instruction at a frequency represented as a power-of-2
+ divisor of the clock rate.
+ The main use of the event stream is wfe-based timeouts of userspace
+ locking implementations. It might also be useful for imposing a timeout
+ on wfe to safeguard against any programming errors in case an expected
+ event is not generated.
+ This must be disabled for hardware validation purposes, to detect any
+ hardware anomalies such as missing events.
+
config CLKSRC_METAG_GENERIC
def_bool y if METAG
help
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index a2b2541..958591e 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -13,6 +13,7 @@
#include <linux/device.h>
#include <linux/smp.h>
#include <linux/cpu.h>
+#include <linux/cpu_pm.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/of_irq.h>
@@ -123,6 +124,19 @@
return 0;
}
+static void arch_timer_configure_evtstream(void)
+{
+ int evt_stream_div, pos;
+
+ /* Find the closest power of two to the divisor */
+ evt_stream_div = arch_timer_rate / ARCH_TIMER_EVT_STREAM_FREQ;
+ pos = fls(evt_stream_div);
+ if (pos > 1 && !(evt_stream_div & (1 << (pos - 2))))
+ pos--;
+ /* enable event stream */
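+	/* the divider exponent field is 4 bits wide, hence the cap at 15 */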
+ arch_timer_evtstrm_enable(min(pos, 15));
+}
+
static int __cpuinit arch_timer_setup(struct clock_event_device *clk)
{
clk->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP;
@@ -154,6 +168,8 @@
}
arch_counter_set_user_access();
+ if (IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM))
+ arch_timer_configure_evtstream();
return 0;
}
@@ -186,27 +202,19 @@
return arch_timer_rate;
}
-/*
- * Some external users of arch_timer_read_counter (e.g. sched_clock) may try to
- * call it before it has been initialised. Rather than incur a performance
- * penalty checking for initialisation, provide a default implementation that
- * won't lead to time appearing to jump backwards.
- */
-static u64 arch_timer_read_zero(void)
+u64 arch_timer_read_counter(void)
{
- return 0;
+ return arch_counter_get_cntvct();
}
-u64 (*arch_timer_read_counter)(void) = arch_timer_read_zero;
-
static cycle_t arch_counter_read(struct clocksource *cs)
{
- return arch_timer_read_counter();
+ return arch_counter_get_cntvct();
}
static cycle_t arch_counter_read_cc(const struct cyclecounter *cc)
{
- return arch_timer_read_counter();
+ return arch_counter_get_cntvct();
}
static struct clocksource clocksource_counter = {
@@ -268,6 +276,33 @@
.notifier_call = arch_timer_cpu_notify,
};
+#ifdef CONFIG_CPU_PM
+static unsigned int saved_cntkctl;
+static int arch_timer_cpu_pm_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu)
+{
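+	/*
+	 * CNTKCTL may be lost when the CPU enters a low-power state, so
+	 * save it on PM entry and restore it on exit.
+	 */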
+ if (action == CPU_PM_ENTER)
+ saved_cntkctl = arch_timer_get_cntkctl();
+ else if (action == CPU_PM_ENTER_FAILED || action == CPU_PM_EXIT)
+ arch_timer_set_cntkctl(saved_cntkctl);
+ return NOTIFY_OK;
+}
+
+static struct notifier_block arch_timer_cpu_pm_notifier = {
+ .notifier_call = arch_timer_cpu_pm_notify,
+};
+
+static int __init arch_timer_cpu_pm_init(void)
+{
+ return cpu_pm_register_notifier(&arch_timer_cpu_pm_notifier);
+}
+#else
+static int __init arch_timer_cpu_pm_init(void)
+{
+ return 0;
+}
+#endif
+
static int __init arch_timer_register(void)
{
int err;
@@ -287,7 +322,7 @@
cyclecounter.mult = clocksource_counter.mult;
cyclecounter.shift = clocksource_counter.shift;
timecounter_init(&timecounter, &cyclecounter,
- arch_counter_get_cntpct());
+ arch_counter_get_cntvct());
if (arch_timer_use_virtual) {
ppi = arch_timer_ppi[VIRT_PPI];
@@ -317,11 +352,17 @@
if (err)
goto out_free_irq;
+ err = arch_timer_cpu_pm_init();
+ if (err)
+ goto out_unreg_notify;
+
/* Immediately configure the timer on the boot CPU */
arch_timer_setup(this_cpu_ptr(arch_timer_evt));
return 0;
+out_unreg_notify:
+ unregister_cpu_notifier(&arch_timer_cpu_nb);
out_free_irq:
if (arch_timer_use_virtual)
free_percpu_irq(arch_timer_ppi[VIRT_PPI], arch_timer_evt);
@@ -376,11 +417,6 @@
}
}
- if (arch_timer_use_virtual)
- arch_timer_read_counter = arch_counter_get_cntvct;
- else
- arch_timer_read_counter = arch_counter_get_cntpct;
-
arch_timer_register();
arch_timer_arch_init();
}
diff --git a/drivers/input/keyboard/goldfish_events.c b/drivers/input/keyboard/goldfish_events.c
index 9f60a2e..90234eb 100644
--- a/drivers/input/keyboard/goldfish_events.c
+++ b/drivers/input/keyboard/goldfish_events.c
@@ -23,6 +23,7 @@
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/io.h>
+#include <linux/acpi.h>
enum {
REG_READ = 0x00,
@@ -179,11 +180,25 @@
return 0;
}
+static const struct of_device_id goldfish_events_of_match[] = {
+ { .compatible = "generic,goldfish-events-keypad", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, goldfish_events_of_match);
+
+static const struct acpi_device_id goldfish_events_acpi_match[] = {
+ { "GFSH0002", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, goldfish_events_acpi_match);
+
static struct platform_driver events_driver = {
.probe = events_probe,
.driver = {
.owner = THIS_MODULE,
.name = "goldfish_events",
+ .of_match_table = goldfish_events_of_match,
+ .acpi_match_table = ACPI_PTR(goldfish_events_acpi_match),
},
};
diff --git a/drivers/of/base.c b/drivers/of/base.c
index a6f584a..9b6cd2d0 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -18,6 +18,7 @@
* 2 of the License, or (at your option) any later version.
*/
#include <linux/ctype.h>
+#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/spinlock.h>
@@ -230,6 +231,100 @@
}
EXPORT_SYMBOL(of_get_property);
+/*
+ * arch_match_cpu_phys_id - Match the given logical CPU and physical id
+ *
+ * @cpu: logical cpu index of a core/thread
+ * @phys_id: physical identifier of a core/thread
+ *
+ * CPU logical to physical index mapping is architecture specific.
+ * However, this __weak function provides a default match of physical
+ * id to logical cpu index. The phys_id provided here usually holds a
+ * value read from the device tree, which must match the hardware's
+ * internal registers.
+ *
+ * Returns true if the physical identifier and the logical cpu index
+ * correspond to the same core/thread, false otherwise.
+ */
+bool __weak arch_match_cpu_phys_id(int cpu, u64 phys_id)
+{
+ return (u32)phys_id == cpu;
+}
+
+/**
+ * Checks if the given "prop_name" property holds the physical id of the
+ * core/thread corresponding to the logical cpu 'cpu'. If 'thread' is not
+ * NULL, local thread number within the core is returned in it.
+ */
+static bool __of_find_n_match_cpu_property(struct device_node *cpun,
+ const char *prop_name, int cpu, unsigned int *thread)
+{
+ const __be32 *cell;
+ int ac, prop_len, tid;
+ u64 hwid;
+
+ ac = of_n_addr_cells(cpun);
+ cell = of_get_property(cpun, prop_name, &prop_len);
+ if (!cell)
+ return false;
+ prop_len /= sizeof(*cell);
+ for (tid = 0; tid < prop_len; tid++) {
+ hwid = of_read_number(cell, ac);
+ if (arch_match_cpu_phys_id(cpu, hwid)) {
+ if (thread)
+ *thread = tid;
+ return true;
+ }
+ cell += ac;
+ }
+ return false;
+}
+
+/**
+ * of_get_cpu_node - Get device node associated with the given logical CPU
+ *
+ * @cpu: CPU number(logical index) for which device node is required
+ * @thread: if not NULL, local thread number within the physical core is
+ * returned
+ *
+ * The main purpose of this function is to retrieve the device node for the
+ * given logical CPU index. It should be used to initialize the of_node in
+ * cpu device. Once of_node in cpu device is populated, all the further
+ * references can use that instead.
+ *
+ * CPU logical to physical index mapping is architecture specific and is built
+ * before booting secondary cores. This function uses arch_match_cpu_phys_id
+ * which can be overridden by architecture specific implementation.
+ *
+ * Returns a node pointer for the logical cpu if found, else NULL.
+ */
+struct device_node *of_get_cpu_node(int cpu, unsigned int *thread)
+{
+ struct device_node *cpun, *cpus;
+
+ cpus = of_find_node_by_path("/cpus");
+ if (!cpus) {
+ pr_warn("Missing cpus node, bailing out\n");
+ return NULL;
+ }
+
+ for_each_child_of_node(cpus, cpun) {
+ if (of_node_cmp(cpun->type, "cpu"))
+ continue;
+ /* Check for non-standard "ibm,ppc-interrupt-server#s" property
+ * for thread ids on PowerPC. If it doesn't exist fallback to
+ * standard "reg" property.
+ */
+ if (IS_ENABLED(CONFIG_PPC) &&
+ __of_find_n_match_cpu_property(cpun,
+ "ibm,ppc-interrupt-server#s", cpu, thread))
+ return cpun;
+ if (__of_find_n_match_cpu_property(cpun, "reg", cpu, thread))
+ return cpun;
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(of_get_cpu_node);
+
/** Checks if the given "compat" string matches one of the strings in
* the device's "compatible" property
*/
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 99a2f78..5da7421 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -550,7 +550,8 @@
*/
void __init early_init_dt_check_for_initrd(unsigned long node)
{
- unsigned long start, end, len;
+ u64 start, end;
+ unsigned long len;
__be32 *prop;
pr_debug("Looking for initrd properties... ");
@@ -558,15 +559,16 @@
prop = of_get_flat_dt_prop(node, "linux,initrd-start", &len);
if (!prop)
return;
- start = of_read_ulong(prop, len/4);
+ start = of_read_number(prop, len/4);
prop = of_get_flat_dt_prop(node, "linux,initrd-end", &len);
if (!prop)
return;
- end = of_read_ulong(prop, len/4);
+ end = of_read_number(prop, len/4);
early_init_dt_setup_initrd_arch(start, end);
- pr_debug("initrd_start=0x%lx initrd_end=0x%lx\n", start, end);
+ pr_debug("initrd_start=0x%llx initrd_end=0x%llx\n",
+ (unsigned long long)start, (unsigned long long)end);
}
#else
inline void early_init_dt_check_for_initrd(unsigned long node)
diff --git a/drivers/platform/Kconfig b/drivers/platform/Kconfig
index 69616ae..53c2802 100644
--- a/drivers/platform/Kconfig
+++ b/drivers/platform/Kconfig
@@ -1,7 +1,5 @@
if X86
source "drivers/platform/x86/Kconfig"
endif
-if GOLDFISH
source "drivers/platform/goldfish/Kconfig"
-endif
diff --git a/drivers/platform/goldfish/Kconfig b/drivers/platform/goldfish/Kconfig
index 635ef25..f4f00f7 100644
--- a/drivers/platform/goldfish/Kconfig
+++ b/drivers/platform/goldfish/Kconfig
@@ -1,5 +1,23 @@
+menuconfig GOLDFISH
+ bool "Platform support for Goldfish virtual devices"
+ depends on X86_32 || X86_64 || ARM || ARM64 || MIPS
+ ---help---
+ Say Y here to get to see options for the Goldfish virtual platform.
+ This option alone does not add any kernel code.
+
+ Unless you are building for the Android Goldfish emulator, say N here.
+
+if GOLDFISH
+
+config GOLDFISH_BUS
+ tristate "Goldfish platform bus"
+ ---help---
+ This is a virtual bus to host Goldfish Android Virtual Devices.
+
config GOLDFISH_PIPE
tristate "Goldfish virtual device for QEMU pipes"
---help---
This is a virtual device to drive the QEMU pipe interface used by
the Goldfish Android Virtual Device.
+
+endif # GOLDFISH
diff --git a/drivers/platform/goldfish/Makefile b/drivers/platform/goldfish/Makefile
index a002239..d348712 100644
--- a/drivers/platform/goldfish/Makefile
+++ b/drivers/platform/goldfish/Makefile
@@ -1,5 +1,5 @@
#
# Makefile for Goldfish platform specific drivers
#
-obj-$(CONFIG_GOLDFISH) += pdev_bus.o
+obj-$(CONFIG_GOLDFISH_BUS) += pdev_bus.o
obj-$(CONFIG_GOLDFISH_PIPE) += goldfish_pipe.o
diff --git a/drivers/platform/goldfish/goldfish_pipe.c b/drivers/platform/goldfish/goldfish_pipe.c
index 4f5aa831..af93a56 100644
--- a/drivers/platform/goldfish/goldfish_pipe.c
+++ b/drivers/platform/goldfish/goldfish_pipe.c
@@ -2,6 +2,7 @@
* Copyright (C) 2011 Google, Inc.
* Copyright (C) 2012 Intel, Inc.
* Copyright (C) 2013 Intel, Inc.
+ * Copyright (C) 2014 Linaro Limited
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -56,6 +57,8 @@
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/acpi.h>
/*
* IMPORTANT: The following constants must match the ones used and defined
@@ -66,12 +69,19 @@
#define PIPE_REG_COMMAND 0x00 /* write: value = command */
#define PIPE_REG_STATUS 0x04 /* read */
#define PIPE_REG_CHANNEL 0x08 /* read/write: channel id */
+#ifdef CONFIG_64BIT
+#define PIPE_REG_CHANNEL_HIGH 0x30 /* read/write: channel id */
+#endif
#define PIPE_REG_SIZE 0x0c /* read/write: buffer size */
#define PIPE_REG_ADDRESS 0x10 /* write: physical address */
+#ifdef CONFIG_64BIT
+#define PIPE_REG_ADDRESS_HIGH 0x34 /* write: physical address */
+#endif
#define PIPE_REG_WAKES 0x14 /* read: wake flags */
#define PIPE_REG_PARAMS_ADDR_LOW 0x18 /* read/write: batch data address */
#define PIPE_REG_PARAMS_ADDR_HIGH 0x1c /* read/write: batch data address */
#define PIPE_REG_ACCESS_PARAMS 0x20 /* write: batch access */
+#define PIPE_REG_VERSION 0x24 /* read: device version */
/* list of commands for PIPE_REG_COMMAND */
#define CMD_OPEN 1 /* open new channel */
@@ -87,12 +97,6 @@
#define CMD_WRITE_BUFFER 4 /* send a user buffer to the emulator */
#define CMD_WAKE_ON_WRITE 5 /* tell the emulator to wake us when writing
is possible */
-
-/* The following commands are related to read operations, they must be
- * listed in the same order than the corresponding write ones, since we
- * will use (CMD_READ_BUFFER - CMD_WRITE_BUFFER) as a special offset
- * in goldfish_pipe_read_write() below.
- */
#define CMD_READ_BUFFER 6 /* receive a user buffer from the emulator */
#define CMD_WAKE_ON_READ 7 /* tell the emulator to wake us when reading
* is possible */
@@ -109,9 +113,9 @@
#define PIPE_WAKE_WRITE (1 << 2) /* pipe can now be written to */
struct access_params {
- u32 channel;
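+	/* channel and address must be wide enough to hold a kernel
+	 * pointer on 64-bit kernels */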
+ unsigned long channel;
u32 size;
- u32 address;
+ unsigned long address;
u32 cmd;
u32 result;
/* reserved for future extension */
@@ -127,6 +131,7 @@
unsigned char __iomem *base;
struct access_params *aps;
int irq;
+ u32 version;
};
static struct goldfish_pipe_dev pipe_dev[1];
@@ -155,7 +160,10 @@
struct goldfish_pipe_dev *dev = pipe->dev;
spin_lock_irqsave(&dev->lock, flags);
- writel((u32)pipe, dev->base + PIPE_REG_CHANNEL);
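+	/* The pipe's kernel pointer doubles as the channel id; on 64-bit
+	 * kernels it must be split across the two 32-bit registers. */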
+ writel((u32)(u64)pipe, dev->base + PIPE_REG_CHANNEL);
+#ifdef CONFIG_64BIT
+ writel((u32)((u64)pipe >> 32), dev->base + PIPE_REG_CHANNEL_HIGH);
+#endif
writel(cmd, dev->base + PIPE_REG_COMMAND);
status = readl(dev->base + PIPE_REG_STATUS);
spin_unlock_irqrestore(&dev->lock, flags);
@@ -168,7 +176,10 @@
struct goldfish_pipe_dev *dev = pipe->dev;
spin_lock_irqsave(&dev->lock, flags);
- writel((u32)pipe, dev->base + PIPE_REG_CHANNEL);
+ writel((u32)(u64)pipe, dev->base + PIPE_REG_CHANNEL);
+#ifdef CONFIG_64BIT
+ writel((u32)((u64)pipe >> 32), dev->base + PIPE_REG_CHANNEL_HIGH);
+#endif
writel(cmd, dev->base + PIPE_REG_COMMAND);
spin_unlock_irqrestore(&dev->lock, flags);
}
@@ -258,19 +269,14 @@
return 0;
}
-/* This function is used for both reading from and writing to a given
- * pipe.
- */
static ssize_t goldfish_pipe_read_write(struct file *filp, char __user *buffer,
- size_t bufflen, int is_write)
+ size_t bufflen, int is_write)
{
unsigned long irq_flags;
struct goldfish_pipe *pipe = filp->private_data;
struct goldfish_pipe_dev *dev = pipe->dev;
- const int cmd_offset = is_write ? 0
- : (CMD_READ_BUFFER - CMD_WRITE_BUFFER);
unsigned long address, address_end;
- int ret = 0;
+ int count = 0, ret = -EINVAL;
/* If the emulator already closed the pipe, no need to go further */
if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags))
@@ -293,76 +299,108 @@
address_end = address + bufflen;
while (address < address_end) {
- unsigned long page_end = (address & PAGE_MASK) + PAGE_SIZE;
- unsigned long next = page_end < address_end ? page_end
- : address_end;
- unsigned long avail = next - address;
+ unsigned long page_end = (address & PAGE_MASK) + PAGE_SIZE;
+ unsigned long next = page_end < address_end ? page_end
+ : address_end;
+ unsigned long avail = next - address;
int status, wakeBit;
+ struct page *page;
- /* Ensure that the corresponding page is properly mapped */
- /* FIXME: this isn't safe or sufficient - use get_user_pages */
- if (is_write) {
- char c;
- /* Ensure that the page is mapped and readable */
- if (__get_user(c, (char __user *)address)) {
- if (!ret)
- ret = -EFAULT;
- break;
- }
+ /* Either vaddr or paddr depending on the device version */
+ unsigned long xaddr;
+
+ /*
+ * We grab the pages on a page-by-page basis in case user
+ * space gives us a potentially huge buffer but the read only
+	 * returns a small amount; in that case there is no need to pin
+	 * that much memory to the process.
+ */
+ down_read(¤t->mm->mmap_sem);
+ ret = get_user_pages(current, current->mm, address, 1,
+ !is_write, 0, &page, NULL);
+ up_read(¤t->mm->mmap_sem);
+ if (ret < 0)
+ return ret;
+
+ if (dev->version) {
+ /* Device version 1 or newer (qemu-android) expects the
+ * physical address. */
+ xaddr = page_to_phys(page) | (address & ~PAGE_MASK);
} else {
- /* Ensure that the page is mapped and writable */
- if (__put_user(0, (char __user *)address)) {
- if (!ret)
- ret = -EFAULT;
- break;
- }
+ /* Device version 0 (classic emulator) expects the
+ * virtual address. */
+ xaddr = address;
}
/* Now, try to transfer the bytes in the current page */
spin_lock_irqsave(&dev->lock, irq_flags);
- if (access_with_param(dev, CMD_WRITE_BUFFER + cmd_offset,
- address, avail, pipe, &status)) {
- writel((u32)pipe, dev->base + PIPE_REG_CHANNEL);
+ if (access_with_param(dev,
+ is_write ? CMD_WRITE_BUFFER : CMD_READ_BUFFER,
+ xaddr, avail, pipe, &status)) {
+ writel((u32)(u64)pipe, dev->base + PIPE_REG_CHANNEL);
+#ifdef CONFIG_64BIT
+ writel((u32)((u64)pipe >> 32), dev->base + PIPE_REG_CHANNEL_HIGH);
+#endif
writel(avail, dev->base + PIPE_REG_SIZE);
- writel(address, dev->base + PIPE_REG_ADDRESS);
- writel(CMD_WRITE_BUFFER + cmd_offset,
- dev->base + PIPE_REG_COMMAND);
+ writel(xaddr, dev->base + PIPE_REG_ADDRESS);
+#ifdef CONFIG_64BIT
+ writel((u32)((u64)xaddr >> 32), dev->base + PIPE_REG_ADDRESS_HIGH);
+#endif
+ writel(is_write ? CMD_WRITE_BUFFER : CMD_READ_BUFFER,
+ dev->base + PIPE_REG_COMMAND);
status = readl(dev->base + PIPE_REG_STATUS);
}
spin_unlock_irqrestore(&dev->lock, irq_flags);
+ if (status > 0 && !is_write)
+ set_page_dirty(page);
+ put_page(page);
+
if (status > 0) { /* Correct transfer */
- ret += status;
+ count += status;
address += status;
continue;
+ } else if (status == 0) { /* EOF */
+ ret = 0;
+ break;
+ } else if (status < 0 && count > 0) {
+ /*
+			 * An error occurred, and we already transferred
+			 * something on one of the previous pages.
+			 * Just return what we already copied and log the
+			 * error.
+			 *
+			 * Note: this seems like an incorrect approach, but
+			 * we cannot change it until we check whether any
+			 * user space ABI relies on this behavior.
+ */
+ if (status != PIPE_ERROR_AGAIN)
+ pr_info_ratelimited("goldfish_pipe: backend returned error %d on %s\n",
+ status, is_write ? "write" : "read");
+ ret = 0;
+ break;
}
- if (status == 0) /* EOF */
- break;
-
- /* An error occured. If we already transfered stuff, just
- * return with its count. We expect the next call to return
- * an error code */
- if (ret > 0)
- break;
-
- /* If the error is not PIPE_ERROR_AGAIN, or if we are not in
- * non-blocking mode, just return the error code.
- */
+ /*
+ * If the error is not PIPE_ERROR_AGAIN, or if we are not in
+ * non-blocking mode, just return the error code.
+ */
if (status != PIPE_ERROR_AGAIN ||
(filp->f_flags & O_NONBLOCK) != 0) {
ret = goldfish_pipe_error_convert(status);
break;
}
- /* We will have to wait until more data/space is available.
- * First, mark the pipe as waiting for a specific wake signal.
- */
+ /*
+ * The backend blocked the read/write, wait until the backend
+ * tells us it's ready to process more data.
+ */
wakeBit = is_write ? BIT_WAKE_ON_WRITE : BIT_WAKE_ON_READ;
set_bit(wakeBit, &pipe->flags);
/* Tell the emulator we're going to wait for a wake event */
- goldfish_cmd(pipe, CMD_WAKE_ON_WRITE + cmd_offset);
+ goldfish_cmd(pipe,
+ is_write ? CMD_WAKE_ON_WRITE : CMD_WAKE_ON_READ);
/* Unlock the pipe, then wait for the wake signal */
mutex_unlock(&pipe->lock);
@@ -370,22 +408,29 @@
while (test_bit(wakeBit, &pipe->flags)) {
if (wait_event_interruptible(
pipe->wake_queue,
- !test_bit(wakeBit, &pipe->flags)))
- return -ERESTARTSYS;
+ !test_bit(wakeBit, &pipe->flags))) {
+ ret = -ERESTARTSYS;
+ break;
+ }
- if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags))
- return -EIO;
+ if (test_bit(BIT_CLOSED_ON_HOST, &pipe->flags)) {
+ ret = -EIO;
+ break;
+ }
}
/* Try to re-acquire the lock */
- if (mutex_lock_interruptible(&pipe->lock))
- return -ERESTARTSYS;
-
- /* Try the transfer again */
- continue;
+ if (mutex_lock_interruptible(&pipe->lock)) {
+ ret = -ERESTARTSYS;
+ break;
+ }
}
mutex_unlock(&pipe->lock);
- return ret;
+
+ if (ret < 0)
+ return ret;
+ else
+ return count;
}
static ssize_t goldfish_pipe_read(struct file *filp, char __user *buffer,
@@ -438,16 +483,25 @@
unsigned long irq_flags;
int count = 0;
- /* We're going to read from the emulator a list of (channel,flags)
- * pairs corresponding to the wake events that occured on each
- * blocked pipe (i.e. channel).
- */
+ /*
+ * We're going to read from the emulator a list of (channel,flags)
+	 * pairs corresponding to the wake events that occurred on each
+ * blocked pipe (i.e. channel).
+ */
spin_lock_irqsave(&dev->lock, irq_flags);
for (;;) {
/* First read the channel, 0 means the end of the list */
struct goldfish_pipe *pipe;
unsigned long wakes;
- unsigned long channel = readl(dev->base + PIPE_REG_CHANNEL);
+ unsigned long channel = 0;
+
+#ifdef CONFIG_64BIT
+ channel = (u64)readl(dev->base + PIPE_REG_CHANNEL_HIGH) << 32;
+
+ if (channel == 0)
+ break;
+#endif
+ channel |= readl(dev->base + PIPE_REG_CHANNEL);
if (channel == 0)
break;
@@ -584,6 +638,11 @@
goto error;
}
setup_access_params_addr(pdev, dev);
+
+ /* Although the pipe device in the classic Android emulator does not
+ * recognize the 'version' register, it won't treat this as an error
+ * either and will simply return 0, which is fine. */
+ dev->version = readl(dev->base + PIPE_REG_VERSION);
return 0;
error:
@@ -599,11 +658,26 @@
return 0;
}
+static const struct acpi_device_id goldfish_pipe_acpi_match[] = {
+ { "GFSH0003", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, goldfish_pipe_acpi_match);
+
+static const struct of_device_id goldfish_pipe_of_match[] = {
+ { .compatible = "generic,android-pipe", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, goldfish_pipe_of_match);
+
static struct platform_driver goldfish_pipe = {
.probe = goldfish_pipe_probe,
.remove = goldfish_pipe_remove,
.driver = {
- .name = "goldfish_pipe"
+ .name = "goldfish_pipe",
+ .owner = THIS_MODULE,
+ .of_match_table = goldfish_pipe_of_match,
+ .acpi_match_table = ACPI_PTR(goldfish_pipe_acpi_match),
}
};
diff --git a/drivers/platform/goldfish/pdev_bus.c b/drivers/platform/goldfish/pdev_bus.c
index 92cc4cf..eb25440 100644
--- a/drivers/platform/goldfish/pdev_bus.c
+++ b/drivers/platform/goldfish/pdev_bus.c
@@ -36,6 +36,7 @@
#define PDEV_BUS_IO_SIZE (0x14)
#define PDEV_BUS_IRQ (0x18)
#define PDEV_BUS_IRQ_COUNT (0x1c)
+#define PDEV_BUS_GET_NAME_HIGH (0x20)
struct pdev_bus_dev {
struct list_head list;
@@ -129,7 +130,10 @@
dev->pdev.dev.dma_mask = (void *)(dev->pdev.name + name_len + 1);
*dev->pdev.dev.dma_mask = ~0;
- writel((unsigned long)name, pdev_bus_base + PDEV_BUS_GET_NAME);
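+	/* On 64-bit kernels the name buffer's kernel address is handed to
+	 * the emulator in two 32-bit halves. */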
+#ifdef CONFIG_64BIT
+ writel((u32)((u64)name>>32), pdev_bus_base + PDEV_BUS_GET_NAME_HIGH);
+#endif
+ writel((u32)(u64)name, pdev_bus_base + PDEV_BUS_GET_NAME);
name[name_len] = '\0';
dev->pdev.id = readl(pdev_bus_base + PDEV_BUS_ID);
dev->pdev.resource[0].start = base;
@@ -184,11 +188,6 @@
pdev_bus_addr = r->start;
pdev_bus_len = resource_size(r);
- if (request_mem_region(pdev_bus_addr, pdev_bus_len, "goldfish")) {
- dev_err(&pdev->dev, "unable to reserve Goldfish MMIO.\n");
- return -EBUSY;
- }
-
pdev_bus_base = ioremap(pdev_bus_addr, pdev_bus_len);
if (pdev_bus_base == NULL) {
ret = -ENOMEM;
diff --git a/drivers/power/goldfish_battery.c b/drivers/power/goldfish_battery.c
index 29eba88..486949d 100644
--- a/drivers/power/goldfish_battery.c
+++ b/drivers/power/goldfish_battery.c
@@ -24,6 +24,7 @@
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/acpi.h>
struct goldfish_battery_data {
void __iomem *reg_base;
@@ -222,11 +223,25 @@
return 0;
}
+static const struct of_device_id goldfish_battery_of_match[] = {
+ { .compatible = "generic,goldfish-battery", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, goldfish_battery_of_match);
+
+static const struct acpi_device_id goldfish_battery_acpi_match[] = {
+ { "GFSH0001", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, goldfish_battery_acpi_match);
+
static struct platform_driver goldfish_battery_device = {
.probe = goldfish_battery_probe,
.remove = goldfish_battery_remove,
.driver = {
- .name = "goldfish-battery"
+ .name = "goldfish-battery",
+ .of_match_table = goldfish_battery_of_match,
+ .acpi_match_table = ACPI_PTR(goldfish_battery_acpi_match),
}
};
module_platform_driver(goldfish_battery_device);
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index b983813..8817427 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -1026,6 +1026,12 @@
This driver can also be built as a module. If so, the module
will be called rtc-bfin.
+config RTC_DRV_GOLDFISH
+ tristate "GOLDFISH"
+ depends on GOLDFISH
+ help
+ RTC driver for Goldfish Virtual Platform
+
config RTC_DRV_RS5C313
tristate "Ricoh RS5C313"
depends on SH_LANDISK
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index c33f86f..14f1c95 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -53,6 +53,7 @@
obj-$(CONFIG_RTC_DRV_EP93XX) += rtc-ep93xx.o
obj-$(CONFIG_RTC_DRV_FM3130) += rtc-fm3130.o
obj-$(CONFIG_RTC_DRV_GENERIC) += rtc-generic.o
+obj-$(CONFIG_RTC_DRV_GOLDFISH) += rtc-goldfish.o
obj-$(CONFIG_RTC_DRV_HID_SENSOR_TIME) += rtc-hid-sensor-time.o
obj-$(CONFIG_RTC_DRV_IMXDI) += rtc-imxdi.o
obj-$(CONFIG_RTC_DRV_ISL1208) += rtc-isl1208.o
diff --git a/drivers/rtc/rtc-goldfish.c b/drivers/rtc/rtc-goldfish.c
new file mode 100644
index 0000000..829d1c7
--- /dev/null
+++ b/drivers/rtc/rtc-goldfish.c
@@ -0,0 +1,156 @@
+/* drivers/rtc/rtc-goldfish.c
+**
+** Copyright (C) 2007 Google, Inc.
+**
+** This software is licensed under the terms of the GNU General Public
+** License version 2, as published by the Free Software Foundation, and
+** may be copied, distributed, and modified under those terms.
+**
+** This program is distributed in the hope that it will be useful,
+** but WITHOUT ANY WARRANTY; without even the implied warranty of
+** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+** GNU General Public License for more details.
+**
+*/
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <asm/io.h>
+
+enum {
+ TIMER_TIME_LOW = 0x00,	/* get low bits of current time and update TIMER_TIME_HIGH */
+ TIMER_TIME_HIGH = 0x04,	/* get high bits of time at last TIMER_TIME_LOW read */
+ TIMER_ALARM_LOW = 0x08,	/* set low bits of alarm and activate it */
+ TIMER_ALARM_HIGH = 0x0c,	/* set high bits of next alarm */
+ TIMER_CLEAR_INTERRUPT = 0x10,
+ TIMER_CLEAR_ALARM = 0x14
+};
+
+struct goldfish_rtc {
+ void __iomem *base;
+ uint32_t irq;
+ struct rtc_device *rtc;
+};
+
+static irqreturn_t
+goldfish_rtc_interrupt(int irq, void *dev_id)
+{
+ struct goldfish_rtc *qrtc = dev_id;
+ unsigned long events = 0;
+ void __iomem *base = qrtc->base;
+
+ writel(1, base + TIMER_CLEAR_INTERRUPT);
+ events = RTC_IRQF | RTC_AF;
+
+ rtc_update_irq(qrtc->rtc, 1, events);
+
+ return IRQ_HANDLED;
+}
+
+static int goldfish_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ u64 time;
+ struct goldfish_rtc *qrtc = platform_get_drvdata(to_platform_device(dev));
+ void __iomem *base = qrtc->base;
+
+ /* Reading TIME_LOW latches the high word, so TIME_LOW must be read
+  * before TIME_HIGH; do_div() also needs an unsigned 64-bit dividend.
+  */
+ time = readl(base + TIMER_TIME_LOW);
+ time |= (u64)readl(base + TIMER_TIME_HIGH) << 32;
+ do_div(time, NSEC_PER_SEC);
+
+ rtc_time_to_tm(time, tm);
+ return 0;
+}
+
+/* Only read_time is implemented; set_time and the alarm callbacks
+ * are not wired up yet.
+ */
+static const struct rtc_class_ops goldfish_rtc_ops = {
+ .read_time = goldfish_rtc_read_time,
+};
+
+static int goldfish_rtc_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct resource *r;
+ struct goldfish_rtc *qrtc;
+ unsigned long rtc_dev_len;
+ unsigned long rtc_dev_addr;
+
+ qrtc = kzalloc(sizeof(*qrtc), GFP_KERNEL);
+ if (!qrtc) {
+ ret = -ENOMEM;
+ goto err_qrtc_alloc_failed;
+ }
+ platform_set_drvdata(pdev, qrtc);
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (r == NULL) {
+ ret = -ENODEV;
+ goto err_no_io_base;
+ }
+
+ rtc_dev_addr = r->start;
+ rtc_dev_len = resource_size(r);
+ qrtc->base = ioremap(rtc_dev_addr, rtc_dev_len);
+ if (qrtc->base == NULL) {
+  ret = -ENOMEM;
+  goto err_no_io_base;
+ }
+
+ /* platform_get_irq() may return a negative errno; qrtc->irq is
+  * unsigned, so check the return value before storing it.
+  */
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0)
+  goto err_no_irq;
+ qrtc->irq = ret;
+ qrtc->rtc = rtc_device_register(pdev->name, &pdev->dev,
+ &goldfish_rtc_ops, THIS_MODULE);
+ if (IS_ERR(qrtc->rtc)) {
+ ret = PTR_ERR(qrtc->rtc);
+ goto err_rtc_device_register_failed;
+ }
+
+ ret = request_irq(qrtc->irq, goldfish_rtc_interrupt, 0, pdev->name, qrtc);
+ if (ret)
+  goto err_request_irq_failed;
+
+ return 0;
+
+err_request_irq_failed:
+ rtc_device_unregister(qrtc->rtc);
+err_rtc_device_register_failed:
+err_no_irq:
+ iounmap(qrtc->base);
+err_no_io_base:
+ kfree(qrtc);
+err_qrtc_alloc_failed:
+ return ret;
+}
+
+static int goldfish_rtc_remove(struct platform_device *pdev)
+{
+ struct goldfish_rtc *qrtc = platform_get_drvdata(pdev);
+ free_irq(qrtc->irq, qrtc);
+ rtc_device_unregister(qrtc->rtc);
+ iounmap(qrtc->base);
+ kfree(qrtc);
+ return 0;
+}
+
+static const struct of_device_id goldfish_rtc_of_match[] = {
+ { .compatible = "generic,goldfish-rtc", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, goldfish_rtc_of_match);
+
+static struct platform_driver goldfish_rtc = {
+ .probe = goldfish_rtc_probe,
+ .remove = goldfish_rtc_remove,
+ .driver = {
+ .name = "goldfish_rtc",
+ .of_match_table = goldfish_rtc_of_match,
+ }
+};
+
+module_platform_driver(goldfish_rtc);
diff --git a/drivers/staging/goldfish/goldfish_audio.c b/drivers/staging/goldfish/goldfish_audio.c
index d3bed21f..2655454 100644
--- a/drivers/staging/goldfish/goldfish_audio.c
+++ b/drivers/staging/goldfish/goldfish_audio.c
@@ -25,6 +25,8 @@
#include <linux/sched.h>
#include <linux/dma-mapping.h>
#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/acpi.h>
MODULE_AUTHOR("Google, Inc.");
MODULE_DESCRIPTION("Android QEMU Audio Driver");
@@ -74,11 +76,18 @@
/* set number of bytes in buffer to write */
AUDIO_WRITE_BUFFER_1 = 0x10,
AUDIO_WRITE_BUFFER_2 = 0x14,
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ AUDIO_SET_WRITE_BUFFER_1_HIGH = 0x28,
+ AUDIO_SET_WRITE_BUFFER_2_HIGH = 0x30,
+#endif
/* true if audio input is supported */
AUDIO_READ_SUPPORTED = 0x18,
/* buffer to use for audio input */
AUDIO_SET_READ_BUFFER = 0x1C,
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ AUDIO_SET_READ_BUFFER_HIGH = 0x34,
+#endif
/* driver writes number of bytes to read */
AUDIO_START_READ = 0x20,
@@ -258,6 +267,9 @@
struct resource *r;
struct goldfish_audio *data;
dma_addr_t buf_addr;
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ u32 buf_addr_high, buf_addr_low;
+#endif
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (data == NULL) {
@@ -313,6 +325,28 @@
goto err_misc_register_failed;
}
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ buf_addr_low = (u32)(buf_addr);
+ buf_addr_high = (u32)((buf_addr) >> 32);
+
+ AUDIO_WRITE(data, AUDIO_SET_WRITE_BUFFER_1, buf_addr_low);
+ AUDIO_WRITE(data, AUDIO_SET_WRITE_BUFFER_1_HIGH, buf_addr_high);
+
+ buf_addr_low = (u32)(buf_addr + WRITE_BUFFER_SIZE);
+ buf_addr_high = (u32)((buf_addr + WRITE_BUFFER_SIZE) >> 32);
+
+ AUDIO_WRITE(data, AUDIO_SET_WRITE_BUFFER_2, buf_addr_low);
+ AUDIO_WRITE(data, AUDIO_SET_WRITE_BUFFER_2_HIGH, buf_addr_high);
+
+ buf_addr_low = (u32)(buf_addr + 2 * WRITE_BUFFER_SIZE);
+ buf_addr_high = (u32)((buf_addr + 2 * WRITE_BUFFER_SIZE) >> 32);
+
+ data->read_supported = AUDIO_READ(data, AUDIO_READ_SUPPORTED);
+ if (data->read_supported) {
+ AUDIO_WRITE(data, AUDIO_SET_READ_BUFFER, buf_addr_low);
+ AUDIO_WRITE(data, AUDIO_SET_READ_BUFFER_HIGH, buf_addr_high);
+ }
+#else
AUDIO_WRITE(data, AUDIO_SET_WRITE_BUFFER_1, buf_addr);
AUDIO_WRITE(data, AUDIO_SET_WRITE_BUFFER_2,
buf_addr + WRITE_BUFFER_SIZE);
@@ -321,6 +355,7 @@
if (data->read_supported)
AUDIO_WRITE(data, AUDIO_SET_READ_BUFFER,
buf_addr + 2 * WRITE_BUFFER_SIZE);
+#endif
audio_data = data;
return 0;
@@ -352,11 +387,25 @@
return 0;
}
+static const struct of_device_id goldfish_audio_of_match[] = {
+ { .compatible = "generic,goldfish-audio", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, goldfish_audio_of_match);
+
+static const struct acpi_device_id goldfish_audio_acpi_match[] = {
+ { "GFSH0005", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, goldfish_audio_acpi_match);
+
static struct platform_driver goldfish_audio_driver = {
.probe = goldfish_audio_probe,
.remove = goldfish_audio_remove,
.driver = {
- .name = "goldfish_audio"
+ .name = "goldfish_audio",
+ .of_match_table = goldfish_audio_of_match,
+ .acpi_match_table = ACPI_PTR(goldfish_audio_acpi_match),
}
};
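
The 64-bit path above programs every DMA address as an explicit low/high register pair. A small helper would keep that pattern in one place; this is only a sketch (audio_write_dma_addr is not part of the driver), built on the kernel's lower_32_bits()/upper_32_bits() helpers:

    /* hypothetical helper: program one 64-bit DMA address as a
     * low/high register pair via the driver's AUDIO_WRITE() macro */
    static void audio_write_dma_addr(struct goldfish_audio *data,
                                     int lo_reg, int hi_reg,
                                     dma_addr_t addr)
    {
        AUDIO_WRITE(data, lo_reg, lower_32_bits(addr));
        AUDIO_WRITE(data, hi_reg, upper_32_bits(addr));
    }
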
diff --git a/drivers/staging/goldfish/goldfish_nand.c b/drivers/staging/goldfish/goldfish_nand.c
index ab1f019..5e165a8 100644
--- a/drivers/staging/goldfish/goldfish_nand.c
+++ b/drivers/staging/goldfish/goldfish_nand.c
@@ -67,7 +67,7 @@
cps->addr_high = (u32)(addr >> 32);
cps->addr_low = (u32)addr;
cps->transfer_size = len;
- cps->data = (u32)ptr;
+ cps->data = (unsigned long)ptr;
writel(cmdp, base + NAND_COMMAND);
*rv = cps->result;
return 0;
@@ -88,6 +88,9 @@
writel((u32)addr, base + NAND_ADDR_LOW);
writel(len, base + NAND_TRANSFER_SIZE);
writel((u32)ptr, base + NAND_DATA);
+#ifdef CONFIG_64BIT
+ writel((u32)((u64)ptr >> 32), base + NAND_DATA_HIGH);
+#endif
writel(cmd, base + NAND_COMMAND);
rv = readl(base + NAND_RESULT);
}
@@ -200,8 +203,6 @@
if (from + len > mtd->size)
goto invalid_arg;
- if (len != mtd->writesize)
- goto invalid_arg;
rem = do_div(from, mtd->writesize);
if (rem)
@@ -224,8 +225,6 @@
if (to + len > mtd->size)
goto invalid_arg;
- if (len != mtd->writesize)
- goto invalid_arg;
rem = do_div(to, mtd->writesize);
if (rem)
diff --git a/drivers/staging/goldfish/goldfish_nand_reg.h b/drivers/staging/goldfish/goldfish_nand_reg.h
index 956c6c3..39eb6ef 100644
--- a/drivers/staging/goldfish/goldfish_nand_reg.h
+++ b/drivers/staging/goldfish/goldfish_nand_reg.h
@@ -54,6 +54,9 @@
NAND_RESULT = 0x040,
NAND_COMMAND = 0x044,
NAND_DATA = 0x048,
+#ifdef CONFIG_64BIT
+ NAND_DATA_HIGH = 0x100,
+#endif
NAND_TRANSFER_SIZE = 0x04c,
NAND_ADDR_LOW = 0x050,
NAND_ADDR_HIGH = 0x054,
@@ -66,7 +69,7 @@
uint32_t addr_low;
uint32_t addr_high;
uint32_t transfer_size;
- uint32_t data;
+ unsigned long data;
uint32_t result;
};
#endif
diff --git a/drivers/tty/goldfish.c b/drivers/tty/goldfish.c
index f17d2e4..ac8462b 100644
--- a/drivers/tty/goldfish.c
+++ b/drivers/tty/goldfish.c
@@ -22,6 +22,7 @@
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/mm.h>
enum {
GOLDFISH_TTY_PUT_CHAR = 0x00,
@@ -30,6 +31,11 @@
GOLDFISH_TTY_DATA_PTR = 0x10,
GOLDFISH_TTY_DATA_LEN = 0x14,
+#ifdef CONFIG_64BIT
+ GOLDFISH_TTY_DATA_PTR_HIGH = 0x18,
+#endif
+
+ GOLDFISH_TTY_VERSION = 0x20,
GOLDFISH_TTY_CMD_INT_DISABLE = 0,
GOLDFISH_TTY_CMD_INT_ENABLE = 1,
@@ -44,6 +50,7 @@
u32 irq;
int opencount;
struct console console;
+ u32 version;
};
static DEFINE_MUTEX(goldfish_tty_lock);
@@ -52,24 +59,77 @@
static u32 goldfish_tty_current_line_count;
static struct goldfish_tty *goldfish_ttys;
-static void goldfish_tty_do_write(int line, const char *buf, unsigned count)
+static inline void do_rw_io(struct goldfish_tty *qtty,
+ unsigned long address,
+ unsigned count, int is_write)
{
unsigned long irq_flags;
- struct goldfish_tty *qtty = &goldfish_ttys[line];
void __iomem *base = qtty->base;
+
spin_lock_irqsave(&qtty->lock, irq_flags);
- writel((u32)buf, base + GOLDFISH_TTY_DATA_PTR);
+ writel((u32)address, base + GOLDFISH_TTY_DATA_PTR);
+#ifdef CONFIG_64BIT
+ writel((u32)((u64)address >> 32), base + GOLDFISH_TTY_DATA_PTR_HIGH);
+#endif
writel(count, base + GOLDFISH_TTY_DATA_LEN);
- writel(GOLDFISH_TTY_CMD_WRITE_BUFFER, base + GOLDFISH_TTY_CMD);
+
+ if (is_write)
+ writel(GOLDFISH_TTY_CMD_WRITE_BUFFER, base + GOLDFISH_TTY_CMD);
+ else
+ writel(GOLDFISH_TTY_CMD_READ_BUFFER, base + GOLDFISH_TTY_CMD);
+
spin_unlock_irqrestore(&qtty->lock, irq_flags);
}
+static inline void goldfish_tty_rw(struct goldfish_tty *qtty,
+ unsigned long address,
+ unsigned count, int is_write)
+{
+ if (qtty->version) {
+  /* The Goldfish TTY device on the Ranchu platform
+   * (qemu2) takes physical addresses.
+   */
+ unsigned long address_end = address + count;
+ while (address < address_end) {
+ unsigned long page_end = (address & PAGE_MASK) + PAGE_SIZE;
+ unsigned long next = page_end < address_end ? page_end
+ : address_end;
+ unsigned long avail = next - address;
+ unsigned long paddr;
+ struct page *page;
+
+ /*
+ * We go through a buffer on a page-by-page
+ * basis in case of potentially huge buffer.
+ */
+ page = virt_to_page((void *)address);
+ paddr = page_to_phys(page) | (address & ~PAGE_MASK);
+ do_rw_io(qtty, paddr, avail, is_write);
+ address += avail;
+ }
+ } else {
+  /* The old style Goldfish TTY device on the
+   * Goldfish platform takes virtual addresses.
+   */
+ do_rw_io(qtty, address, count, is_write);
+ }
+}
+
+static void goldfish_tty_do_write(int line, const char *buf, unsigned count)
+{
+ struct goldfish_tty *qtty = &goldfish_ttys[line];
+ unsigned long address = (unsigned long)(void *)buf;
+
+ goldfish_tty_rw(qtty, address, count, 1);
+}
+
static irqreturn_t goldfish_tty_interrupt(int irq, void *dev_id)
{
- struct platform_device *pdev = dev_id;
- struct goldfish_tty *qtty = &goldfish_ttys[pdev->id];
+ struct goldfish_tty *qtty = dev_id;
void __iomem *base = qtty->base;
- unsigned long irq_flags;
+ unsigned long address;
unsigned char *buf;
u32 count;
@@ -78,11 +138,10 @@
return IRQ_NONE;
count = tty_prepare_flip_string(&qtty->port, &buf, count);
- spin_lock_irqsave(&qtty->lock, irq_flags);
- writel((u32)buf, base + GOLDFISH_TTY_DATA_PTR);
- writel(count, base + GOLDFISH_TTY_DATA_LEN);
- writel(GOLDFISH_TTY_CMD_READ_BUFFER, base + GOLDFISH_TTY_CMD);
- spin_unlock_irqrestore(&qtty->lock, irq_flags);
+
+ address = (unsigned long)(void *)buf;
+ goldfish_tty_rw(qtty, address, count, 0);
+
tty_schedule_flip(&qtty->port);
return IRQ_HANDLED;
}
@@ -224,6 +283,7 @@
struct device *ttydev;
void __iomem *base;
u32 irq;
+ unsigned int line;
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if(r == NULL)
@@ -239,10 +299,16 @@
irq = r->start;
- if(pdev->id >= goldfish_tty_line_count)
- goto err_unmap;
-
mutex_lock(&goldfish_tty_lock);
+
+ if (pdev->id == PLATFORM_DEVID_NONE)
+ line = goldfish_tty_current_line_count;
+ else
+ line = pdev->id;
+
+ if (line >= goldfish_tty_line_count)
+ goto err_create_driver_failed;
+
if(goldfish_tty_current_line_count == 0) {
ret = goldfish_tty_create_driver();
if(ret)
@@ -250,22 +316,30 @@
}
goldfish_tty_current_line_count++;
- qtty = &goldfish_ttys[pdev->id];
+ qtty = &goldfish_ttys[line];
spin_lock_init(&qtty->lock);
tty_port_init(&qtty->port);
qtty->port.ops = &goldfish_port_ops;
qtty->base = base;
qtty->irq = irq;
+ /* Goldfish TTY device used by the Goldfish emulator
+ * should identify itself with 0, forcing the driver
+ * to use virtual addresses. Goldfish TTY device
+ * on Ranchu emulator (qemu2) returns 1 here and
+ * driver will use physical addresses.
+ */
+ qtty->version = readl(base + GOLDFISH_TTY_VERSION);
+
writel(GOLDFISH_TTY_CMD_INT_DISABLE, base + GOLDFISH_TTY_CMD);
- ret = request_irq(irq, goldfish_tty_interrupt, IRQF_SHARED, "goldfish_tty", pdev);
+ ret = request_irq(irq, goldfish_tty_interrupt, IRQF_SHARED, "goldfish_tty", qtty);
if(ret)
goto err_request_irq_failed;
ttydev = tty_port_register_device(&qtty->port, goldfish_tty_driver,
- pdev->id, &pdev->dev);
+ line, &pdev->dev);
if(IS_ERR(ttydev)) {
ret = PTR_ERR(ttydev);
goto err_tty_register_device_failed;
@@ -276,8 +350,9 @@
qtty->console.device = goldfish_tty_console_device;
qtty->console.setup = goldfish_tty_console_setup;
qtty->console.flags = CON_PRINTBUFFER;
- qtty->console.index = pdev->id;
+ qtty->console.index = line;
register_console(&qtty->console);
+ platform_set_drvdata(pdev, qtty);
mutex_unlock(&goldfish_tty_lock);
return 0;
@@ -298,13 +373,12 @@
static int goldfish_tty_remove(struct platform_device *pdev)
{
- struct goldfish_tty *qtty;
+ struct goldfish_tty *qtty = platform_get_drvdata(pdev);
mutex_lock(&goldfish_tty_lock);
- qtty = &goldfish_ttys[pdev->id];
unregister_console(&qtty->console);
- tty_unregister_device(goldfish_tty_driver, pdev->id);
+ tty_unregister_device(goldfish_tty_driver, qtty->console.index);
iounmap(qtty->base);
qtty->base = 0;
- free_irq(qtty->irq, pdev);
+ free_irq(qtty->irq, qtty);
@@ -315,11 +389,19 @@
return 0;
}
+static const struct of_device_id goldfish_tty_of_match[] = {
+ { .compatible = "generic,goldfish-tty", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, goldfish_tty_of_match);
+
static struct platform_driver goldfish_tty_platform_driver = {
.probe = goldfish_tty_probe,
.remove = goldfish_tty_remove,
.driver = {
- .name = "goldfish_tty"
+ .name = "goldfish_tty",
+ .of_match_table = goldfish_tty_of_match,
}
};
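
The page-by-page loop in goldfish_tty_rw() is needed because a virtually contiguous kernel buffer is only guaranteed to be physically contiguous within a single page. A worked example of how one write gets split, assuming PAGE_SIZE == 4096 and an illustrative start address:

    /*
     * goldfish_tty_do_write(line, buf, 6000) with buf at a virtual
     * address whose page offset is 0xf00 becomes three commands:
     *
     *   do_rw_io(qtty, phys(buf),           256, 1);   rest of page 0
     *   do_rw_io(qtty, phys(page 1 start), 4096, 1);   all of page 1
     *   do_rw_io(qtty, phys(page 2 start), 1648, 1);   6000 - 256 - 4096
     */
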
diff --git a/drivers/tty/serial/8250/8250_early.c b/drivers/tty/serial/8250/8250_early.c
index 721904f..e83c9db 100644
--- a/drivers/tty/serial/8250/8250_early.c
+++ b/drivers/tty/serial/8250/8250_early.c
@@ -35,18 +35,8 @@
#include <linux/serial_8250.h>
#include <asm/io.h>
#include <asm/serial.h>
-#ifdef CONFIG_FIX_EARLYCON_MEM
-#include <asm/pgtable.h>
-#include <asm/fixmap.h>
-#endif
-struct early_serial8250_device {
- struct uart_port port;
- char options[16]; /* e.g., 115200n8 */
- unsigned int baud;
-};
-
-static struct early_serial8250_device early_device;
+static struct earlycon_device *early_device;
unsigned int __weak __init serial8250_early_in(struct uart_port *port, int offset)
{
@@ -100,7 +90,7 @@
static void __init early_serial8250_write(struct console *console,
const char *s, unsigned int count)
{
- struct uart_port *port = &early_device.port;
+ struct uart_port *port = &early_device->port;
unsigned int ier;
/* Save the IER and disable interrupts */
@@ -129,7 +119,7 @@
return (port->uartclk / 16) / quot;
}
-static void __init init_port(struct early_serial8250_device *device)
+static void __init init_port(struct earlycon_device *device)
{
struct uart_port *port = &device->port;
unsigned int divisor;
@@ -148,127 +138,32 @@
serial8250_early_out(port, UART_LCR, c & ~UART_LCR_DLAB);
}
-static int __init parse_options(struct early_serial8250_device *device,
- char *options)
+static int __init early_serial8250_setup(struct earlycon_device *device,
+ const char *options)
{
- struct uart_port *port = &device->port;
- int mmio, mmio32, length;
-
- if (!options)
- return -ENODEV;
-
- port->uartclk = BASE_BAUD * 16;
-
- mmio = !strncmp(options, "mmio,", 5);
- mmio32 = !strncmp(options, "mmio32,", 7);
- if (mmio || mmio32) {
- port->iotype = (mmio ? UPIO_MEM : UPIO_MEM32);
- port->mapbase = simple_strtoul(options + (mmio ? 5 : 7),
- &options, 0);
- if (mmio32)
- port->regshift = 2;
-#ifdef CONFIG_FIX_EARLYCON_MEM
- set_fixmap_nocache(FIX_EARLYCON_MEM_BASE,
- port->mapbase & PAGE_MASK);
- port->membase =
- (void __iomem *)__fix_to_virt(FIX_EARLYCON_MEM_BASE);
- port->membase += port->mapbase & ~PAGE_MASK;
-#else
- port->membase = ioremap_nocache(port->mapbase, 64);
- if (!port->membase) {
- printk(KERN_ERR "%s: Couldn't ioremap 0x%llx\n",
- __func__,
- (unsigned long long) port->mapbase);
- return -ENOMEM;
- }
-#endif
- } else if (!strncmp(options, "io,", 3)) {
- port->iotype = UPIO_PORT;
- port->iobase = simple_strtoul(options + 3, &options, 0);
- mmio = 0;
- } else
- return -EINVAL;
-
- options = strchr(options, ',');
- if (options) {
- options++;
- device->baud = simple_strtoul(options, NULL, 0);
- length = min(strcspn(options, " "), sizeof(device->options));
- strlcpy(device->options, options, length);
- } else {
- device->baud = probe_baud(port);
- snprintf(device->options, sizeof(device->options), "%u",
- device->baud);
- }
-
- if (mmio || mmio32)
- printk(KERN_INFO
- "Early serial console at MMIO%s 0x%llx (options '%s')\n",
- mmio32 ? "32" : "",
- (unsigned long long)port->mapbase,
- device->options);
- else
- printk(KERN_INFO
- "Early serial console at I/O port 0x%lx (options '%s')\n",
- port->iobase,
- device->options);
-
- return 0;
-}
-
-static struct console early_serial8250_console __initdata = {
- .name = "uart",
- .write = early_serial8250_write,
- .flags = CON_PRINTBUFFER | CON_BOOT,
- .index = -1,
-};
-
-static int __init early_serial8250_setup(char *options)
-{
- struct early_serial8250_device *device = &early_device;
- int err;
-
- if (device->port.membase || device->port.iobase)
+ if (!(device->port.membase || device->port.iobase))
return 0;
- err = parse_options(device, options);
- if (err < 0)
- return err;
+ if (!device->baud)
+ device->baud = probe_baud(&device->port);
init_port(device);
+
+ early_device = device;
+ device->con->write = early_serial8250_write;
return 0;
}
-
-int __init setup_early_serial8250_console(char *cmdline)
-{
- char *options;
- int err;
-
- options = strstr(cmdline, "uart8250,");
- if (!options) {
- options = strstr(cmdline, "uart,");
- if (!options)
- return 0;
- }
-
- options = strchr(cmdline, ',') + 1;
- err = early_serial8250_setup(options);
- if (err < 0)
- return err;
-
- register_console(&early_serial8250_console);
-
- return 0;
-}
+EARLYCON_DECLARE(uart8250, early_serial8250_setup);
+EARLYCON_DECLARE(uart, early_serial8250_setup);
int serial8250_find_port_for_earlycon(void)
{
- struct early_serial8250_device *device = &early_device;
- struct uart_port *port = &device->port;
+ struct earlycon_device *device = early_device;
+ struct uart_port *port = device ? &device->port : NULL;
int line;
int ret;
- if (!device->port.membase && !device->port.iobase)
+ if (!port || (!port->membase && !port->iobase))
return -ENODEV;
line = serial8250_find_port(port);
@@ -283,5 +178,3 @@
return ret;
}
-
-early_param("earlycon", setup_early_serial8250_console);
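
After the conversion the same option grammar is handled by the generic earlycon parser, so the historic spellings keep working. Typical command lines accepted by the two EARLYCON_DECLARE() entries above (the addresses are illustrative, not taken from this patch):

    earlycon=uart8250,mmio32,0xf7010000,115200n8
    earlycon=uart8250,io,0x3f8,115200n8
    earlycon=uart,mmio,0x10009000
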
diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig
index 80fe91e..902c4bf1 100644
--- a/drivers/tty/serial/8250/Kconfig
+++ b/drivers/tty/serial/8250/Kconfig
@@ -62,6 +62,7 @@
bool "Console on 8250/16550 and compatible serial port"
depends on SERIAL_8250=y
select SERIAL_CORE_CONSOLE
+ select SERIAL_EARLYCON
---help---
If you say Y here, it will be possible to use a serial port as the
system console (the system console is the device which receives all
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 7e7006f..1ad9aad 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -7,6 +7,13 @@
menu "Serial drivers"
depends on HAS_IOMEM && GENERIC_HARDIRQS
+config SERIAL_EARLYCON
+ bool
+ help
+   Support for early consoles selected with the "earlycon" kernel
+   parameter. This enables a console before the standard serial
+   driver is probed; it is enabled when early_param processing runs.
+
source "drivers/tty/serial/8250/Kconfig"
comment "Non-8250 serial port support"
diff --git a/drivers/tty/serial/Makefile b/drivers/tty/serial/Makefile
index eedfec4..e51c680 100644
--- a/drivers/tty/serial/Makefile
+++ b/drivers/tty/serial/Makefile
@@ -5,6 +5,8 @@
obj-$(CONFIG_SERIAL_CORE) += serial_core.o
obj-$(CONFIG_SERIAL_21285) += 21285.o
+obj-$(CONFIG_SERIAL_EARLYCON) += earlycon.o
+
# These Sparc drivers have to appear before others such as 8250
# which share ttySx minor node space. Otherwise console device
# names change and other unplesantries.
diff --git a/drivers/tty/serial/earlycon.c b/drivers/tty/serial/earlycon.c
new file mode 100644
index 0000000..73bf1e21
--- /dev/null
+++ b/drivers/tty/serial/earlycon.c
@@ -0,0 +1,152 @@
+/*
+ * Copyright (C) 2014 Linaro Ltd.
+ * Author: Rob Herring <robh@kernel.org>
+ *
+ * Based on 8250 earlycon:
+ * (c) Copyright 2004 Hewlett-Packard Development Company, L.P.
+ * Bjorn Helgaas <bjorn.helgaas@hp.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/console.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/serial_core.h>
+
+#ifdef CONFIG_FIX_EARLYCON_MEM
+#include <asm/fixmap.h>
+#endif
+
+#include <asm/serial.h>
+
+static struct console early_con = {
+ .name = "earlycon",
+ .flags = CON_PRINTBUFFER | CON_BOOT,
+ .index = -1,
+};
+
+static struct earlycon_device early_console_dev = {
+ .con = &early_con,
+};
+
+static void __iomem * __init earlycon_map(unsigned long paddr, size_t size)
+{
+ void __iomem *base;
+#ifdef CONFIG_FIX_EARLYCON_MEM
+ set_fixmap_io(FIX_EARLYCON_MEM_BASE, paddr & PAGE_MASK);
+ base = (void __iomem *)__fix_to_virt(FIX_EARLYCON_MEM_BASE);
+ base += paddr & ~PAGE_MASK;
+#else
+ base = ioremap(paddr, size);
+#endif
+ if (!base)
+ pr_err("%s: Couldn't map 0x%llx\n", __func__,
+ (unsigned long long)paddr);
+
+ return base;
+}
+
+static int __init parse_options(struct earlycon_device *device,
+ char *options)
+{
+ struct uart_port *port = &device->port;
+ int mmio, mmio32, length, ret;
+ unsigned long addr;
+
+ if (!options)
+ return -ENODEV;
+
+ mmio = !strncmp(options, "mmio,", 5);
+ mmio32 = !strncmp(options, "mmio32,", 7);
+ if (mmio || mmio32) {
+ port->iotype = (mmio ? UPIO_MEM : UPIO_MEM32);
+ options += mmio ? 5 : 7;
+ ret = kstrtoul(options, 0, &addr);
+ if (ret)
+ return ret;
+ port->mapbase = addr;
+ if (mmio32)
+ port->regshift = 2;
+ } else if (!strncmp(options, "io,", 3)) {
+ port->iotype = UPIO_PORT;
+ options += 3;
+ ret = kstrtoul(options, 0, &addr);
+ if (ret)
+ return ret;
+ port->iobase = addr;
+ mmio = 0;
+ } else if (!strncmp(options, "0x", 2)) {
+ port->iotype = UPIO_MEM;
+ ret = kstrtoul(options, 0, &addr);
+ if (ret)
+ return ret;
+ port->mapbase = addr;
+ } else {
+ return -EINVAL;
+ }
+
+ port->uartclk = BASE_BAUD * 16;
+
+ options = strchr(options, ',');
+ if (options) {
+ options++;
+ ret = kstrtouint(options, 0, &device->baud);
+ if (ret)
+ return ret;
+ length = min(strcspn(options, " ") + 1,
+ (size_t)(sizeof(device->options)));
+ strlcpy(device->options, options, length);
+ }
+
+ if (mmio || mmio32)
+ pr_info("Early serial console at MMIO%s 0x%llx (options '%s')\n",
+ mmio32 ? "32" : "",
+ (unsigned long long)port->mapbase,
+ device->options);
+ else
+ pr_info("Early serial console at I/O port 0x%lx (options '%s')\n",
+ port->iobase,
+ device->options);
+
+ return 0;
+}
+
+int __init setup_earlycon(char *buf, const char *match,
+ int (*setup)(struct earlycon_device *, const char *))
+{
+ int err;
+ size_t len;
+ struct uart_port *port = &early_console_dev.port;
+
+ if (!buf || !match || !setup)
+ return 0;
+
+ len = strlen(match);
+ if (strncmp(buf, match, len))
+ return 0;
+ if (buf[len]) {
+  if (buf[len] != ',')
+   return 0;
+  buf += len + 1;
+ } else {
+  /* bare match with no options: don't step past the NUL */
+  buf = NULL;
+ }
+
+ err = parse_options(&early_console_dev, buf);
+ /* On parsing error, pass the options buf to the setup function */
+ if (!err)
+ buf = NULL;
+
+ if (port->mapbase)
+ port->membase = earlycon_map(port->mapbase, 64);
+
+ early_console_dev.con->data = &early_console_dev;
+ err = setup(&early_console_dev, buf);
+ if (err < 0)
+ return err;
+ if (!early_console_dev.con->write)
+ return -ENODEV;
+
+ register_console(early_console_dev.con);
+ return 0;
+}
diff --git a/drivers/video/goldfishfb.c b/drivers/video/goldfishfb.c
index 7f6c9e6..e2c8a2c 100644
--- a/drivers/video/goldfishfb.c
+++ b/drivers/video/goldfishfb.c
@@ -26,6 +26,7 @@
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
+#include <linux/acpi.h>
enum {
FB_GET_WIDTH = 0x00,
@@ -37,11 +38,53 @@
FB_SET_BLANK = 0x18,
FB_GET_PHYS_WIDTH = 0x1c,
FB_GET_PHYS_HEIGHT = 0x20,
+ FB_GET_FORMAT = 0x24,
FB_INT_VSYNC = 1U << 0,
FB_INT_BASE_UPDATE_DONE = 1U << 1
};
+/* These values *must* match the platform definitions found under
+ * <system/graphics.h>
+ */
+enum {
+ HAL_PIXEL_FORMAT_RGBA_8888 = 1,
+ HAL_PIXEL_FORMAT_RGBX_8888 = 2,
+ HAL_PIXEL_FORMAT_RGB_888 = 3,
+ HAL_PIXEL_FORMAT_RGB_565 = 4,
+ HAL_PIXEL_FORMAT_BGRA_8888 = 5,
+};
+
+struct framebuffer_config {
+ u8 bytes_per_pixel;
+ u8 red_offset;
+ u8 red_length;
+ u8 green_offset;
+ u8 green_length;
+ u8 blue_offset;
+ u8 blue_length;
+ u8 transp_offset;
+ u8 transp_length;
+};
+
+static const struct framebuffer_config *get_fb_config_from_format(int format)
+{
+ static const struct framebuffer_config fb_configs[] = {
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0 }, /* Invalid, assume RGB_565 */
+ { 4, 0, 8, 8, 8, 16, 8, 24, 8 }, /* HAL_PIXEL_FORMAT_RGBA_8888 */
+ { 4, 0, 8, 8, 8, 16, 8, 0, 0 }, /* HAL_PIXEL_FORMAT_RGBX_8888 */
+ { 3, 0, 8, 8, 8, 16, 8, 0, 0 }, /* HAL_PIXEL_FORMAT_RGB_888 */
+ { 2, 11, 5, 5, 6, 0, 5, 0, 0 }, /* HAL_PIXEL_FORMAT_RGB_565 */
+ { 4, 16, 8, 8, 8, 0, 8, 24, 8 }, /* HAL_PIXEL_FORMAT_BGRA_8888 */
+ };
+
+ if (format > 0 && format < ARRAY_SIZE(fb_configs))
+  return &fb_configs[format];
+
+ return &fb_configs[HAL_PIXEL_FORMAT_RGB_565]; /* legacy default */
+}
+
struct goldfish_fb {
void __iomem *reg_base;
int irq;
@@ -125,7 +168,8 @@
{
struct goldfish_fb *fb = container_of(info, struct goldfish_fb, fb);
if (fb->rotation != fb->fb.var.rotate) {
- info->fix.line_length = info->var.xres * 2;
+ info->fix.line_length = info->var.xres *
+ (fb->fb.var.bits_per_pixel / 8);
fb->rotation = fb->fb.var.rotate;
writel(fb->rotation, fb->reg_base + FB_SET_ROTATION);
}
@@ -142,8 +186,9 @@
spin_lock_irqsave(&fb->lock, irq_flags);
base_update_count = fb->base_update_count;
- writel(fb->fb.fix.smem_start + fb->fb.var.xres * 2 * var->yoffset,
- fb->reg_base + FB_SET_BASE);
+ writel(fb->fb.fix.smem_start +
+ fb->fb.var.xres * (fb->fb.var.bits_per_pixel / 8) * var->yoffset,
+ fb->reg_base + FB_SET_BASE);
spin_unlock_irqrestore(&fb->lock, irq_flags);
wait_event_timeout(fb->wait,
fb->base_update_count != base_update_count, HZ / 15);
@@ -185,8 +230,10 @@
struct resource *r;
struct goldfish_fb *fb;
size_t framesize;
- u32 width, height;
+ u32 width, height, format;
+ int bytes_per_pixel;
dma_addr_t fbpaddr;
+ const struct framebuffer_config *fb_config;
fb = kzalloc(sizeof(*fb), GFP_KERNEL);
if (fb == NULL) {
@@ -216,13 +263,20 @@
width = readl(fb->reg_base + FB_GET_WIDTH);
height = readl(fb->reg_base + FB_GET_HEIGHT);
+ format = readl(fb->reg_base + FB_GET_FORMAT);
+ fb_config = get_fb_config_from_format(format);
+ if (!fb_config) {
+ ret = -EINVAL;
+ goto err_no_irq;
+ }
+ bytes_per_pixel = fb_config->bytes_per_pixel;
fb->fb.fbops = &goldfish_fb_ops;
fb->fb.flags = FBINFO_FLAG_DEFAULT;
fb->fb.pseudo_palette = fb->cmap;
fb->fb.fix.type = FB_TYPE_PACKED_PIXELS;
fb->fb.fix.visual = FB_VISUAL_TRUECOLOR;
- fb->fb.fix.line_length = width * 2;
+ fb->fb.fix.line_length = width * bytes_per_pixel;
fb->fb.fix.accel = FB_ACCEL_NONE;
fb->fb.fix.ypanstep = 1;
@@ -230,20 +284,22 @@
fb->fb.var.yres = height;
fb->fb.var.xres_virtual = width;
fb->fb.var.yres_virtual = height * 2;
- fb->fb.var.bits_per_pixel = 16;
+ fb->fb.var.bits_per_pixel = bytes_per_pixel * 8;
fb->fb.var.activate = FB_ACTIVATE_NOW;
fb->fb.var.height = readl(fb->reg_base + FB_GET_PHYS_HEIGHT);
fb->fb.var.width = readl(fb->reg_base + FB_GET_PHYS_WIDTH);
- fb->fb.var.pixclock = 10000;
+ fb->fb.var.pixclock = 0;
- fb->fb.var.red.offset = 11;
- fb->fb.var.red.length = 5;
- fb->fb.var.green.offset = 5;
- fb->fb.var.green.length = 6;
- fb->fb.var.blue.offset = 0;
- fb->fb.var.blue.length = 5;
+ fb->fb.var.red.offset = fb_config->red_offset;
+ fb->fb.var.red.length = fb_config->red_length;
+ fb->fb.var.green.offset = fb_config->green_offset;
+ fb->fb.var.green.length = fb_config->green_length;
+ fb->fb.var.blue.offset = fb_config->blue_offset;
+ fb->fb.var.blue.length = fb_config->blue_length;
+ fb->fb.var.transp.offset = fb_config->transp_offset;
+ fb->fb.var.transp.length = fb_config->transp_length;
- framesize = width * height * 2 * 2;
+ framesize = width * height * 2 * bytes_per_pixel;
fb->fb.screen_base = (char __force __iomem *)dma_alloc_coherent(
&pdev->dev, framesize,
&fbpaddr, GFP_KERNEL);
@@ -294,7 +350,8 @@
size_t framesize;
struct goldfish_fb *fb = platform_get_drvdata(pdev);
- framesize = fb->fb.var.xres_virtual * fb->fb.var.yres_virtual * 2;
+ framesize = fb->fb.var.xres_virtual * fb->fb.var.yres_virtual *
+ (fb->fb.var.bits_per_pixel / 8);
unregister_framebuffer(&fb->fb);
free_irq(fb->irq, fb);
@@ -304,12 +361,26 @@
return 0;
}
+static const struct of_device_id goldfish_fb_of_match[] = {
+ { .compatible = "generic,goldfish-fb", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, goldfish_fb_of_match);
+
+static const struct acpi_device_id goldfish_fb_acpi_match[] = {
+ { "GFSH0004", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, goldfish_fb_acpi_match);
+
static struct platform_driver goldfish_fb_driver = {
.probe = goldfish_fb_probe,
.remove = goldfish_fb_remove,
.driver = {
- .name = "goldfish_fb"
+ .name = "goldfish_fb",
+ .owner = THIS_MODULE,
+ .of_match_table = goldfish_fb_of_match,
+ .acpi_match_table = ACPI_PTR(goldfish_fb_acpi_match),
}
};
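
Because the bitfields are now taken from the device's FB_GET_FORMAT value instead of being hard-wired to RGB 565, userspace can discover the negotiated layout through the standard fbdev ioctl. A minimal sketch (the device path is illustrative):

    #include <fcntl.h>
    #include <linux/fb.h>
    #include <sys/ioctl.h>

    int fd = open("/dev/fb0", O_RDWR);
    struct fb_var_screeninfo var;

    ioctl(fd, FBIOGET_VSCREENINFO, &var);
    /* RGB_565:   bits_per_pixel == 16, red at offset 11, length 5 */
    /* RGBA_8888: bits_per_pixel == 32, red at 0/8, transp at 24/8 */
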
diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
index 370b24c..c055d56 100644
--- a/fs/Kconfig.binfmt
+++ b/fs/Kconfig.binfmt
@@ -30,6 +30,9 @@
config ARCH_BINFMT_ELF_RANDOMIZE_PIE
bool
+config ARCH_BINFMT_ELF_STATE
+ bool
+
config BINFMT_ELF_FDPIC
bool "Kernel support for FDPIC ELF binaries"
default y
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index f8a0b0e..b854380 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -381,6 +381,127 @@
ELF_PAGESTART(cmds[first_idx].p_vaddr);
}
+/**
+ * load_elf_phdrs() - load ELF program headers
+ * @elf_ex: ELF header of the binary whose program headers should be loaded
+ * @elf_file: the opened ELF binary file
+ *
+ * Loads ELF program headers from the binary file elf_file, which has the ELF
+ * header pointed to by elf_ex, into a newly allocated array. The caller is
+ * responsible for freeing the allocated data. Returns NULL upon failure.
+ */
+static struct elf_phdr *load_elf_phdrs(struct elfhdr *elf_ex,
+ struct file *elf_file)
+{
+ struct elf_phdr *elf_phdata = NULL;
+ int retval, size, err = -1;
+
+ /*
+ * If the size of this structure has changed, then punt, since
+ * we will be doing the wrong thing.
+ */
+ if (elf_ex->e_phentsize != sizeof(struct elf_phdr))
+ goto out;
+
+ /* Sanity check the number of program headers... */
+ if (elf_ex->e_phnum < 1 ||
+ elf_ex->e_phnum > 65536U / sizeof(struct elf_phdr))
+ goto out;
+
+ /* ...and their total size. */
+ size = sizeof(struct elf_phdr) * elf_ex->e_phnum;
+ if (size > ELF_MIN_ALIGN)
+ goto out;
+
+ elf_phdata = kmalloc(size, GFP_KERNEL);
+ if (!elf_phdata)
+ goto out;
+
+ /* Read in the program headers */
+ retval = kernel_read(elf_file, elf_ex->e_phoff,
+ (char *)elf_phdata, size);
+ if (retval != size) {
+ err = (retval < 0) ? retval : -EIO;
+ goto out;
+ }
+
+ /* Success! */
+ err = 0;
+out:
+ if (err) {
+ kfree(elf_phdata);
+ elf_phdata = NULL;
+ }
+ return elf_phdata;
+}
+
+#ifndef CONFIG_ARCH_BINFMT_ELF_STATE
+
+/**
+ * struct arch_elf_state - arch-specific ELF loading state
+ *
+ * This structure is used to preserve architecture specific data during
+ * the loading of an ELF file, throughout the checking of architecture
+ * specific ELF headers & through to the point where the ELF load is
+ * known to be proceeding (ie. SET_PERSONALITY).
+ *
+ * This implementation is a dummy for architectures which require no
+ * specific state.
+ */
+struct arch_elf_state {
+};
+
+#define INIT_ARCH_ELF_STATE {}
+
+/**
+ * arch_elf_pt_proc() - check a PT_LOPROC..PT_HIPROC ELF program header
+ * @ehdr: The main ELF header
+ * @phdr: The program header to check
+ * @elf: The open ELF file
+ * @is_interp: True if the phdr is from the interpreter of the ELF being
+ * loaded, else false.
+ * @state: Architecture-specific state preserved throughout the process
+ * of loading the ELF.
+ *
+ * Inspects the program header phdr to validate its correctness and/or
+ * suitability for the system. Called once per ELF program header in the
+ * range PT_LOPROC to PT_HIPROC, for both the ELF being loaded and its
+ * interpreter.
+ *
+ * Return: Zero to proceed with the ELF load, non-zero to fail the ELF load
+ * with that return code.
+ */
+static inline int arch_elf_pt_proc(struct elfhdr *ehdr,
+ struct elf_phdr *phdr,
+ struct file *elf, bool is_interp,
+ struct arch_elf_state *state)
+{
+ /* Dummy implementation, always proceed */
+ return 0;
+}
+
+/**
+ * arch_check_elf() - check an ELF executable
+ * @ehdr: The main ELF header
+ * @has_interp: True if the ELF has an interpreter, else false.
+ * @state: Architecture-specific state preserved throughout the process
+ * of loading the ELF.
+ *
+ * Provides a final opportunity for architecture code to reject the loading
+ * of the ELF & cause an exec syscall to return an error. This is called after
+ * all program headers to be checked by arch_elf_pt_proc have been.
+ *
+ * Return: Zero to proceed with the ELF load, non-zero to fail the ELF load
+ * with that return code.
+ */
+static inline int arch_check_elf(struct elfhdr *ehdr, bool has_interp,
+ struct arch_elf_state *state)
+{
+ /* Dummy implementation, always proceed */
+ return 0;
+}
+
+#endif /* !CONFIG_ARCH_BINFMT_ELF_STATE */
/* This is much more generalized than the library routine read function,
so we keep this separate. Technically the library read function
@@ -389,16 +510,15 @@
static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
struct file *interpreter, unsigned long *interp_map_addr,
- unsigned long no_base)
+ unsigned long no_base, struct elf_phdr *interp_elf_phdata)
{
- struct elf_phdr *elf_phdata;
struct elf_phdr *eppnt;
unsigned long load_addr = 0;
int load_addr_set = 0;
unsigned long last_bss = 0, elf_bss = 0;
unsigned long error = ~0UL;
unsigned long total_size;
- int retval, i, size;
+ int i;
/* First of all, some simple consistency checks */
if (interp_elf_ex->e_type != ET_EXEC &&
@@ -409,40 +529,14 @@
if (!interpreter->f_op || !interpreter->f_op->mmap)
goto out;
- /*
- * If the size of this structure has changed, then punt, since
- * we will be doing the wrong thing.
- */
- if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr))
- goto out;
- if (interp_elf_ex->e_phnum < 1 ||
- interp_elf_ex->e_phnum > 65536U / sizeof(struct elf_phdr))
- goto out;
-
- /* Now read in all of the header information */
- size = sizeof(struct elf_phdr) * interp_elf_ex->e_phnum;
- if (size > ELF_MIN_ALIGN)
- goto out;
- elf_phdata = kmalloc(size, GFP_KERNEL);
- if (!elf_phdata)
- goto out;
-
- retval = kernel_read(interpreter, interp_elf_ex->e_phoff,
- (char *)elf_phdata, size);
- error = -EIO;
- if (retval != size) {
- if (retval < 0)
- error = retval;
- goto out_close;
- }
-
- total_size = total_mapping_size(elf_phdata, interp_elf_ex->e_phnum);
+ total_size = total_mapping_size(interp_elf_phdata,
+ interp_elf_ex->e_phnum);
if (!total_size) {
error = -EINVAL;
- goto out_close;
+ goto out;
}
- eppnt = elf_phdata;
+ eppnt = interp_elf_phdata;
for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
if (eppnt->p_type == PT_LOAD) {
int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
@@ -469,7 +563,7 @@
*interp_map_addr = map_addr;
error = map_addr;
if (BAD_ADDR(map_addr))
- goto out_close;
+ goto out;
if (!load_addr_set &&
interp_elf_ex->e_type == ET_DYN) {
@@ -488,7 +582,7 @@
eppnt->p_memsz > TASK_SIZE ||
TASK_SIZE - eppnt->p_memsz < k) {
error = -ENOMEM;
- goto out_close;
+ goto out;
}
/*
@@ -518,7 +612,7 @@
*/
if (padzero(elf_bss)) {
error = -EFAULT;
- goto out_close;
+ goto out;
}
/* What we have mapped so far */
@@ -527,13 +621,10 @@
/* Map the last of the bss segment */
error = vm_brk(elf_bss, last_bss - elf_bss);
if (BAD_ADDR(error))
- goto out_close;
+ goto out;
}
error = load_addr;
-
-out_close:
- kfree(elf_phdata);
out:
return error;
}
@@ -573,10 +664,9 @@
int load_addr_set = 0;
char * elf_interpreter = NULL;
unsigned long error;
- struct elf_phdr *elf_ppnt, *elf_phdata;
+ struct elf_phdr *elf_ppnt, *elf_phdata, *interp_elf_phdata = NULL;
unsigned long elf_bss, elf_brk;
int retval, i;
- unsigned int size;
unsigned long elf_entry;
unsigned long interp_load_addr = 0;
unsigned long start_code, end_code, start_data, end_data;
@@ -588,6 +678,7 @@
struct elfhdr elf_ex;
struct elfhdr interp_elf_ex;
} *loc;
+ struct arch_elf_state arch_state = INIT_ARCH_ELF_STATE;
loc = kmalloc(sizeof(*loc), GFP_KERNEL);
if (!loc) {
@@ -610,26 +701,10 @@
if (!bprm->file->f_op || !bprm->file->f_op->mmap)
goto out;
- /* Now read in all of the header information */
- if (loc->elf_ex.e_phentsize != sizeof(struct elf_phdr))
- goto out;
- if (loc->elf_ex.e_phnum < 1 ||
- loc->elf_ex.e_phnum > 65536U / sizeof(struct elf_phdr))
- goto out;
- size = loc->elf_ex.e_phnum * sizeof(struct elf_phdr);
- retval = -ENOMEM;
- elf_phdata = kmalloc(size, GFP_KERNEL);
+ elf_phdata = load_elf_phdrs(&loc->elf_ex, bprm->file);
if (!elf_phdata)
goto out;
- retval = kernel_read(bprm->file, loc->elf_ex.e_phoff,
- (char *)elf_phdata, size);
- if (retval != size) {
- if (retval >= 0)
- retval = -EIO;
- goto out_free_ph;
- }
-
elf_ppnt = elf_phdata;
elf_bss = 0;
elf_brk = 0;
@@ -698,12 +773,21 @@
elf_ppnt = elf_phdata;
for (i = 0; i < loc->elf_ex.e_phnum; i++, elf_ppnt++)
- if (elf_ppnt->p_type == PT_GNU_STACK) {
+ switch (elf_ppnt->p_type) {
+ case PT_GNU_STACK:
if (elf_ppnt->p_flags & PF_X)
executable_stack = EXSTACK_ENABLE_X;
else
executable_stack = EXSTACK_DISABLE_X;
break;
+
+ case PT_LOPROC ... PT_HIPROC:
+ retval = arch_elf_pt_proc(&loc->elf_ex, elf_ppnt,
+ bprm->file, false,
+ &arch_state);
+ if (retval)
+ goto out_free_dentry;
+ break;
}
/* Some simple consistency checks for the interpreter */
@@ -715,8 +799,36 @@
/* Verify the interpreter has a valid arch */
if (!elf_check_arch(&loc->interp_elf_ex))
goto out_free_dentry;
+
+ /* Load the interpreter program headers */
+ interp_elf_phdata = load_elf_phdrs(&loc->interp_elf_ex,
+ interpreter);
+ if (!interp_elf_phdata)
+ goto out_free_dentry;
+
+ /* Pass PT_LOPROC..PT_HIPROC headers to arch code */
+ elf_ppnt = interp_elf_phdata;
+ for (i = 0; i < loc->interp_elf_ex.e_phnum; i++, elf_ppnt++)
+ switch (elf_ppnt->p_type) {
+ case PT_LOPROC ... PT_HIPROC:
+ retval = arch_elf_pt_proc(&loc->interp_elf_ex,
+ elf_ppnt, interpreter,
+ true, &arch_state);
+ if (retval)
+ goto out_free_dentry;
+ break;
+ }
}
+ /*
+ * Allow arch code to reject the ELF at this point, whilst it's
+ * still possible to return an error to the code that invoked
+ * the exec syscall.
+ */
+ retval = arch_check_elf(&loc->elf_ex, !!interpreter, &arch_state);
+ if (retval)
+ goto out_free_dentry;
+
/* Flush all traces of the currently running executable */
retval = flush_old_exec(bprm);
if (retval)
@@ -727,7 +839,7 @@
/* Do this immediately, since STACK_TOP as used in setup_arg_pages
may depend on the personality. */
- SET_PERSONALITY(loc->elf_ex);
+ SET_PERSONALITY2(loc->elf_ex, &arch_state);
if (elf_read_implies_exec(loc->elf_ex, executable_stack))
current->personality |= READ_IMPLIES_EXEC;
@@ -903,7 +1015,7 @@
elf_entry = load_elf_interp(&loc->interp_elf_ex,
interpreter,
&interp_map_addr,
- load_bias);
+ load_bias, interp_elf_phdata);
if (!IS_ERR((void *)elf_entry)) {
/*
* load_elf_interp() returns relocation
@@ -932,6 +1044,7 @@
}
}
+ kfree(interp_elf_phdata);
kfree(elf_phdata);
set_binfmt(&elf_format);
@@ -1000,6 +1113,7 @@
/* error cleanup */
out_free_dentry:
+ kfree(interp_elf_phdata);
allow_write_access(interpreter);
if (interpreter)
fput(interpreter);
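
An architecture opts in by selecting ARCH_BINFMT_ELF_STATE and providing its own struct arch_elf_state plus the two hooks (MIPS uses this to pick an FP mode from its ABI-flags segment). A hedged sketch of the shape such code takes, with illustrative names and fields:

    /* in <asm/elf.h>, with CONFIG_ARCH_BINFMT_ELF_STATE selected */
    struct arch_elf_state {
        int fp_abi;    /* recorded from a PT_LOPROC..PT_HIPROC phdr */
    };
    #define INIT_ARCH_ELF_STATE { .fp_abi = -1 }

    int arch_elf_pt_proc(struct elfhdr *ehdr, struct elf_phdr *phdr,
                         struct file *elf, bool is_interp,
                         struct arch_elf_state *state);
    int arch_check_elf(struct elfhdr *ehdr, bool has_interp,
                       struct arch_elf_state *state);
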
diff --git a/include/asm-generic/dma-contiguous.h b/include/asm-generic/dma-contiguous.h
deleted file mode 100644
index 294b1e7..0000000
--- a/include/asm-generic/dma-contiguous.h
+++ /dev/null
@@ -1,28 +0,0 @@
-#ifndef ASM_DMA_CONTIGUOUS_H
-#define ASM_DMA_CONTIGUOUS_H
-
-#ifdef __KERNEL__
-#ifdef CONFIG_CMA
-
-#include <linux/device.h>
-#include <linux/dma-contiguous.h>
-
-static inline struct cma *dev_get_cma_area(struct device *dev)
-{
- if (dev && dev->cma_area)
- return dev->cma_area;
- return dma_contiguous_default_area;
-}
-
-static inline void dev_set_cma_area(struct device *dev, struct cma *cma)
-{
- if (dev)
- dev->cma_area = cma;
- if (!dev && !dma_contiguous_default_area)
- dma_contiguous_default_area = cma;
-}
-
-#endif
-#endif
-
-#endif
diff --git a/include/asm-generic/early_ioremap.h b/include/asm-generic/early_ioremap.h
new file mode 100644
index 0000000..a5de55c
--- /dev/null
+++ b/include/asm-generic/early_ioremap.h
@@ -0,0 +1,42 @@
+#ifndef _ASM_EARLY_IOREMAP_H_
+#define _ASM_EARLY_IOREMAP_H_
+
+#include <linux/types.h>
+
+/*
+ * early_ioremap() and early_iounmap() are for temporary early boot-time
+ * mappings, before the real ioremap() is functional.
+ */
+extern void __iomem *early_ioremap(resource_size_t phys_addr,
+ unsigned long size);
+extern void *early_memremap(resource_size_t phys_addr,
+ unsigned long size);
+extern void early_iounmap(void __iomem *addr, unsigned long size);
+extern void early_memunmap(void *addr, unsigned long size);
+
+/*
+ * Weak function called by early_ioremap_reset(). It does nothing, but
+ * architectures may provide their own version to do any needed cleanups.
+ */
+extern void early_ioremap_shutdown(void);
+
+#if defined(CONFIG_GENERIC_EARLY_IOREMAP) && defined(CONFIG_MMU)
+/* Arch-specific initialization */
+extern void early_ioremap_init(void);
+
+/* Generic initialization called by architecture code */
+extern void early_ioremap_setup(void);
+
+/*
+ * Called as last step in paging_init() so library can act
+ * accordingly for subsequent map/unmap requests.
+ */
+extern void early_ioremap_reset(void);
+
+#else
+static inline void early_ioremap_init(void) { }
+static inline void early_ioremap_setup(void) { }
+static inline void early_ioremap_reset(void) { }
+#endif
+
+#endif /* _ASM_EARLY_IOREMAP_H_ */
diff --git a/include/asm-generic/fixmap.h b/include/asm-generic/fixmap.h
new file mode 100644
index 0000000..5a64ca4
--- /dev/null
+++ b/include/asm-generic/fixmap.h
@@ -0,0 +1,97 @@
+/*
+ * fixmap.h: compile-time virtual memory allocation
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1998 Ingo Molnar
+ *
+ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
+ * x86_32 and x86_64 integration by Gustavo F. Padovan, February 2009
+ * Break out common bits to asm-generic by Mark Salter, November 2013
+ */
+
+#ifndef __ASM_GENERIC_FIXMAP_H
+#define __ASM_GENERIC_FIXMAP_H
+
+#include <linux/bug.h>
+
+#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
+#define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT)
+
+#ifndef __ASSEMBLY__
+/*
+ * 'index to address' translation. If anyone tries to use the idx
+ * directly without translation, we catch the bug with a NULL-deference
+ * kernel oops. Illegal ranges of incoming indices are caught too.
+ */
+static __always_inline unsigned long fix_to_virt(const unsigned int idx)
+{
+ BUILD_BUG_ON(idx >= __end_of_fixed_addresses);
+ return __fix_to_virt(idx);
+}
+
+static inline unsigned long virt_to_fix(const unsigned long vaddr)
+{
+ BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
+ return __virt_to_fix(vaddr);
+}
+
+/*
+ * Provide some reasonable defaults for page flags.
+ * Not all architectures use all of these different types and some
+ * architectures use different names.
+ */
+#ifndef FIXMAP_PAGE_NORMAL
+#define FIXMAP_PAGE_NORMAL PAGE_KERNEL
+#endif
+#ifndef FIXMAP_PAGE_NOCACHE
+#define FIXMAP_PAGE_NOCACHE PAGE_KERNEL_NOCACHE
+#endif
+#ifndef FIXMAP_PAGE_IO
+#define FIXMAP_PAGE_IO PAGE_KERNEL_IO
+#endif
+#ifndef FIXMAP_PAGE_CLEAR
+#define FIXMAP_PAGE_CLEAR __pgprot(0)
+#endif
+
+#ifndef set_fixmap
+#define set_fixmap(idx, phys) \
+ __set_fixmap(idx, phys, FIXMAP_PAGE_NORMAL)
+#endif
+
+#ifndef clear_fixmap
+#define clear_fixmap(idx) \
+ __set_fixmap(idx, 0, FIXMAP_PAGE_CLEAR)
+#endif
+
+/* Return a pointer with offset calculated */
+#define __set_fixmap_offset(idx, phys, flags) \
+({ \
+ unsigned long addr; \
+ __set_fixmap(idx, phys, flags); \
+ addr = fix_to_virt(idx) + ((phys) & (PAGE_SIZE - 1)); \
+ addr; \
+})
+
+#define set_fixmap_offset(idx, phys) \
+ __set_fixmap_offset(idx, phys, FIXMAP_PAGE_NORMAL)
+
+/*
+ * Some hardware wants to get fixmapped without caching.
+ */
+#define set_fixmap_nocache(idx, phys) \
+ __set_fixmap(idx, phys, FIXMAP_PAGE_NOCACHE)
+
+#define set_fixmap_offset_nocache(idx, phys) \
+ __set_fixmap_offset(idx, phys, FIXMAP_PAGE_NOCACHE)
+
+/*
+ * Some fixmaps are for IO
+ */
+#define set_fixmap_io(idx, phys) \
+ __set_fixmap(idx, phys, FIXMAP_PAGE_IO)
+
+#endif /* __ASSEMBLY__ */
+#endif /* __ASM_GENERIC_FIXMAP_H */
diff --git a/include/asm-generic/rwsem.h b/include/asm-generic/rwsem.h
index bb1e2cd..d48bf5a 100644
--- a/include/asm-generic/rwsem.h
+++ b/include/asm-generic/rwsem.h
@@ -1,5 +1,5 @@
-#ifndef _ASM_POWERPC_RWSEM_H
-#define _ASM_POWERPC_RWSEM_H
+#ifndef _ASM_GENERIC_RWSEM_H
+#define _ASM_GENERIC_RWSEM_H
#ifndef _LINUX_RWSEM_H
#error "Please don't include <asm/rwsem.h> directly, use <linux/rwsem.h> instead."
@@ -8,7 +8,7 @@
#ifdef __KERNEL__
/*
- * R/W semaphores for PPC using the stuff in lib/rwsem.c.
+ * R/W semaphores originally for PPC using the stuff in lib/rwsem.c.
* Adapted largely from include/asm-i386/rwsem.h
* by Paul Mackerras <paulus@samba.org>.
*/
@@ -16,7 +16,7 @@
/*
* the semaphore definition
*/
-#ifdef CONFIG_PPC64
+#ifdef CONFIG_64BIT
# define RWSEM_ACTIVE_MASK 0xffffffffL
#else
# define RWSEM_ACTIVE_MASK 0x0000ffffL
@@ -129,4 +129,4 @@
}
#endif /* __KERNEL__ */
-#endif /* _ASM_POWERPC_RWSEM_H */
+#endif /* _ASM_GENERIC_RWSEM_H */
diff --git a/include/clocksource/arm_arch_timer.h b/include/clocksource/arm_arch_timer.h
index e6c9c4c..c4d0fc4 100644
--- a/include/clocksource/arm_arch_timer.h
+++ b/include/clocksource/arm_arch_timer.h
@@ -29,10 +29,20 @@
#define ARCH_TIMER_PHYS_ACCESS 0
#define ARCH_TIMER_VIRT_ACCESS 1
+#define ARCH_TIMER_USR_PCT_ACCESS_EN (1 << 0) /* physical counter */
+#define ARCH_TIMER_USR_VCT_ACCESS_EN (1 << 1) /* virtual counter */
+#define ARCH_TIMER_VIRT_EVT_EN (1 << 2)
+#define ARCH_TIMER_EVT_TRIGGER_SHIFT (4)
+#define ARCH_TIMER_EVT_TRIGGER_MASK (0xF << ARCH_TIMER_EVT_TRIGGER_SHIFT)
+#define ARCH_TIMER_USR_VT_ACCESS_EN (1 << 8) /* virtual timer registers */
+#define ARCH_TIMER_USR_PT_ACCESS_EN (1 << 9) /* physical timer registers */
+
+#define ARCH_TIMER_EVT_STREAM_FREQ 10000 /* 100us */
+
#ifdef CONFIG_ARM_ARCH_TIMER
extern u32 arch_timer_get_rate(void);
-extern u64 (*arch_timer_read_counter)(void);
+extern u64 arch_timer_read_counter(void);
extern struct timecounter *arch_timer_get_timecounter(void);
#else
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index a4a0d43..a482811 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -29,6 +29,7 @@
extern int register_cpu(struct cpu *cpu, int num);
extern struct device *get_cpu_device(unsigned cpu);
extern bool cpu_is_hotpluggable(unsigned cpu);
+extern bool arch_match_cpu_phys_id(int cpu, u64 phys_id);
extern int cpu_add_dev_attr(struct device_attribute *attr);
extern void cpu_remove_dev_attr(struct device_attribute *attr);
diff --git a/include/linux/device.h b/include/linux/device.h
index c0a1261..d98ec77 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -698,7 +698,7 @@
struct dma_coherent_mem *dma_mem; /* internal for coherent mem
override */
-#ifdef CONFIG_CMA
+#ifdef CONFIG_DMA_CMA
struct cma *cma_area; /* contiguous memory area for dma
allocations */
#endif
diff --git a/include/linux/dma-contiguous.h b/include/linux/dma-contiguous.h
index 01b5c84..3b28f93 100644
--- a/include/linux/dma-contiguous.h
+++ b/include/linux/dma-contiguous.h
@@ -57,7 +57,7 @@
struct page;
struct device;
-#ifdef CONFIG_CMA
+#ifdef CONFIG_DMA_CMA
/*
* There is always at least global CMA area and a few optional device
@@ -67,9 +67,53 @@
extern struct cma *dma_contiguous_default_area;
+static inline struct cma *dev_get_cma_area(struct device *dev)
+{
+ if (dev && dev->cma_area)
+ return dev->cma_area;
+ return dma_contiguous_default_area;
+}
+
+static inline void dev_set_cma_area(struct device *dev, struct cma *cma)
+{
+ if (dev)
+ dev->cma_area = cma;
+}
+
+static inline void dma_contiguous_set_default(struct cma *cma)
+{
+ dma_contiguous_default_area = cma;
+}
+
void dma_contiguous_reserve(phys_addr_t addr_limit);
-int dma_declare_contiguous(struct device *dev, phys_addr_t size,
- phys_addr_t base, phys_addr_t limit);
+
+int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
+ phys_addr_t limit, struct cma **res_cma);
+
+/**
+ * dma_declare_contiguous() - reserve area for contiguous memory handling
+ * for particular device
+ * @dev: Pointer to device structure.
+ * @size: Size of the reserved memory.
+ * @base: Start address of the reserved memory (optional, 0 for any).
+ * @limit: End address of the reserved memory (optional, 0 for any).
+ *
+ * This function reserves memory for the specified device. It should be
+ * called by board specific code while the early allocator (memblock or
+ * bootmem) is still active.
+ */
+static inline int dma_declare_contiguous(struct device *dev, phys_addr_t size,
+ phys_addr_t base, phys_addr_t limit)
+{
+ struct cma *cma;
+ int ret;
+
+ ret = dma_contiguous_reserve_area(size, base, limit, &cma);
+ if (ret == 0)
+ dev_set_cma_area(dev, cma);
+
+ return ret;
+}
struct page *dma_alloc_from_contiguous(struct device *dev, int count,
unsigned int order);
@@ -80,8 +124,22 @@
#define MAX_CMA_AREAS (0)
+static inline struct cma *dev_get_cma_area(struct device *dev)
+{
+ return NULL;
+}
+
+static inline void dev_set_cma_area(struct device *dev, struct cma *cma) { }
+
+static inline void dma_contiguous_set_default(struct cma *cma) { }
+
static inline void dma_contiguous_reserve(phys_addr_t limit) { }
+static inline int dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
+ phys_addr_t limit, struct cma **res_cma)
+{
+ return -ENOSYS;
+}
+
static inline
int dma_declare_contiguous(struct device *dev, phys_addr_t size,
phys_addr_t base, phys_addr_t limit)
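
Since dma_declare_contiguous() is now a thin wrapper around dma_contiguous_reserve_area(), existing board code is unaffected; a typical caller looks like this (the device and size are illustrative):

    /* in early board init, while memblock is still active */
    err = dma_declare_contiguous(&mydev.dev, SZ_16M, 0, 0);
    if (err)
        pr_warn("mydev: no CMA area reserved\n");
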
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 94af418..48ef6f5 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -97,6 +97,30 @@
}
#endif
+/*
+ * Set both the DMA mask and the coherent DMA mask to the same thing.
+ * Note that we don't check the return value from dma_set_coherent_mask()
+ * as the DMA API guarantees that the coherent DMA mask can be set to
+ * the same or smaller than the streaming DMA mask.
+ */
+static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
+{
+ int rc = dma_set_mask(dev, mask);
+ if (rc == 0)
+ dma_set_coherent_mask(dev, mask);
+ return rc;
+}
+
+/*
+ * Similar to the above, except it deals with the case where the device
+ * does not have dev->dma_mask appropriately setup.
+ */
+static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
+{
+ dev->dma_mask = &dev->coherent_dma_mask;
+ return dma_set_mask_and_coherent(dev, mask);
+}
+
extern u64 dma_get_required_mask(struct device *dev);
static inline unsigned int dma_get_max_seg_size(struct device *dev)
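
dma_coerce_mask_and_coherent() exists for buses such as the platform bus whose devices often arrive with a NULL dev->dma_mask; it forcibly points dma_mask at coherent_dma_mask before setting both. A hedged probe sketch (mydev_probe is illustrative):

    static int mydev_probe(struct platform_device *pdev)
    {
        int ret;

        ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
        if (ret)
            return ret;
        /* streaming and coherent mappings may now target &pdev->dev */
        return 0;
    }
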
diff --git a/include/linux/elf.h b/include/linux/elf.h
index 40a3c0e0..84f3fad 100644
--- a/include/linux/elf.h
+++ b/include/linux/elf.h
@@ -15,6 +15,11 @@
set_personality(PER_LINUX | (current->personality & (~PER_MASK)))
#endif
+#ifndef SET_PERSONALITY2
+#define SET_PERSONALITY2(ex, state) \
+ SET_PERSONALITY(ex)
+#endif
+
#if ELF_CLASS == ELFCLASS32
extern Elf32_Dyn _DYNAMIC [];
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 528454c..26ee56c 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -123,7 +123,7 @@
} while (0)
extern void split_huge_page_pmd_mm(struct mm_struct *mm, unsigned long address,
pmd_t *pmd);
-#if HPAGE_PMD_ORDER > MAX_ORDER
+#if HPAGE_PMD_ORDER >= MAX_ORDER
#error "hugepages can't be allocated by the buddy allocator"
#endif
extern int hugepage_madvise(struct vm_area_struct *vma,
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 6b4890f..981546a 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -69,6 +69,10 @@
int dequeue_hwpoisoned_huge_page(struct page *page);
void copy_huge_page(struct page *dst, struct page *src);
+#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
+pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);
+#endif
+
extern unsigned long hugepages_treat_as_movable;
extern const unsigned long hugetlb_zero, hugetlb_infinity;
extern int sysctl_hugetlb_shm_group;
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index 623325e..f4101f0 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -154,6 +154,14 @@
return desc->status_use_accessors & IRQ_NO_BALANCING_MASK;
}
+static inline int irq_is_percpu(unsigned int irq)
+{
+ struct irq_desc *desc;
+
+ desc = irq_to_desc(irq);
+ return desc->status_use_accessors & IRQ_PER_CPU;
+}
+
static inline void
irq_set_lockdep_class(unsigned int irq, struct lock_class_key *class)
{
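
irq_is_percpu() lets a driver choose the right request primitive when an interrupt may be a per-CPU one (as the architected timer PPIs are); a sketch with illustrative handler and cookie names:

    if (irq_is_percpu(irq))
        err = request_percpu_irq(irq, mydev_percpu_handler,
                                 "mydev", mydev_percpu_cookie);
    else
        err = request_irq(irq, mydev_handler, 0, "mydev", mydev);
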
diff --git a/include/linux/of.h b/include/linux/of.h
index 76979f4..e09aa6a 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -266,6 +266,7 @@
extern const void *of_get_property(const struct device_node *node,
const char *name,
int *lenp);
+extern struct device_node *of_get_cpu_node(int cpu, unsigned int *thread);
#define for_each_property_of_node(dn, pp) \
for (pp = dn->properties; pp != NULL; pp = pp->next)
@@ -464,6 +465,12 @@
return NULL;
}
+static inline struct device_node *of_get_cpu_node(int cpu,
+ unsigned int *thread)
+{
+ return NULL;
+}
+
static inline int of_property_read_u64(const struct device_node *np,
const char *propname, u64 *out_value)
{
diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h
index 7e190a9..61f75f6 100644
--- a/include/linux/of_fdt.h
+++ b/include/linux/of_fdt.h
@@ -127,8 +127,7 @@
* physical addresses.
*/
#ifdef CONFIG_BLK_DEV_INITRD
-extern void early_init_dt_setup_initrd_arch(unsigned long start,
- unsigned long end);
+extern void early_init_dt_setup_initrd_arch(u64 start, u64 end);
#endif
/* Early flat tree scan hooks */
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index a782f3c..21c074b 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -280,6 +280,22 @@
/*
* Console helpers.
*/
+struct earlycon_device {
+ struct console *con;
+ struct uart_port port;
+ char options[16]; /* e.g., 115200n8 */
+ unsigned int baud;
+};
+int setup_earlycon(char *buf, const char *match,
+ int (*setup)(struct earlycon_device *, const char *));
+
+#define EARLYCON_DECLARE(name, func) \
+static int __init name ## _setup_earlycon(char *buf) \
+{ \
+ return setup_earlycon(buf, __stringify(name), func); \
+} \
+early_param("earlycon", name ## _setup_earlycon);
+
struct uart_port *uart_get_console(struct uart_port *ports, int nr,
struct console *c);
void uart_parse_options(char *options, int *baud, int *parity, int *bits,
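
EARLYCON_DECLARE() expands to an early_param("earlycon", ...) handler that forwards to setup_earlycon() with the given match name, so a driver only supplies a setup callback. A minimal hedged example (the myuart_* symbols are illustrative):

    static int __init myuart_earlycon_setup(struct earlycon_device *device,
                                            const char *options)
    {
        if (!device->port.membase)
            return -ENODEV;
        device->con->write = myuart_earlycon_write; /* driver's write */
        return 0;
    }
    EARLYCON_DECLARE(myuart, myuart_earlycon_setup);
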
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index a5ffd32..e7a018e 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -116,4 +116,6 @@
#endif
extern void swiotlb_print_info(void);
+extern int is_swiotlb_buffer(phys_addr_t paddr);
+
#endif /* __LINUX_SWIOTLB_H */
diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h
index 28bb0b3..f498db4 100644
--- a/include/uapi/linux/prctl.h
+++ b/include/uapi/linux/prctl.h
@@ -158,4 +158,9 @@
#define PR_SET_VMA 0x53564d41
# define PR_SET_VMA_ANON_NAME 0
+#define PR_SET_FP_MODE 45
+#define PR_GET_FP_MODE 46
+# define PR_FP_MODE_FR (1 << 0) /* 64b FP registers */
+# define PR_FP_MODE_FRE (1 << 1) /* 32b compatibility */
+
#endif /* _LINUX_PRCTL_H */
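
From userspace the new pair is used like any other prctl. A sketch follows;
the FP modes are only meaningful on architectures that implement the hooks
(MIPS being the original user), and the fallback defines cover older libc
headers:

    #include <stdio.h>
    #include <sys/prctl.h>

    #ifndef PR_SET_FP_MODE		/* for older libc headers */
    #define PR_SET_FP_MODE	45
    #define PR_GET_FP_MODE	46
    #define PR_FP_MODE_FR	(1 << 0)
    #endif

    int main(void)
    {
    	int mode = prctl(PR_GET_FP_MODE, 0, 0, 0, 0);

    	if (mode < 0) {
    		perror("PR_GET_FP_MODE");
    		return 1;
    	}
    	/* request 64-bit FP register mode on top of the current mode */
    	if (prctl(PR_SET_FP_MODE, mode | PR_FP_MODE_FR, 0, 0, 0)) {
    		perror("PR_SET_FP_MODE");
    		return 1;
    	}
    	return 0;
    }
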
diff --git a/kernel/sys.c b/kernel/sys.c
index a3bef5b..e9b09ff 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -96,6 +96,13 @@
# define SET_TSC_CTL(a) (-EINVAL)
#endif
+#ifndef GET_FP_MODE
+# define GET_FP_MODE(a) (-EINVAL)
+#endif
+#ifndef SET_FP_MODE
+# define SET_FP_MODE(a,b) (-EINVAL)
+#endif
+
/*
* this is where the system-wide overflow UID and GID are defined, for
* architectures that now have 32-bit UID/GID but didn't in the past
@@ -2433,6 +2440,12 @@
if (arg2 || arg3 || arg4 || arg5)
return -EINVAL;
return task_no_new_privs(current) ? 1 : 0;
+ case PR_SET_FP_MODE:
+ error = SET_FP_MODE(me, arg2);
+ break;
+ case PR_GET_FP_MODE:
+ error = GET_FP_MODE(me);
+ break;
case PR_SET_VMA:
error = prctl_set_vma(arg2, arg3, arg4, arg5);
break;
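
The prctl core above only dispatches; an architecture opts in by defining
the two macros before this point, otherwise both calls fail with -EINVAL.
A sketch of that wiring, with arch_{get,set}_process_fp_mode() as
hypothetical helper names (MIPS calls its helpers
mips_{get,set}_process_fp_mode()):

    /* in arch/<arch>/include/asm/processor.h -- sketch */
    #define GET_FP_MODE(task)		arch_get_process_fp_mode(task)
    #define SET_FP_MODE(task, value)	arch_set_process_fp_mode(task, value)
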
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index d23762e6..eba74ec 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -366,7 +366,7 @@
io_tlb_nslabs = 0;
}
-static int is_swiotlb_buffer(phys_addr_t paddr)
+int is_swiotlb_buffer(phys_addr_t paddr)
{
return paddr >= io_tlb_start && paddr < io_tlb_end;
}
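
With the symbol made non-static, architecture DMA code can test whether an
address landed in the bounce pool. A minimal sketch, assuming the
architecture provides a dma_to_phys() translation helper:

    #include <linux/swiotlb.h>
    #include <linux/types.h>
    #include <asm/dma-mapping.h>	/* dma_to_phys(), where the arch has it */

    static bool addr_is_bounced(struct device *dev, dma_addr_t addr)
    {
    	return is_swiotlb_buffer(dma_to_phys(dev, addr));
    }
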
diff --git a/mm/Kconfig b/mm/Kconfig
index 72ea99c..7d4c831 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -478,6 +478,33 @@
If unsure, say Y to enable frontswap.
+config GENERIC_EARLY_IOREMAP
+ bool
+
+config CMA
+ bool "Contiguous Memory Allocator"
+ depends on HAVE_MEMBLOCK
+ select MIGRATION
+ select MEMORY_ISOLATION
+ help
+ This enables the Contiguous Memory Allocator which allows other
+ subsystems to allocate big physically-contiguous blocks of memory.
+ CMA reserves a region of memory and allows only movable pages to
+ be allocated from it. This way, the kernel can use the memory for
+ pagecache and when a subsystem requests a contiguous area, the
+ allocated pages are migrated away to serve the contiguous request.
+
+ If unsure, say "n".
+
+config CMA_DEBUG
+ bool "CMA debug messages (DEVELOPMENT)"
+ depends on DEBUG_KERNEL && CMA
+ help
+ Turns on debug messages in CMA. This produces KERN_DEBUG
+ messages for every CMA call as well as various messages while
+ processing calls such as dma_alloc_from_contiguous().
+ This option does not affect warning and error messages.
+
config ZPOOL
tristate "Common API for compressed memory storage"
default n
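
On the driver side, the allocator is reached through the DMA contiguous
helpers mentioned in the help text above. A sketch (the mydev_* wrappers are
illustrative; counts are in pages and the last argument of the allocation
call is an alignment order):

    #include <linux/dma-contiguous.h>

    static struct page *mydev_alloc_frames(struct device *dev, int nr_pages)
    {
    	return dma_alloc_from_contiguous(dev, nr_pages, 0);
    }

    static void mydev_free_frames(struct device *dev, struct page *page,
    			      int nr_pages)
    {
    	dma_release_from_contiguous(dev, page, nr_pages);
    }
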
diff --git a/mm/Makefile b/mm/Makefile
index fa2e7df..0fa6442 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -58,5 +58,6 @@
obj-$(CONFIG_DEBUG_KMEMLEAK_TEST) += kmemleak-test.o
obj-$(CONFIG_CLEANCACHE) += cleancache.o
obj-$(CONFIG_MEMORY_ISOLATION) += page_isolation.o
+obj-$(CONFIG_GENERIC_EARLY_IOREMAP) += early_ioremap.o
obj-$(CONFIG_ZPOOL) += zpool.o
obj-$(CONFIG_ZSMALLOC) += zsmalloc.o
diff --git a/mm/early_ioremap.c b/mm/early_ioremap.c
new file mode 100644
index 0000000..e10ccd2
--- /dev/null
+++ b/mm/early_ioremap.c
@@ -0,0 +1,245 @@
+/*
+ * Provide common bits of early_ioremap() support for architectures needing
+ * temporary mappings during boot before ioremap() is available.
+ *
+ * This is mostly a direct copy of the x86 early_ioremap implementation.
+ *
+ * (C) Copyright 1995 1996, 2014 Linus Torvalds
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+#include <asm/fixmap.h>
+
+#ifdef CONFIG_MMU
+static int early_ioremap_debug __initdata;
+
+static int __init early_ioremap_debug_setup(char *str)
+{
+ early_ioremap_debug = 1;
+
+ return 0;
+}
+early_param("early_ioremap_debug", early_ioremap_debug_setup);
+
+static int after_paging_init __initdata;
+
+void __init __weak early_ioremap_shutdown(void)
+{
+}
+
+void __init early_ioremap_reset(void)
+{
+ early_ioremap_shutdown();
+ after_paging_init = 1;
+}
+
+/*
+ * Generally, ioremap() is available after paging_init() has been called.
+ * Architectures wanting to allow early_ioremap after paging_init() can
+ * define __late_set_fixmap and __late_clear_fixmap to do the right thing.
+ */
+#ifndef __late_set_fixmap
+static inline void __init __late_set_fixmap(enum fixed_addresses idx,
+ phys_addr_t phys, pgprot_t prot)
+{
+ BUG();
+}
+#endif
+
+#ifndef __late_clear_fixmap
+static inline void __init __late_clear_fixmap(enum fixed_addresses idx)
+{
+ BUG();
+}
+#endif
+
+static void __iomem *prev_map[FIX_BTMAPS_SLOTS] __initdata;
+static unsigned long prev_size[FIX_BTMAPS_SLOTS] __initdata;
+static unsigned long slot_virt[FIX_BTMAPS_SLOTS] __initdata;
+
+void __init early_ioremap_setup(void)
+{
+ int i;
+
+ for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
+ if (WARN_ON(prev_map[i]))
+ break;
+
+ for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
+ slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
+}
+
+static int __init check_early_ioremap_leak(void)
+{
+ int count = 0;
+ int i;
+
+ for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
+ if (prev_map[i])
+ count++;
+
+ if (WARN(count, KERN_WARNING
+ "Debug warning: early ioremap leak of %d areas detected.\n"
+ "please boot with early_ioremap_debug and report the dmesg.\n",
+ count))
+ return 1;
+ return 0;
+}
+late_initcall(check_early_ioremap_leak);
+
+static void __init __iomem *
+__early_ioremap(resource_size_t phys_addr, unsigned long size, pgprot_t prot)
+{
+ unsigned long offset;
+ resource_size_t last_addr;
+ unsigned int nrpages;
+ enum fixed_addresses idx;
+ int i, slot;
+
+ WARN_ON(system_state != SYSTEM_BOOTING);
+
+ slot = -1;
+ for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
+ if (!prev_map[i]) {
+ slot = i;
+ break;
+ }
+ }
+
+ if (WARN(slot < 0, "%s(%08llx, %08lx) not found slot\n",
+ __func__, (u64)phys_addr, size))
+ return NULL;
+
+ /* Don't allow wraparound or zero size */
+ last_addr = phys_addr + size - 1;
+ if (WARN_ON(!size || last_addr < phys_addr))
+ return NULL;
+
+ prev_size[slot] = size;
+ /*
+ * Mappings have to be page-aligned
+ */
+ offset = phys_addr & ~PAGE_MASK;
+ phys_addr &= PAGE_MASK;
+ size = PAGE_ALIGN(last_addr + 1) - phys_addr;
+
+ /*
+ * Mappings have to fit in the FIX_BTMAP area.
+ */
+ nrpages = size >> PAGE_SHIFT;
+ if (WARN_ON(nrpages > NR_FIX_BTMAPS))
+ return NULL;
+
+ /*
+ * Ok, go for it..
+ */
+ idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
+ while (nrpages > 0) {
+ if (after_paging_init)
+ __late_set_fixmap(idx, phys_addr, prot);
+ else
+ __early_set_fixmap(idx, phys_addr, prot);
+ phys_addr += PAGE_SIZE;
+ --idx;
+ --nrpages;
+ }
+ WARN(early_ioremap_debug, "%s(%08llx, %08lx) [%d] => %08lx + %08lx\n",
+ __func__, (u64)phys_addr, size, slot, offset, slot_virt[slot]);
+
+ prev_map[slot] = (void __iomem *)(offset + slot_virt[slot]);
+ return prev_map[slot];
+}
+
+void __init early_iounmap(void __iomem *addr, unsigned long size)
+{
+ unsigned long virt_addr;
+ unsigned long offset;
+ unsigned int nrpages;
+ enum fixed_addresses idx;
+ int i, slot;
+
+ slot = -1;
+ for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
+ if (prev_map[i] == addr) {
+ slot = i;
+ break;
+ }
+ }
+
+ if (WARN(slot < 0, "early_iounmap(%p, %08lx) not found slot\n",
+ addr, size))
+ return;
+
+ if (WARN(prev_size[slot] != size,
+ "early_iounmap(%p, %08lx) [%d] size not consistent %08lx\n",
+ addr, size, slot, prev_size[slot]))
+ return;
+
+ WARN(early_ioremap_debug, "early_iounmap(%p, %08lx) [%d]\n",
+ addr, size, slot);
+
+ virt_addr = (unsigned long)addr;
+ if (WARN_ON(virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)))
+ return;
+
+ offset = virt_addr & ~PAGE_MASK;
+ nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;
+
+ idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
+ while (nrpages > 0) {
+ if (after_paging_init)
+ __late_clear_fixmap(idx);
+ else
+ __early_set_fixmap(idx, 0, FIXMAP_PAGE_CLEAR);
+ --idx;
+ --nrpages;
+ }
+ prev_map[slot] = NULL;
+}
+
+/* Remap an IO device */
+void __init __iomem *
+early_ioremap(resource_size_t phys_addr, unsigned long size)
+{
+ return __early_ioremap(phys_addr, size, FIXMAP_PAGE_IO);
+}
+
+/* Remap memory */
+void __init *
+early_memremap(resource_size_t phys_addr, unsigned long size)
+{
+ return (__force void *)__early_ioremap(phys_addr, size,
+ FIXMAP_PAGE_NORMAL);
+}
+#else /* CONFIG_MMU */
+
+void __init __iomem *
+early_ioremap(resource_size_t phys_addr, unsigned long size)
+{
+ return (__force void __iomem *)phys_addr;
+}
+
+/* Remap memory */
+void __init *
+early_memremap(resource_size_t phys_addr, unsigned long size)
+{
+ return (void *)phys_addr;
+}
+
+void __init early_iounmap(void __iomem *addr, unsigned long size)
+{
+}
+
+#endif /* CONFIG_MMU */
+
+
+void __init early_memunmap(void *addr, unsigned long size)
+{
+ early_iounmap((__force void __iomem *)addr, size);
+}
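
An architecture's boot code would typically use the generic helpers like the
sketch below, mapping a device before paging_init() makes ioremap()
available. MYBOARD_UART_PHYS and the myboard_* name are illustrative:

    #include <linux/init.h>
    #include <linux/io.h>

    #define MYBOARD_UART_PHYS	0x10009000	/* illustrative address */

    static void __init myboard_early_probe(void)
    {
    	void __iomem *regs = early_ioremap(MYBOARD_UART_PHYS, 0x1000);

    	if (!regs)
    		return;
    	/* ... touch the device before ioremap() is available ... */
    	early_iounmap(regs, 0x1000);
    }
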
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index e2bfbf7..a7482d1 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2295,16 +2295,26 @@
int cow;
struct hstate *h = hstate_vma(vma);
unsigned long sz = huge_page_size(h);
+ unsigned long mmun_start; /* For mmu_notifiers */
+ unsigned long mmun_end; /* For mmu_notifiers */
+ int ret = 0;
cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
+ mmun_start = vma->vm_start;
+ mmun_end = vma->vm_end;
+ if (cow)
+ mmu_notifier_invalidate_range_start(src, mmun_start, mmun_end);
+
for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
src_pte = huge_pte_offset(src, addr);
if (!src_pte)
continue;
dst_pte = huge_pte_alloc(dst, addr, sz);
- if (!dst_pte)
- goto nomem;
+ if (!dst_pte) {
+ ret = -ENOMEM;
+ break;
+ }
/* If the pagetables are shared don't copy or take references */
if (dst_pte == src_pte)
@@ -2324,10 +2334,11 @@
spin_unlock(&src->page_table_lock);
spin_unlock(&dst->page_table_lock);
}
- return 0;
-nomem:
- return -ENOMEM;
+ if (cow)
+ mmu_notifier_invalidate_range_end(src, mmun_start, mmun_end);
+
+ return ret;
}
static int is_hugetlb_entry_migration(pte_t pte)
@@ -2931,15 +2942,6 @@
return ret;
}
-/* Can be overriden by architectures */
-__attribute__((weak)) struct page *
-follow_huge_pud(struct mm_struct *mm, unsigned long address,
- pud_t *pud, int write)
-{
- BUG();
- return NULL;
-}
-
long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
struct page **pages, struct vm_area_struct **vmas,
unsigned long *position, unsigned long *nr_pages,
@@ -3169,6 +3171,216 @@
hugetlb_acct_memory(h, -(chg - freed));
}
+#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
+static unsigned long page_table_shareable(struct vm_area_struct *svma,
+ struct vm_area_struct *vma,
+ unsigned long addr, pgoff_t idx)
+{
+ unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
+ svma->vm_start;
+ unsigned long sbase = saddr & PUD_MASK;
+ unsigned long s_end = sbase + PUD_SIZE;
+
+ /* Allow segments to share if only one is marked locked */
+ unsigned long vm_flags = vma->vm_flags & ~VM_LOCKED;
+ unsigned long svm_flags = svma->vm_flags & ~VM_LOCKED;
+
+ /*
+ * match the virtual addresses, permission and the alignment of the
+ * page table page.
+ */
+ if (pmd_index(addr) != pmd_index(saddr) ||
+ vm_flags != svm_flags ||
+ sbase < svma->vm_start || svma->vm_end < s_end)
+ return 0;
+
+ return saddr;
+}
+
+static int vma_shareable(struct vm_area_struct *vma, unsigned long addr)
+{
+ unsigned long base = addr & PUD_MASK;
+ unsigned long end = base + PUD_SIZE;
+
+ /*
+ * check on proper vm_flags and page table alignment
+ */
+ if (vma->vm_flags & VM_MAYSHARE &&
+ vma->vm_start <= base && end <= vma->vm_end)
+ return 1;
+ return 0;
+}
+
+/*
+ * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
+ * and returns the corresponding pte. While this is not necessary for the
+ * !shared pmd case because we can allocate the pmd later as well, it makes the
+ * code much cleaner. pmd allocation is essential for the shared case because
+ * pud has to be populated inside the same i_mmap_mutex section - otherwise
+ * racing tasks could either miss the sharing (see huge_pte_offset) or select a
+ * bad pmd for sharing.
+ */
+pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
+{
+ struct vm_area_struct *vma = find_vma(mm, addr);
+ struct address_space *mapping = vma->vm_file->f_mapping;
+ pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
+ vma->vm_pgoff;
+ struct vm_area_struct *svma;
+ unsigned long saddr;
+ pte_t *spte = NULL;
+ pte_t *pte;
+
+ if (!vma_shareable(vma, addr))
+ return (pte_t *)pmd_alloc(mm, pud, addr);
+
+ mutex_lock(&mapping->i_mmap_mutex);
+ vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
+ if (svma == vma)
+ continue;
+
+ saddr = page_table_shareable(svma, vma, addr, idx);
+ if (saddr) {
+ spte = huge_pte_offset(svma->vm_mm, saddr);
+ if (spte) {
+ get_page(virt_to_page(spte));
+ break;
+ }
+ }
+ }
+
+ if (!spte)
+ goto out;
+
+ spin_lock(&mm->page_table_lock);
+ if (pud_none(*pud))
+ pud_populate(mm, pud,
+ (pmd_t *)((unsigned long)spte & PAGE_MASK));
+ else
+ put_page(virt_to_page(spte));
+ spin_unlock(&mm->page_table_lock);
+out:
+ pte = (pte_t *)pmd_alloc(mm, pud, addr);
+ mutex_unlock(&mapping->i_mmap_mutex);
+ return pte;
+}
+
+/*
+ * unmap huge page backed by shared pte.
+ *
+ * Hugetlb pte page is ref counted at the time of mapping. If pte is shared
+ * indicated by page_count > 1, unmap is achieved by clearing pud and
+ * decrementing the ref count. If count == 1, the pte page is not shared.
+ *
+ * called with vma->vm_mm->page_table_lock held.
+ *
+ * returns: 1 successfully unmapped a shared pte page
+ * 0 the underlying pte page is not shared, or it is the last user
+ */
+int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
+{
+ pgd_t *pgd = pgd_offset(mm, *addr);
+ pud_t *pud = pud_offset(pgd, *addr);
+
+ BUG_ON(page_count(virt_to_page(ptep)) == 0);
+ if (page_count(virt_to_page(ptep)) == 1)
+ return 0;
+
+ pud_clear(pud);
+ put_page(virt_to_page(ptep));
+ *addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
+ return 1;
+}
+#define want_pmd_share() (1)
+#else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
+pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
+{
+ return NULL;
+}
+#define want_pmd_share() (0)
+#endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
+
+#ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
+pte_t *huge_pte_alloc(struct mm_struct *mm,
+ unsigned long addr, unsigned long sz)
+{
+ pgd_t *pgd;
+ pud_t *pud;
+ pte_t *pte = NULL;
+
+ pgd = pgd_offset(mm, addr);
+ pud = pud_alloc(mm, pgd, addr);
+ if (pud) {
+ if (sz == PUD_SIZE) {
+ pte = (pte_t *)pud;
+ } else {
+ BUG_ON(sz != PMD_SIZE);
+ if (want_pmd_share() && pud_none(*pud))
+ pte = huge_pmd_share(mm, addr, pud);
+ else
+ pte = (pte_t *)pmd_alloc(mm, pud, addr);
+ }
+ }
+ BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));
+
+ return pte;
+}
+
+pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
+{
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd = NULL;
+
+ pgd = pgd_offset(mm, addr);
+ if (pgd_present(*pgd)) {
+ pud = pud_offset(pgd, addr);
+ if (pud_present(*pud)) {
+ if (pud_huge(*pud))
+ return (pte_t *)pud;
+ pmd = pmd_offset(pud, addr);
+ }
+ }
+ return (pte_t *) pmd;
+}
+
+struct page *
+follow_huge_pmd(struct mm_struct *mm, unsigned long address,
+ pmd_t *pmd, int write)
+{
+ struct page *page;
+
+ page = pte_page(*(pte_t *)pmd);
+ if (page)
+ page += ((address & ~PMD_MASK) >> PAGE_SHIFT);
+ return page;
+}
+
+struct page *
+follow_huge_pud(struct mm_struct *mm, unsigned long address,
+ pud_t *pud, int write)
+{
+ struct page *page;
+
+ page = pte_page(*(pte_t *)pud);
+ if (page)
+ page += ((address & ~PUD_MASK) >> PAGE_SHIFT);
+ return page;
+}
+
+#else /* !CONFIG_ARCH_WANT_GENERAL_HUGETLB */
+
+/* Can be overridden by architectures */
+__attribute__((weak)) struct page *
+follow_huge_pud(struct mm_struct *mm, unsigned long address,
+ pud_t *pud, int write)
+{
+ BUG();
+ return NULL;
+}
+
+#endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
+
#ifdef CONFIG_MEMORY_FAILURE
/* Should be called in hugetlb_lock */
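
For context, the sharing path above only triggers for VM_MAYSHARE mappings
that fully cover a PUD_SIZE-aligned range (see vma_shareable()). A userspace
sketch of a mapping that can satisfy that condition; the hugetlbfs path is
hypothetical, the span is 1 GiB as on x86-64, and whether sharing actually
occurs still depends on the address the kernel picks:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/mman.h>

    #define PUD_SPAN	(1UL << 30)	/* illustrative PUD_SIZE */

    int main(void)
    {
    	int fd = open("/mnt/huge/shared", O_CREAT | O_RDWR, 0600);
    	void *p;

    	if (fd < 0)
    		return 1;
    	/* MAP_SHARED covering whole aligned PUD ranges => shareable PMDs */
    	p = mmap(NULL, 2 * PUD_SPAN, PROT_READ | PROT_WRITE, MAP_SHARED,
    		 fd, 0);
    	if (p == MAP_FAILED)
    		return 1;
    	printf("mapped %lu bytes at %p\n", 2 * PUD_SPAN, p);
    	return 0;
    }

A second process mapping the same file the same way can then reuse the first
process's PMD page instead of allocating its own.
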
diff --git a/scripts/gcc-goto.sh b/scripts/gcc-goto.sh
index a2af2e8..c9469d3 100644
--- a/scripts/gcc-goto.sh
+++ b/scripts/gcc-goto.sh
@@ -5,7 +5,7 @@
cat << "END" | $@ -x c - -c -o /dev/null >/dev/null 2>&1 && echo "y"
int main(void)
{
-#ifdef __arm__
+#if defined(__arm__) || defined(__aarch64__)
/*
* Not related to asm goto, but used by jump label
* and broken on some ARM GCC versions (see GCC Bug 48637).
diff --git a/scripts/sortextable.c b/scripts/sortextable.c
index 1f10e89..f9ce116 100644
--- a/scripts/sortextable.c
+++ b/scripts/sortextable.c
@@ -31,6 +31,10 @@
#include <tools/be_byteshift.h>
#include <tools/le_byteshift.h>
+#ifndef EM_AARCH64
+#define EM_AARCH64 183
+#endif
+
static int fd_map; /* File descriptor for file being modified. */
static int mmap_failed; /* Boolean flag. */
static void *ehdr_curr; /* current ElfXX_Ehdr * for resource cleanup */
@@ -249,6 +253,7 @@
custom_sort = sort_relative_table;
break;
case EM_ARM:
+ case EM_AARCH64:
case EM_MIPS:
break;
} /* end switch */