Merge remote-tracking branch 'android-msm-wahoo-4.4-oc-dr1' into android-msm-wahoo-4.4-oc-mr1
January 2018.1
Bug: 68996063
Change-Id: I3b8e567f3bff0422b8decbe1e2ced6c7cabe7b15
Signed-off-by: Pat Tjin <pattjin@google.com>
diff --git a/Documentation/ABI/testing/procfs-smaps_rollup b/Documentation/ABI/testing/procfs-smaps_rollup
new file mode 100644
index 0000000..8c24138
--- /dev/null
+++ b/Documentation/ABI/testing/procfs-smaps_rollup
@@ -0,0 +1,34 @@
+What: /proc/pid/smaps_rollup
+Date: August 2017
+Contact: Daniel Colascione <dancol@google.com>
+Description:
+ This file provides pre-summed memory information for a
+ process. The format is identical to /proc/pid/smaps,
+ except instead of an entry for each VMA in a process,
+ smaps_rollup has a single entry (tagged "[rollup]")
+ for which each field is the sum of the corresponding
+ fields from all the maps in /proc/pid/smaps.
+ For more details, see the procfs man page.
+
+ Typical output looks like this:
+
+ 00100000-ff709000 ---p 00000000 00:00 0 [rollup]
+ Rss: 884 kB
+ Pss: 385 kB
+ Shared_Clean: 696 kB
+ Shared_Dirty: 0 kB
+ Private_Clean: 120 kB
+ Private_Dirty: 68 kB
+ Referenced: 884 kB
+ Anonymous: 68 kB
+ LazyFree: 0 kB
+ AnonHugePages: 0 kB
+ ShmemPmdMapped: 0 kB
+ Shared_Hugetlb: 0 kB
+ Private_Hugetlb: 0 kB
+ Swap: 0 kB
+ SwapPss: 0 kB
+ Locked: 385 kB
+
+
+
diff --git a/Documentation/arm64/tagged-pointers.txt b/Documentation/arm64/tagged-pointers.txt
index d9995f1..a25a99e 100644
--- a/Documentation/arm64/tagged-pointers.txt
+++ b/Documentation/arm64/tagged-pointers.txt
@@ -11,24 +11,56 @@
The kernel configures the translation tables so that translations made
via TTBR0 (i.e. userspace mappings) have the top byte (bits 63:56) of
the virtual address ignored by the translation hardware. This frees up
-this byte for application use, with the following caveats:
+this byte for application use.
- (1) The kernel requires that all user addresses passed to EL1
- are tagged with tag 0x00. This means that any syscall
- parameters containing user virtual addresses *must* have
- their top byte cleared before trapping to the kernel.
- (2) Non-zero tags are not preserved when delivering signals.
- This means that signal handlers in applications making use
- of tags cannot rely on the tag information for user virtual
- addresses being maintained for fields inside siginfo_t.
- One exception to this rule is for signals raised in response
- to watchpoint debug exceptions, where the tag information
- will be preserved.
+Passing tagged addresses to the kernel
+--------------------------------------
- (3) Special care should be taken when using tagged pointers,
- since it is likely that C compilers will not hazard two
- virtual addresses differing only in the upper byte.
+All interpretation of userspace memory addresses by the kernel assumes
+an address tag of 0x00.
+
+This includes, but is not limited to, addresses found in:
+
+ - pointer arguments to system calls, including pointers in structures
+ passed to system calls,
+
+ - the stack pointer (sp), e.g. when interpreting it to deliver a
+ signal,
+
+ - the frame pointer (x29) and frame records, e.g. when interpreting
+ them to generate a backtrace or call graph.
+
+Using non-zero address tags in any of these locations may result in an
+error code being returned, a (fatal) signal being raised, or other modes
+of failure.
+
+For these reasons, passing non-zero address tags to the kernel via
+system calls is forbidden, and using a non-zero address tag for sp is
+strongly discouraged.
+
+Programs maintaining a frame pointer and frame records that use non-zero
+address tags may suffer impaired or inaccurate debug and profiling
+visibility.
+
+
+Preserving tags
+---------------
+
+Non-zero tags are not preserved when delivering signals. This means that
+signal handlers in applications making use of tags cannot rely on the
+tag information for user virtual addresses being maintained for fields
+inside siginfo_t. One exception to this rule is for signals raised in
+response to watchpoint debug exceptions, where the tag information will
+be preserved.
The architecture prevents the use of a tagged PC, so the upper byte will
be set to a sign-extension of bit 55 on exception return.
+
+
+Other considerations
+--------------------
+
+Special care should be taken when using tagged pointers, since it is
+likely that C compilers will not hazard two virtual addresses differing
+only in the upper byte.
diff --git a/Documentation/devicetree/bindings/clock/sunxi.txt b/Documentation/devicetree/bindings/clock/sunxi.txt
index 8a47b77..e8c74a6 100644
--- a/Documentation/devicetree/bindings/clock/sunxi.txt
+++ b/Documentation/devicetree/bindings/clock/sunxi.txt
@@ -18,6 +18,7 @@
"allwinner,sun4i-a10-cpu-clk" - for the CPU multiplexer clock
"allwinner,sun4i-a10-axi-clk" - for the AXI clock
"allwinner,sun8i-a23-axi-clk" - for the AXI clock on A23
+ "allwinner,sun4i-a10-gates-clk" - for generic gates on all compatible SoCs
"allwinner,sun4i-a10-axi-gates-clk" - for the AXI gates
"allwinner,sun4i-a10-ahb-clk" - for the AHB clock
"allwinner,sun5i-a13-ahb-clk" - for the AHB clock on A13
@@ -43,6 +44,7 @@
"allwinner,sun6i-a31-apb0-gates-clk" - for the APB0 gates on A31
"allwinner,sun7i-a20-apb0-gates-clk" - for the APB0 gates on A20
"allwinner,sun8i-a23-apb0-gates-clk" - for the APB0 gates on A23
+ "allwinner,sun8i-h3-apb0-gates-clk" - for the APB0 gates on H3
"allwinner,sun9i-a80-apb0-gates-clk" - for the APB0 gates on A80
"allwinner,sun4i-a10-apb1-clk" - for the APB1 clock
"allwinner,sun9i-a80-apb1-clk" - for the APB1 bus clock on A80
diff --git a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen3.txt b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen3.txt
index 0123682..4207b1f 100644
--- a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen3.txt
+++ b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen3.txt
@@ -271,6 +271,14 @@
Definition: A boolean property that when defined holds SOC at 100% when
the battery is full.
+- qcom,linearize-soc
+ Usage: optional
+ Value type: <empty>
+ Definition: A boolean property that when defined linearizes the SOC
+ monotonically when it drops after charge termination,
+ to improve the user experience. This is applicable only
+ if "qcom,hold-soc-while-full" is specified.
+
- qcom,ki-coeff-soc-dischg
Usage: optional
Value type: <prop-encoded-array>
@@ -301,6 +309,13 @@
is specified to make it fully functional. Value has no
unit. Allowed range is 0 to 62200 in micro units.
+- qcom,ki-coeff-full-dischg
+ Usage: optional
+ Value type: <u32>
+ Definition: Ki coefficient value at full SOC that will be applied
+ during discharging. If not specified, a value of 0 will
+ be set. Allowed range is from 245 to 62256.
+
- qcom,fg-rconn-mohms
Usage: optional
Value type: <u32>
diff --git a/Documentation/sysctl/fs.txt b/Documentation/sysctl/fs.txt
index 302b5ed..35e17f7 100644
--- a/Documentation/sysctl/fs.txt
+++ b/Documentation/sysctl/fs.txt
@@ -265,6 +265,13 @@
==============================================================
+mount-max:
+
+This denotes the maximum number of mounts that may exist
+in a mount namespace.
+
+==============================================================
+
2. /proc/sys/fs/binfmt_misc
----------------------------------------------------------
diff --git a/Makefile b/Makefile
index 2086054..0c5a063 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 4
-SUBLEVEL = 56
+SUBLEVEL = 88
EXTRAVERSION =
NAME = Blurry Fish Butt
@@ -635,6 +635,9 @@
KBUILD_CFLAGS += $(call cc-option,-fno-delete-null-pointer-checks,)
KBUILD_CFLAGS += $(call cc-disable-warning,maybe-uninitialized,)
KBUILD_CFLAGS += $(call cc-disable-warning,frame-address,)
+KBUILD_CFLAGS += $(call cc-disable-warning, format-truncation)
+KBUILD_CFLAGS += $(call cc-disable-warning, format-overflow)
+KBUILD_CFLAGS += $(call cc-disable-warning, int-in-bool-context)
ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
KBUILD_CFLAGS += $(call cc-option,-Oz,-Os)
@@ -654,6 +657,12 @@
# Tell gcc to never replace conditional load with a non-conditional one
KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0)
+# check for 'asm goto'
+ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y)
+ KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO
+ KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO
+endif
+
ifdef CONFIG_READABLE_ASM
# Disable optimizations that make assembler listings hard to read.
# reorder blocks reorders the control in the function
@@ -824,12 +833,6 @@
# use the deterministic mode of AR if available
KBUILD_ARFLAGS := $(call ar-option,D)
-# check for 'asm goto'
-ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLAGS)), y)
- KBUILD_CFLAGS += -DCC_HAVE_ASM_GOTO
- KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO
-endif
-
include scripts/Makefile.kasan
include scripts/Makefile.extrawarn
include scripts/Makefile.ubsan
diff --git a/arch/Kconfig b/arch/Kconfig
index 98f64ad..ed2539c 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -225,8 +225,8 @@
config ARCH_TASK_STRUCT_ALLOCATOR
bool
-# Select if arch has its private alloc_thread_info() function
-config ARCH_THREAD_INFO_ALLOCATOR
+# Select if arch has its private alloc_thread_stack() function
+config ARCH_THREAD_STACK_ALLOCATOR
bool
# Select if arch wants to size task_struct dynamically via arch_task_struct_size:
diff --git a/arch/alpha/include/asm/types.h b/arch/alpha/include/asm/types.h
index 4cb4b6d..0bc66e1 100644
--- a/arch/alpha/include/asm/types.h
+++ b/arch/alpha/include/asm/types.h
@@ -1,6 +1,6 @@
#ifndef _ALPHA_TYPES_H
#define _ALPHA_TYPES_H
-#include <asm-generic/int-ll64.h>
+#include <uapi/asm/types.h>
#endif /* _ALPHA_TYPES_H */
diff --git a/arch/alpha/include/uapi/asm/types.h b/arch/alpha/include/uapi/asm/types.h
index 9fd3cd4..8d1024d 100644
--- a/arch/alpha/include/uapi/asm/types.h
+++ b/arch/alpha/include/uapi/asm/types.h
@@ -9,8 +9,18 @@
* need to be careful to avoid a name clashes.
*/
-#ifndef __KERNEL__
+/*
+ * This is here because we used to use l64 for alpha
+ * and we don't want to impact user mode with our change to ll64
+ * in the kernel.
+ *
+ * However, some user programs are fine with this. They can
+ * flag __SANE_USERSPACE_TYPES__ to get int-ll64.h here.
+ */
+#if !defined(__SANE_USERSPACE_TYPES__) && !defined(__KERNEL__)
#include <asm-generic/int-l64.h>
+#else
+#include <asm-generic/int-ll64.h>
#endif
#endif /* _UAPI_ALPHA_TYPES_H */
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index 6cc0816..63f06a2 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -1188,8 +1188,10 @@
if (!access_ok(VERIFY_WRITE, ur, sizeof(*ur)))
return -EFAULT;
- err = 0;
- err |= put_user(status, ustatus);
+ err = put_user(status, ustatus);
+ if (ret < 0)
+ return err ? err : ret;
+
err |= __put_user(r.ru_utime.tv_sec, &ur->ru_utime.tv_sec);
err |= __put_user(r.ru_utime.tv_usec, &ur->ru_utime.tv_usec);
err |= __put_user(r.ru_stime.tv_sec, &ur->ru_stime.tv_sec);
diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
index 210ef3e..0ddd714 100644
--- a/arch/arc/include/asm/cache.h
+++ b/arch/arc/include/asm/cache.h
@@ -88,7 +88,9 @@
#define ARC_REG_SLC_FLUSH 0x904
#define ARC_REG_SLC_INVALIDATE 0x905
#define ARC_REG_SLC_RGN_START 0x914
+#define ARC_REG_SLC_RGN_START1 0x915
#define ARC_REG_SLC_RGN_END 0x916
+#define ARC_REG_SLC_RGN_END1 0x917
/* Bit val in SLC_CONTROL */
#define SLC_CTRL_IM 0x040
diff --git a/arch/arc/include/asm/entry-arcv2.h b/arch/arc/include/asm/entry-arcv2.h
index b5ff87e..aee1a77 100644
--- a/arch/arc/include/asm/entry-arcv2.h
+++ b/arch/arc/include/asm/entry-arcv2.h
@@ -16,6 +16,7 @@
;
; Now manually save: r12, sp, fp, gp, r25
+ PUSH r30
PUSH r12
; Saving pt_regs->sp correctly requires some extra work due to the way
@@ -72,6 +73,7 @@
POPAX AUX_USER_SP
1:
POP r12
+ POP r30
.endm
diff --git a/arch/arc/include/asm/ptrace.h b/arch/arc/include/asm/ptrace.h
index 69095da..47111d5 100644
--- a/arch/arc/include/asm/ptrace.h
+++ b/arch/arc/include/asm/ptrace.h
@@ -84,7 +84,7 @@
unsigned long fp;
unsigned long sp; /* user/kernel sp depending on where we came from */
- unsigned long r12;
+ unsigned long r12, r30;
/*------- Below list auto saved by h/w -----------*/
unsigned long r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11;
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
index d81b6d7..9a84cbdd 100644
--- a/arch/arc/mm/cache.c
+++ b/arch/arc/mm/cache.c
@@ -543,6 +543,7 @@
static DEFINE_SPINLOCK(lock);
unsigned long flags;
unsigned int ctrl;
+ phys_addr_t end;
spin_lock_irqsave(&lock, flags);
@@ -572,8 +573,16 @@
* END needs to be setup before START (latter triggers the operation)
* END can't be same as START, so add (l2_line_sz - 1) to sz
*/
- write_aux_reg(ARC_REG_SLC_RGN_END, (paddr + sz + l2_line_sz - 1));
- write_aux_reg(ARC_REG_SLC_RGN_START, paddr);
+ end = paddr + sz + l2_line_sz - 1;
+ if (is_pae40_enabled())
+ write_aux_reg(ARC_REG_SLC_RGN_END1, upper_32_bits(end));
+
+ write_aux_reg(ARC_REG_SLC_RGN_END, lower_32_bits(end));
+
+ if (is_pae40_enabled())
+ write_aux_reg(ARC_REG_SLC_RGN_START1, upper_32_bits(paddr));
+
+ write_aux_reg(ARC_REG_SLC_RGN_START, lower_32_bits(paddr));
while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY);
diff --git a/arch/arm/boot/dts/armada-388-gp.dts b/arch/arm/boot/dts/armada-388-gp.dts
index cd31602..6c1b45c 100644
--- a/arch/arm/boot/dts/armada-388-gp.dts
+++ b/arch/arm/boot/dts/armada-388-gp.dts
@@ -89,7 +89,7 @@
pinctrl-names = "default";
pinctrl-0 = <&pca0_pins>;
interrupt-parent = <&gpio0>;
- interrupts = <18 IRQ_TYPE_EDGE_FALLING>;
+ interrupts = <18 IRQ_TYPE_LEVEL_LOW>;
gpio-controller;
#gpio-cells = <2>;
interrupt-controller;
@@ -101,7 +101,7 @@
compatible = "nxp,pca9555";
pinctrl-names = "default";
interrupt-parent = <&gpio0>;
- interrupts = <18 IRQ_TYPE_EDGE_FALLING>;
+ interrupts = <18 IRQ_TYPE_LEVEL_LOW>;
gpio-controller;
#gpio-cells = <2>;
interrupt-controller;
diff --git a/arch/arm/boot/dts/at91-sama5d3_xplained.dts b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
index f3e2b96..0bd325c 100644
--- a/arch/arm/boot/dts/at91-sama5d3_xplained.dts
+++ b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
@@ -162,9 +162,10 @@
};
adc0: adc@f8018000 {
+ atmel,adc-vref = <3300>;
+ atmel,adc-channels-used = <0xfe>;
pinctrl-0 = <
&pinctrl_adc0_adtrg
- &pinctrl_adc0_ad0
&pinctrl_adc0_ad1
&pinctrl_adc0_ad2
&pinctrl_adc0_ad3
@@ -172,8 +173,6 @@
&pinctrl_adc0_ad5
&pinctrl_adc0_ad6
&pinctrl_adc0_ad7
- &pinctrl_adc0_ad8
- &pinctrl_adc0_ad9
>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/bcm5301x.dtsi b/arch/arm/boot/dts/bcm5301x.dtsi
index 6f50f67..de8ac99 100644
--- a/arch/arm/boot/dts/bcm5301x.dtsi
+++ b/arch/arm/boot/dts/bcm5301x.dtsi
@@ -54,14 +54,14 @@
timer@0200 {
compatible = "arm,cortex-a9-global-timer";
reg = <0x0200 0x100>;
- interrupts = <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_PPI 11 IRQ_TYPE_EDGE_RISING>;
clocks = <&clk_periph>;
};
local-timer@0600 {
compatible = "arm,cortex-a9-twd-timer";
reg = <0x0600 0x100>;
- interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_PPI 13 IRQ_TYPE_EDGE_RISING>;
clocks = <&clk_periph>;
};
diff --git a/arch/arm/boot/dts/imx6dl.dtsi b/arch/arm/boot/dts/imx6dl.dtsi
index 4b0ec07..8ca9217 100644
--- a/arch/arm/boot/dts/imx6dl.dtsi
+++ b/arch/arm/boot/dts/imx6dl.dtsi
@@ -30,7 +30,7 @@
/* kHz uV */
996000 1250000
792000 1175000
- 396000 1075000
+ 396000 1150000
>;
fsl,soc-operating-points = <
/* ARM kHz SOC-PU uV */
diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts
index 5f5e0f3..27cd4ab 100644
--- a/arch/arm/boot/dts/omap3-n900.dts
+++ b/arch/arm/boot/dts/omap3-n900.dts
@@ -697,6 +697,8 @@
vmmc_aux-supply = <&vsim>;
bus-width = <8>;
non-removable;
+ no-sdio;
+ no-sd;
};
&mmc3 {
diff --git a/arch/arm/boot/dts/qcom/msm8998.dtsi b/arch/arm/boot/dts/qcom/msm8998.dtsi
index 34034fc..2ca8d47f 100644
--- a/arch/arm/boot/dts/qcom/msm8998.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8998.dtsi
@@ -282,28 +282,28 @@
energy-costs {
CPU_COST_0: core-cost0 {
busy-cost-data = <
- 68 11
- 83 20
- 101 25
- 118 30
- 136 33
- 154 35
- 155 36
- 189 42
- 203 47
- 220 54
- 239 62
- 255 67
- 270 73
- 287 79
- 305 88
- 322 95
- 342 104
- 358 111
- 385 134
- 402 155
- 420 178
- 438 201
+ 65 11
+ 80 20
+ 96 25
+ 113 30
+ 130 33
+ 147 35
+ 164 36
+ 181 42
+ 194 47
+ 211 54
+ 228 62
+ 243 67
+ 258 73
+ 275 79
+ 292 88
+ 308 95
+ 326 104
+ 342 111
+ 368 134
+ 384 155
+ 401 178
+ 419 201
>;
idle-cost-data = <
4 4 0 0
@@ -311,62 +311,66 @@
};
CPU_COST_1: core-cost1 {
busy-cost-data = <
- 132 56
- 153 76
- 186 91
- 220 105
- 254 118
- 289 135
- 323 150
- 357 162
- 400 181
- 435 196
- 467 214
- 502 229
- 528 248
- 561 280
- 596 316
- 630 354
- 664 392
- 698 439
- 733 495
- 767 565
- 800 622
- 833 691
- 869 792
- 903 889
- 938 1024
- 1020 1141
- 1024 1138
+ 129 56
+ 148 76
+ 182 91
+ 216 105
+ 247 118
+ 278 135
+ 312 150
+ 344 162
+ 391 181
+ 419 196
+ 453 214
+ 487 229
+ 509 248
+ 546 280
+ 581 316
+ 615 354
+ 650 392
+ 676 439
+ 712 495
+ 739 565
+ 776 622
+ 803 691
+ 834 792
+ 881 889
+ 914 1059
+ 957 1244
+ 975 1375
+ 996 1549
+ 1016 1617
+ 1021 1677
+ 1024 1683
>;
idle-cost-data = <
- 10 10 0 0
+ 10 10 0 0
>;
};
CLUSTER_COST_0: cluster-cost0 {
busy-cost-data = <
- 68 17
- 83 18
- 101 18
- 118 20
- 136 21
- 154 22
- 155 23
- 189 24
- 203 27
- 220 29
- 239 30
- 255 32
- 270 33
- 287 35
- 305 38
- 322 39
- 342 42
- 358 46
- 385 48
- 402 53
- 420 59
- 438 66
+ 65 17
+ 80 18
+ 96 18
+ 113 20
+ 130 21
+ 147 22
+ 164 23
+ 181 24
+ 194 27
+ 211 29
+ 228 30
+ 243 32
+ 258 33
+ 275 35
+ 292 38
+ 308 39
+ 326 42
+ 342 46
+ 368 48
+ 384 53
+ 401 59
+ 419 66
>;
idle-cost-data = <
31 31 31 0
@@ -374,32 +378,36 @@
};
CLUSTER_COST_1: cluster-cost1 {
busy-cost-data = <
- 132 24
- 153 25
- 186 26
- 220 29
- 254 30
- 289 33
- 323 35
- 357 37
- 400 38
- 435 40
- 467 43
- 502 44
- 528 46
- 561 50
- 596 54
- 630 60
- 664 63
- 698 70
- 733 74
- 767 80
- 800 87
- 833 96
- 869 104
- 903 120
- 938 130
- 1020 200
+ 129 24
+ 148 25
+ 182 26
+ 216 29
+ 247 30
+ 278 33
+ 312 35
+ 344 37
+ 391 38
+ 419 40
+ 453 43
+ 487 44
+ 509 46
+ 546 50
+ 581 54
+ 615 60
+ 650 63
+ 676 70
+ 712 74
+ 739 80
+ 776 87
+ 803 96
+ 834 104
+ 881 120
+ 914 130
+ 957 171
+ 975 178
+ 996 185
+ 1016 200
+ 1021 202
1024 203
>;
idle-cost-data = <
diff --git a/arch/arm/boot/dts/sama5d2.dtsi b/arch/arm/boot/dts/sama5d2.dtsi
index 4dfca8f..1bc61ec 100644
--- a/arch/arm/boot/dts/sama5d2.dtsi
+++ b/arch/arm/boot/dts/sama5d2.dtsi
@@ -856,6 +856,13 @@
compatible = "atmel,at91sam9260-usart";
reg = <0xf801c000 0x100>;
interrupts = <24 IRQ_TYPE_LEVEL_HIGH 7>;
+ dmas = <&dma0
+ (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) |
+ AT91_XDMAC_DT_PERID(35))>,
+ <&dma0
+ (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) |
+ AT91_XDMAC_DT_PERID(36))>;
+ dma-names = "tx", "rx";
clocks = <&uart0_clk>;
clock-names = "usart";
status = "disabled";
@@ -865,6 +872,13 @@
compatible = "atmel,at91sam9260-usart";
reg = <0xf8020000 0x100>;
interrupts = <25 IRQ_TYPE_LEVEL_HIGH 7>;
+ dmas = <&dma0
+ (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) |
+ AT91_XDMAC_DT_PERID(37))>,
+ <&dma0
+ (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) |
+ AT91_XDMAC_DT_PERID(38))>;
+ dma-names = "tx", "rx";
clocks = <&uart1_clk>;
clock-names = "usart";
status = "disabled";
@@ -874,6 +888,13 @@
compatible = "atmel,at91sam9260-usart";
reg = <0xf8024000 0x100>;
interrupts = <26 IRQ_TYPE_LEVEL_HIGH 7>;
+ dmas = <&dma0
+ (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) |
+ AT91_XDMAC_DT_PERID(39))>,
+ <&dma0
+ (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) |
+ AT91_XDMAC_DT_PERID(40))>;
+ dma-names = "tx", "rx";
clocks = <&uart2_clk>;
clock-names = "usart";
status = "disabled";
@@ -985,6 +1006,13 @@
compatible = "atmel,at91sam9260-usart";
reg = <0xfc008000 0x100>;
interrupts = <27 IRQ_TYPE_LEVEL_HIGH 7>;
+ dmas = <&dma0
+ (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) |
+ AT91_XDMAC_DT_PERID(41))>,
+ <&dma0
+ (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) |
+ AT91_XDMAC_DT_PERID(42))>;
+ dma-names = "tx", "rx";
clocks = <&uart3_clk>;
clock-names = "usart";
status = "disabled";
@@ -993,6 +1021,13 @@
uart4: serial@fc00c000 {
compatible = "atmel,at91sam9260-usart";
reg = <0xfc00c000 0x100>;
+ dmas = <&dma0
+ (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) |
+ AT91_XDMAC_DT_PERID(43))>,
+ <&dma0
+ (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1) |
+ AT91_XDMAC_DT_PERID(44))>;
+ dma-names = "tx", "rx";
interrupts = <28 IRQ_TYPE_LEVEL_HIGH 7>;
clocks = <&uart4_clk>;
clock-names = "usart";
diff --git a/arch/arm/boot/dts/tegra20-paz00.dts b/arch/arm/boot/dts/tegra20-paz00.dts
index ed7e100..d9ee0fd 100644
--- a/arch/arm/boot/dts/tegra20-paz00.dts
+++ b/arch/arm/boot/dts/tegra20-paz00.dts
@@ -565,6 +565,7 @@
regulator-name = "+3VS,vdd_pnl";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
+ regulator-boot-on;
gpio = <&gpio TEGRA_GPIO(A, 4) GPIO_ACTIVE_HIGH>;
enable-active-high;
};
diff --git a/arch/arm/configs/s3c2410_defconfig b/arch/arm/configs/s3c2410_defconfig
index f314236..01116ee 100644
--- a/arch/arm/configs/s3c2410_defconfig
+++ b/arch/arm/configs/s3c2410_defconfig
@@ -87,9 +87,9 @@
CONFIG_NETFILTER=y
CONFIG_NF_CONNTRACK=m
CONFIG_NF_CONNTRACK_EVENTS=y
-CONFIG_NF_CT_PROTO_DCCP=m
-CONFIG_NF_CT_PROTO_SCTP=m
-CONFIG_NF_CT_PROTO_UDPLITE=m
+CONFIG_NF_CT_PROTO_DCCP=y
+CONFIG_NF_CT_PROTO_SCTP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
CONFIG_NF_CONNTRACK_AMANDA=m
CONFIG_NF_CONNTRACK_FTP=m
CONFIG_NF_CONNTRACK_H323=m
diff --git a/arch/arm/include/asm/ftrace.h b/arch/arm/include/asm/ftrace.h
index bfe2a2f..22b7311 100644
--- a/arch/arm/include/asm/ftrace.h
+++ b/arch/arm/include/asm/ftrace.h
@@ -54,6 +54,24 @@
#define ftrace_return_address(n) return_address(n)
+#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME
+
+static inline bool arch_syscall_match_sym_name(const char *sym,
+ const char *name)
+{
+ if (!strcmp(sym, "sys_mmap2"))
+ sym = "sys_mmap_pgoff";
+ else if (!strcmp(sym, "sys_statfs64_wrapper"))
+ sym = "sys_statfs64";
+ else if (!strcmp(sym, "sys_fstatfs64_wrapper"))
+ sym = "sys_fstatfs64";
+ else if (!strcmp(sym, "sys_arm_fadvise64_64"))
+ sym = "sys_fadvise64_64";
+
+ /* Ignore case since sym may start with "SyS" instead of "sys" */
+ return !strcasecmp(sym, name);
+}
+
#endif /* ifndef __ASSEMBLY__ */
#endif /* _ASM_ARM_FTRACE */
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 80856de..82bdac0 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -73,7 +73,6 @@
obj-$(CONFIG_PERF_EVENTS) += perf_regs.o perf_callchain.o
obj-$(CONFIG_HW_PERF_EVENTS) += perf_event_xscale.o perf_event_v6.o \
perf_event_v7.o
-CFLAGS_pj4-cp0.o := -marm
AFLAGS_iwmmxt.o := -Wa,-mcpu=iwmmxt
obj-$(CONFIG_ARM_CPU_TOPOLOGY) += topology.o
obj-$(CONFIG_VDSO) += vdso.o
diff --git a/arch/arm/kernel/pj4-cp0.c b/arch/arm/kernel/pj4-cp0.c
index 8153e36..7c9248b 100644
--- a/arch/arm/kernel/pj4-cp0.c
+++ b/arch/arm/kernel/pj4-cp0.c
@@ -66,9 +66,13 @@
__asm__ __volatile__ (
"mcr p15, 0, %1, c1, c0, 2\n\t"
+#ifdef CONFIG_THUMB2_KERNEL
+ "isb\n\t"
+#else
"mrc p15, 0, %0, c1, c0, 2\n\t"
"mov %0, %0\n\t"
"sub pc, pc, #4\n\t"
+#endif
: "=r" (temp) : "r" (value));
}
diff --git a/arch/arm/kvm/init.S b/arch/arm/kvm/init.S
index 3988e72..bfc5aae 100644
--- a/arch/arm/kvm/init.S
+++ b/arch/arm/kvm/init.S
@@ -110,7 +110,6 @@
@ - Write permission implies XN: disabled
@ - Instruction cache: enabled
@ - Data/Unified cache: enabled
- @ - Memory alignment checks: enabled
@ - MMU: enabled (this code must be run from an identity mapping)
mrc p15, 4, r0, c1, c0, 0 @ HSCR
ldr r2, =HSCTLR_MASK
@@ -118,8 +117,8 @@
mrc p15, 0, r1, c1, c0, 0 @ SCTLR
ldr r2, =(HSCTLR_EE | HSCTLR_FI | HSCTLR_I | HSCTLR_C)
and r1, r1, r2
- ARM( ldr r2, =(HSCTLR_M | HSCTLR_A) )
- THUMB( ldr r2, =(HSCTLR_M | HSCTLR_A | HSCTLR_TE) )
+ ARM( ldr r2, =(HSCTLR_M) )
+ THUMB( ldr r2, =(HSCTLR_M | HSCTLR_TE) )
orr r1, r1, r2
orr r0, r0, r1
isb
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 7678724..4c055a6 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -301,6 +301,14 @@
next = kvm_pgd_addr_end(addr, end);
if (!pgd_none(*pgd))
unmap_puds(kvm, pgd, addr, next);
+ /*
+ * If we are dealing with a large range in
+ * stage2 table, release the kvm->mmu_lock
+ * to prevent starvation and lockup detector
+ * warnings.
+ */
+ if (kvm && (next != end))
+ cond_resched_lock(&kvm->mmu_lock);
} while (pgd++, addr = next, addr != end);
}
@@ -745,6 +753,7 @@
*/
static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
{
+ assert_spin_locked(&kvm->mmu_lock);
unmap_range(kvm, kvm->arch.pgd, start, size);
}
@@ -803,6 +812,7 @@
int idx;
idx = srcu_read_lock(&kvm->srcu);
+ down_read(¤t->mm->mmap_sem);
spin_lock(&kvm->mmu_lock);
slots = kvm_memslots(kvm);
@@ -810,6 +820,7 @@
stage2_unmap_memslot(kvm, memslot);
spin_unlock(&kvm->mmu_lock);
+ up_read(¤t->mm->mmap_sem);
srcu_read_unlock(&kvm->srcu, idx);
}
@@ -820,21 +831,25 @@
* Walks the level-1 page table pointed to by kvm->arch.pgd and frees all
* underlying level-2 and level-3 tables before freeing the actual level-1 table
* and setting the struct pointer to NULL.
- *
- * Note we don't need locking here as this is only called when the VM is
- * destroyed, which can only be done once.
*/
void kvm_free_stage2_pgd(struct kvm *kvm)
{
- if (kvm->arch.pgd == NULL)
- return;
+ void *pgd = NULL;
+ void *hwpgd = NULL;
- unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
- kvm_free_hwpgd(kvm_get_hwpgd(kvm));
- if (KVM_PREALLOC_LEVEL > 0)
- kfree(kvm->arch.pgd);
+ spin_lock(&kvm->mmu_lock);
+ if (kvm->arch.pgd) {
+ unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
+ pgd = READ_ONCE(kvm->arch.pgd);
+ hwpgd = kvm_get_hwpgd(kvm);
+ kvm->arch.pgd = NULL;
+ }
+ spin_unlock(&kvm->mmu_lock);
- kvm->arch.pgd = NULL;
+ if (hwpgd)
+ kvm_free_hwpgd(hwpgd);
+ if (KVM_PREALLOC_LEVEL > 0 && pgd)
+ kfree(pgd);
}
static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
@@ -862,6 +877,9 @@
pmd_t *pmd;
pud = stage2_get_pud(kvm, cache, addr);
+ if (!pud)
+ return NULL;
+
if (pud_none(*pud)) {
if (!cache)
return NULL;
@@ -1619,12 +1637,16 @@
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
{
+ if (!kvm->arch.pgd)
+ return 0;
trace_kvm_age_hva(start, end);
return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL);
}
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
+ if (!kvm->arch.pgd)
+ return 0;
trace_kvm_test_age_hva(hva);
return handle_hva_to_gpa(kvm, hva, hva, kvm_test_age_hva_handler, NULL);
}
@@ -1771,6 +1793,7 @@
(KVM_PHYS_SIZE >> PAGE_SHIFT))
return -EFAULT;
+ down_read(¤t->mm->mmap_sem);
/*
* A memory region could potentially cover multiple VMAs, and any holes
* between them, so iterate over all of them to find out if we can map
@@ -1814,8 +1837,10 @@
pa += vm_start - vma->vm_start;
/* IO region dirty page logging not allowed */
- if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES)
- return -EINVAL;
+ if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) {
+ ret = -EINVAL;
+ goto out;
+ }
ret = kvm_phys_addr_ioremap(kvm, gpa, pa,
vm_end - vm_start,
@@ -1827,7 +1852,7 @@
} while (hva < reg_end);
if (change == KVM_MR_FLAGS_ONLY)
- return ret;
+ goto out;
spin_lock(&kvm->mmu_lock);
if (ret)
@@ -1835,6 +1860,8 @@
else
stage2_flush_memslot(kvm, memslot);
spin_unlock(&kvm->mmu_lock);
+out:
+ up_read(¤t->mm->mmap_sem);
return ret;
}
diff --git a/arch/arm/kvm/psci.c b/arch/arm/kvm/psci.c
index a9b3b90..443db0c 100644
--- a/arch/arm/kvm/psci.c
+++ b/arch/arm/kvm/psci.c
@@ -208,9 +208,10 @@
static int kvm_psci_0_2_call(struct kvm_vcpu *vcpu)
{
- int ret = 1;
+ struct kvm *kvm = vcpu->kvm;
unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0);
unsigned long val;
+ int ret = 1;
switch (psci_fn) {
case PSCI_0_2_FN_PSCI_VERSION:
@@ -230,7 +231,9 @@
break;
case PSCI_0_2_FN_CPU_ON:
case PSCI_0_2_FN64_CPU_ON:
+ mutex_lock(&kvm->lock);
val = kvm_psci_vcpu_on(vcpu);
+ mutex_unlock(&kvm->lock);
break;
case PSCI_0_2_FN_AFFINITY_INFO:
case PSCI_0_2_FN64_AFFINITY_INFO:
@@ -279,6 +282,7 @@
static int kvm_psci_0_1_call(struct kvm_vcpu *vcpu)
{
+ struct kvm *kvm = vcpu->kvm;
unsigned long psci_fn = vcpu_get_reg(vcpu, 0) & ~((u32) 0);
unsigned long val;
@@ -288,7 +292,9 @@
val = PSCI_RET_SUCCESS;
break;
case KVM_PSCI_FN_CPU_ON:
+ mutex_lock(&kvm->lock);
val = kvm_psci_vcpu_on(vcpu);
+ mutex_unlock(&kvm->lock);
break;
default:
val = PSCI_RET_NOT_SUPPORTED;
diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
index 23726fb..d687f86 100644
--- a/arch/arm/mach-at91/pm.c
+++ b/arch/arm/mach-at91/pm.c
@@ -286,6 +286,22 @@
at91_ramc_write(1, AT91_DDRSDRC_LPR, saved_lpr1);
}
+static void sama5d3_ddr_standby(void)
+{
+ u32 lpr0;
+ u32 saved_lpr0;
+
+ saved_lpr0 = at91_ramc_read(0, AT91_DDRSDRC_LPR);
+ lpr0 = saved_lpr0 & ~AT91_DDRSDRC_LPCB;
+ lpr0 |= AT91_DDRSDRC_LPCB_POWER_DOWN;
+
+ at91_ramc_write(0, AT91_DDRSDRC_LPR, lpr0);
+
+ cpu_do_idle();
+
+ at91_ramc_write(0, AT91_DDRSDRC_LPR, saved_lpr0);
+}
+
/* We manage both DDRAM/SDRAM controllers, we need more than one value to
* remember.
*/
@@ -320,7 +336,7 @@
{ .compatible = "atmel,at91rm9200-sdramc", .data = at91rm9200_standby },
{ .compatible = "atmel,at91sam9260-sdramc", .data = at91sam9_sdram_standby },
{ .compatible = "atmel,at91sam9g45-ddramc", .data = at91_ddr_standby },
- { .compatible = "atmel,sama5d3-ddramc", .data = at91_ddr_standby },
+ { .compatible = "atmel,sama5d3-ddramc", .data = sama5d3_ddr_standby },
{ /*sentinel*/ }
};
diff --git a/arch/arm/mach-omap2/omap-headsmp.S b/arch/arm/mach-omap2/omap-headsmp.S
index 6d1dffc..748dde9 100644
--- a/arch/arm/mach-omap2/omap-headsmp.S
+++ b/arch/arm/mach-omap2/omap-headsmp.S
@@ -17,6 +17,7 @@
#include <linux/linkage.h>
#include <linux/init.h>
+#include <asm/assembler.h>
#include "omap44xx.h"
@@ -56,7 +57,7 @@
cmp r0, r4
bne wait_2
ldr r12, =API_HYP_ENTRY
- adr r0, hyp_boot
+ badr r0, hyp_boot
smc #0
hyp_boot:
b secondary_startup
diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c
index f86692d..83fc403 100644
--- a/arch/arm/mach-omap2/timer.c
+++ b/arch/arm/mach-omap2/timer.c
@@ -496,8 +496,7 @@
__omap_sync32k_timer_init(1, "timer_32k_ck", "ti,timer-alwon",
2, "timer_sys_ck", NULL, false);
- if (of_have_populated_dt())
- clocksource_probe();
+ clocksource_probe();
}
#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_SOC_AM43XX)
@@ -505,6 +504,8 @@
{
__omap_sync32k_timer_init(12, "secure_32k_fck", "ti,timer-secure",
2, "timer_sys_ck", NULL, false);
+
+ clocksource_probe();
}
#endif /* CONFIG_ARCH_OMAP3 */
@@ -513,6 +514,8 @@
{
__omap_sync32k_timer_init(2, "timer_sys_ck", NULL,
1, "timer_sys_ck", "ti,timer-alwon", true);
+
+ clocksource_probe();
}
#endif
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index c73f10c..83519af 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -314,8 +314,11 @@
* signal first. We do not need to release the mmap_sem because
* it would already be released in __lock_page_or_retry in
* mm/filemap.c. */
- if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+ if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
+ if (!user_mode(regs))
+ goto no_context;
return 0;
+ }
/*
* Major/minor page fault accounting is only done on the
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 221b11b..f353849 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -1197,15 +1197,15 @@
high_memory = __va(arm_lowmem_limit - 1) + 1;
+ if (!memblock_limit)
+ memblock_limit = arm_lowmem_limit;
+
/*
* Round the memblock limit down to a pmd size. This
* helps to ensure that we will allocate memory from the
* last full pmd, which should be mapped.
*/
- if (memblock_limit)
- memblock_limit = round_down(memblock_limit, PMD_SIZE);
- if (!memblock_limit)
- memblock_limit = arm_lowmem_limit;
+ memblock_limit = round_down(memblock_limit, PMD_SIZE);
memblock_set_current_limit(memblock_limit);
}
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index c520b0d1..4c679fe 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -107,6 +107,7 @@
select SYSCTL_EXCEPTION_TRACE
select HAVE_CONTEXT_TRACKING
select HAVE_ARM_SMCCC
+ select THREAD_INFO_IN_TASK
help
ARM 64-bit (AArch64) Linux support.
diff --git a/arch/arm64/boot/dts/htc/batterydata-muskie.dtsi b/arch/arm64/boot/dts/htc/batterydata-muskie.dtsi
index d915679..415dec03 100644
--- a/arch/arm64/boot/dts/htc/batterydata-muskie.dtsi
+++ b/arch/arm64/boot/dts/htc/batterydata-muskie.dtsi
@@ -19,6 +19,7 @@
qcom,fg-sys-term-current = <(-575)>;
qcom,fg-cutoff-voltage = <3400>;
qcom,hold-soc-while-full;
+ qcom,linearize-soc;
};
&pmi8998_charger {
diff --git a/arch/arm64/boot/dts/htc/batterydata-walleye.dtsi b/arch/arm64/boot/dts/htc/batterydata-walleye.dtsi
index fe8b295..518820c 100644
--- a/arch/arm64/boot/dts/htc/batterydata-walleye.dtsi
+++ b/arch/arm64/boot/dts/htc/batterydata-walleye.dtsi
@@ -19,6 +19,7 @@
qcom,fg-sys-term-current = <(-405)>;
qcom,fg-cutoff-voltage = <3400>;
qcom,hold-soc-while-full;
+ qcom,linearize-soc;
};
&pmi8998_charger {
diff --git a/arch/arm64/boot/dts/htc/msm8998-audio-muskie.dtsi b/arch/arm64/boot/dts/htc/msm8998-audio-muskie.dtsi
index 87dcec8..74f972d 100644
--- a/arch/arm64/boot/dts/htc/msm8998-audio-muskie.dtsi
+++ b/arch/arm64/boot/dts/htc/msm8998-audio-muskie.dtsi
@@ -113,6 +113,7 @@
ti,echo-ref = <0>; /* 0, left channel; 1, right channel; 2, both */
ti,i2s-bits = <24>; /* support 16, 24, 32 */
ti,bypass-tmax = <0>; /* 0, not bypass; 1, bypass */
+ ti,bob-fast-profile = <1>; /* 1:voice_handset */
status = "ok";
};
};
diff --git a/arch/arm64/boot/dts/htc/msm8998-htc-common.dtsi b/arch/arm64/boot/dts/htc/msm8998-htc-common.dtsi
index a579398..d88e978 100755
--- a/arch/arm64/boot/dts/htc/msm8998-htc-common.dtsi
+++ b/arch/arm64/boot/dts/htc/msm8998-htc-common.dtsi
@@ -92,7 +92,7 @@
&reserved_memory {
- easel_mem: easel_mem@a3800000 {
+ easel_mem: easel_mem@95c00000 {
compatible = "removed-dma-pool";
reg = <0 0x95c00000 0 0x4000000>;
label = "easel_mem";
diff --git a/arch/arm64/boot/dts/lge/dsi-panel-sw43402-dsc-qhd-cmd-rev1.dtsi b/arch/arm64/boot/dts/lge/dsi-panel-sw43402-dsc-qhd-cmd-rev1.dtsi
index eca8cb4..2740182 100644
--- a/arch/arm64/boot/dts/lge/dsi-panel-sw43402-dsc-qhd-cmd-rev1.dtsi
+++ b/arch/arm64/boot/dts/lge/dsi-panel-sw43402-dsc-qhd-cmd-rev1.dtsi
@@ -70,7 +70,7 @@
5D 01 02 80 00 FF
FF 15 00 00 00 00
05 01 00 00 00 00 01 35
- 05 01 00 00 3C 00 01 11
+ 05 01 00 00 96 00 01 11
15 01 00 00 00 00 02 53
00
15 01 00 00 00 00 02 55
@@ -83,10 +83,13 @@
20 0D 08 A8 0A AA
04 44 80 80 80 5C
5C 5C
- 05 01 00 00 00 00 01 29
];
qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+ qcom,mdss-dsi-post-panel-on-command = [
+ 05 01 00 00 00 00 01 29
+ ];
+
qcom,mdss-dsi-off-command = [
39 01 00 00 00 00 05 E8
08 90 18 05
diff --git a/arch/arm64/boot/dts/lge/dsi-panel-sw43402-dsc-qhd-cmd-rev2.dtsi b/arch/arm64/boot/dts/lge/dsi-panel-sw43402-dsc-qhd-cmd-rev2.dtsi
index 22c422e..714a409 100644
--- a/arch/arm64/boot/dts/lge/dsi-panel-sw43402-dsc-qhd-cmd-rev2.dtsi
+++ b/arch/arm64/boot/dts/lge/dsi-panel-sw43402-dsc-qhd-cmd-rev2.dtsi
@@ -70,7 +70,7 @@
5D 01 02 80 00 FF
FF 15 00 00 00 00
05 01 00 00 00 00 01 35
- 05 01 00 00 3C 00 01 11
+ 05 01 00 00 96 00 01 11
15 01 00 00 00 00 02 53
00
15 01 00 00 00 00 02 55
@@ -83,10 +83,13 @@
20 0D 08 A8 0A AA
04 44 80 80 80 5C
5C 5C
- 05 01 00 00 00 00 01 29
];
qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+ qcom,mdss-dsi-post-panel-on-command = [
+ 05 01 00 00 00 00 01 29
+ ];
+
qcom,mdss-dsi-off-command = [
39 01 00 00 00 00 05 E8
08 90 18 05
diff --git a/arch/arm64/boot/dts/lge/dsi-panel-sw43402-dsc-qhd-cmd.dtsi b/arch/arm64/boot/dts/lge/dsi-panel-sw43402-dsc-qhd-cmd.dtsi
index 1b5cd72..480431f 100644
--- a/arch/arm64/boot/dts/lge/dsi-panel-sw43402-dsc-qhd-cmd.dtsi
+++ b/arch/arm64/boot/dts/lge/dsi-panel-sw43402-dsc-qhd-cmd.dtsi
@@ -68,7 +68,7 @@
5D 01 02 80 00 FF
FF 15 00 00 00 00
05 01 00 00 00 00 01 35
- 05 01 00 00 3C 00 01 11
+ 05 01 00 00 96 00 01 11
15 01 00 00 00 00 02 53
00
15 01 00 00 00 00 02 55
@@ -81,10 +81,13 @@
20 0D 08 A8 0A AA
04 44 80 80 80 5C
5C 5C
- 05 01 00 00 00 00 01 29
];
qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
+ qcom,mdss-dsi-post-panel-on-command = [
+ 05 01 00 00 00 00 01 29
+ ];
+
qcom,mdss-dsi-off-command = [
39 01 00 00 00 00 05 E8
08 90 18 05
@@ -131,7 +134,7 @@
39 01 00 00 00 00 07 E5 04 06 03 03 56 61
39 01 00 00 00 00 09 E7 00 0D 76 23 00 00 0D 44
05 01 00 00 00 00 01 39
- 15 01 00 00 00 00 02 51 20
+ 15 01 00 00 00 00 02 51 35
05 01 00 00 54 00 01 13
15 01 00 00 00 00 02 5E 10
];
@@ -160,7 +163,7 @@
15 01 00 00 00 00 02 5E 10
];
qcom,alpm-dim-transition-command = [
- 15 01 00 00 00 00 02 51 20
+ 15 01 00 00 00 00 02 51 35
39 01 00 00 20 00 03 E4 30 44
];
qcom,alpm-low-transition-command = [
diff --git a/arch/arm64/boot/dts/lge/lge_blt35_tocad_3620mah.dtsi b/arch/arm64/boot/dts/lge/lge_blt35_tocad_3620mah.dtsi
index 0b61784..97bc169 100644
--- a/arch/arm64/boot/dts/lge/lge_blt35_tocad_3620mah.dtsi
+++ b/arch/arm64/boot/dts/lge/lge_blt35_tocad_3620mah.dtsi
@@ -15,6 +15,8 @@
qcom,max-voltage-uv = <4400000>;
qcom,fastchg-current-ma = <500>;
qcom,fg-cc-cv-threshold-mv = <4390>;
+ qcom,fg-chg-term-current = <200>;
+ qcom,fg-sys-term-current = <(-225)>;
qcom,batt-id-kohm = <56>;
qcom,battery-beta = <4150>;
qcom,battery-type = "lge_blt35_tocad_3620mah";
diff --git a/arch/arm64/boot/dts/lge/msm8998-taimen-pinctrl.dtsi b/arch/arm64/boot/dts/lge/msm8998-taimen-pinctrl.dtsi
index 00ac04b..3d9a56e 100644
--- a/arch/arm64/boot/dts/lge/msm8998-taimen-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/lge/msm8998-taimen-pinctrl.dtsi
@@ -29,6 +29,40 @@
};
};
+ /* SPI CONFIGURATION */
+
+ spi_1 {
+ spi_1_active {
+ mux {
+ pins = "gpio0", "gpio1",
+ "gpio2", "gpio3";
+ function = "blsp_spi1";
+ };
+
+ config {
+ pins = "gpio0", "gpio1",
+ "gpio2", "gpio3";
+ drive-strength = <6>;
+ bias-disable;
+ };
+ };
+
+ spi_1_sleep {
+ mux {
+ pins = "gpio0", "gpio1",
+ "gpio2", "gpio3";
+ function = "blsp_spi1";
+ };
+
+ config {
+ pins = "gpio0", "gpio1",
+ "gpio2", "gpio3";
+ drive-strength = <6>;
+ bias-pull-down;
+ };
+ };
+ };
+
spi_4 {
spi_4_active {
mux {
diff --git a/arch/arm64/boot/dts/lge/msm8998-taimen-pm.dtsi b/arch/arm64/boot/dts/lge/msm8998-taimen-pm.dtsi
index 178c441..248329c 100644
--- a/arch/arm64/boot/dts/lge/msm8998-taimen-pm.dtsi
+++ b/arch/arm64/boot/dts/lge/msm8998-taimen-pm.dtsi
@@ -218,6 +218,7 @@
qcom,fg-recharge-soc-thr = <97>;
qcom,fg-auto-recharge-soc;
qcom,hold-soc-while-full;
+ qcom,linearize-soc;
qcom,fg-esr-tight-lt-filter-micro-pct = <30000>;
qcom,fg-esr-broad-lt-filter-micro-pct = <30000>;
qcom,fg-batt-temp-compensation;
@@ -236,6 +237,7 @@
(-40) (-40) (-65) (-60)
(-60) (-40) (-40) (-30)
(-30)>;
+ qcom,fg-cutoff-voltage = <3400>;
};
&pmi8998_pdphy {
@@ -261,8 +263,8 @@
<PON_POWER_OFF_WARM_RESET>;
qcom,pon_1 { /* KPD_PWR_N */
qcom,support-reset = <1>;
- qcom,s1-timer = <6720>;
- qcom,s2-timer = <1000>;
+ qcom,s1-timer = <10256>;
+ qcom,s2-timer = <2000>;
qcom,s2-type = <8>;
};
qcom,pon_2 { /* RESIN_N */
diff --git a/arch/arm64/boot/dts/lge/msm8998-taimen-usb.dtsi b/arch/arm64/boot/dts/lge/msm8998-taimen-usb.dtsi
index b0eac2a..f97c478 100644
--- a/arch/arm64/boot/dts/lge/msm8998-taimen-usb.dtsi
+++ b/arch/arm64/boot/dts/lge/msm8998-taimen-usb.dtsi
@@ -32,7 +32,7 @@
0x80 0x2c /* pll_cmode */
0x0a 0x184 /* pll_lock_delay */
0x8c 0x21c /* imp_ctrl1 */
- 0xac 0x23c /* tune1 */
+ 0x05 0x23c /* tune1 */
0x0c 0x240 /* tune2 */
0x05 0x248 /* tune4 */
0x19 0xb4>; /* digital_timers_two */
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts b/arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts
index ce5d848..7b34822 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts
@@ -26,7 +26,7 @@
stdout-path = "serial0:115200n8";
};
- memory {
+ memory@0 {
device_type = "memory";
reg = <0x0 0x0 0x40000000>;
};
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
index 857eda5..172402cc 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
+++ b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
@@ -71,7 +71,7 @@
<1 10 0xf01>;
};
- amba_apu {
+ amba_apu: amba_apu@0 {
compatible = "simple-bus";
#address-cells = <2>;
#size-cells = <1>;
@@ -191,7 +191,7 @@
};
i2c0: i2c@ff020000 {
- compatible = "cdns,i2c-r1p10";
+ compatible = "cdns,i2c-r1p14", "cdns,i2c-r1p10";
status = "disabled";
interrupt-parent = <&gic>;
interrupts = <0 17 4>;
@@ -202,7 +202,7 @@
};
i2c1: i2c@ff030000 {
- compatible = "cdns,i2c-r1p10";
+ compatible = "cdns,i2c-r1p14", "cdns,i2c-r1p10";
status = "disabled";
interrupt-parent = <&gic>;
interrupts = <0 18 4>;
diff --git a/arch/arm64/configs/wahoo_defconfig b/arch/arm64/configs/wahoo_defconfig
index bb159f6..67fe63d 100644
--- a/arch/arm64/configs/wahoo_defconfig
+++ b/arch/arm64/configs/wahoo_defconfig
@@ -263,6 +263,7 @@
CONFIG_GOOGLE_EASEL=y
CONFIG_GOOGLE_EASEL_AP=y
CONFIG_ACCESS_RAMOOPS=y
+CONFIG_MNH_SIG=y
CONFIG_FPR_FPC=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
@@ -407,8 +408,8 @@
CONFIG_SPI_QUP=y
CONFIG_SPI_SPIDEV=y
CONFIG_SPMI=y
-CONFIG_PINCTRL_TCA6418=y
CONFIG_PINCTRL_MSM8998=y
+CONFIG_GPIOLIB=y
CONFIG_GPIO_SYSFS=y
CONFIG_GPIO_QPNP_PIN=y
CONFIG_HTC_BATTERY=m
@@ -458,7 +459,6 @@
CONFIG_MEDIA_CAMERA_SUPPORT=y
CONFIG_MEDIA_CONTROLLER=y
CONFIG_VIDEO_V4L2_SUBDEV_API=y
-CONFIG_VIDEO_ADV_DEBUG=y
CONFIG_VIDEO_FIXED_MINOR_RANGES=y
CONFIG_V4L_PLATFORM_DRIVERS=y
CONFIG_MSM_CAMERA=y
@@ -474,15 +474,6 @@
CONFIG_MSM_CSID=y
CONFIG_MSM_EEPROM=y
CONFIG_MSM_ISPIF=y
-CONFIG_IMX134=y
-CONFIG_IMX132=y
-CONFIG_OV9724=y
-CONFIG_OV5648=y
-CONFIG_GC0339=y
-CONFIG_OV8825=y
-CONFIG_OV8865=y
-CONFIG_s5k4e1=y
-CONFIG_OV12830=y
CONFIG_FW_UPDATE=y
CONFIG_MSM_V4L2_VIDEO_OVERLAY_DEVICE=y
CONFIG_MSMB_JPEG=y
@@ -547,10 +538,6 @@
CONFIG_HID_WIIMOTE=y
CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
CONFIG_USB_XHCI_HCD=y
-CONFIG_USB_EHCI_HCD=y
-CONFIG_USB_EHCI_HCD_PLATFORM=y
-CONFIG_USB_OHCI_HCD=y
-CONFIG_USB_OHCI_HCD_PLATFORM=y
CONFIG_USB_STORAGE=y
CONFIG_USB_DWC3=y
CONFIG_USB_PD_ENGINE=y
@@ -729,7 +716,6 @@
# CONFIG_PRINT_QUOTA_WARNING is not set
CONFIG_QFMT_V2=y
CONFIG_FUSE_FS=y
-CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
@@ -760,17 +746,6 @@
CONFIG_CC_WERROR=y
CONFIG_PID_IN_CONTEXTIDR=y
CONFIG_DEBUG_SET_MODULE_RONX=y
-CONFIG_CORESIGHT=y
-CONFIG_CORESIGHT_EVENT=y
-CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
-CONFIG_CORESIGHT_QCOM_REPLICATOR=y
-CONFIG_CORESIGHT_STM=y
-CONFIG_CORESIGHT_HWEVENT=y
-CONFIG_CORESIGHT_CTI=y
-CONFIG_CORESIGHT_TPDA=y
-CONFIG_CORESIGHT_TPDM=y
-CONFIG_CORESIGHT_QPDI=y
-CONFIG_CORESIGHT_SOURCE_DUMMY=y
CONFIG_PFK=y
CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
CONFIG_SECURITY=y
@@ -790,6 +765,12 @@
CONFIG_CRYPTO_DEV_OTA_CRYPTO=y
CONFIG_CRYPTO_DEV_QCE=y
CONFIG_CRYPTO_DEV_QCOM_ICE=y
+CONFIG_ASYMMETRIC_KEY_TYPE=y
+CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y
+CONFIG_X509_CERTIFICATE_PARSER=y
+CONFIG_PKCS7_MESSAGE_PARSER=y
+CONFIG_SYSTEM_TRUSTED_KEYRING=y
+CONFIG_SYSTEM_TRUSTED_KEYS="certs/esl_key.pem"
CONFIG_ARM64_CRYPTO=y
CONFIG_CRYPTO_SHA1_ARM64_CE=y
CONFIG_CRYPTO_SHA2_ARM64_CE=y
diff --git a/arch/arm64/crypto/Makefile b/arch/arm64/crypto/Makefile
index a2f1752..3174f2c 100644
--- a/arch/arm64/crypto/Makefile
+++ b/arch/arm64/crypto/Makefile
@@ -39,7 +39,7 @@
obj-$(CONFIG_CRYPTO_CRC32_ARM64) += crc32-arm64.o
-CFLAGS_crc32-arm64.o := -mcpu=generic+crc -Wa,-mcpu=generic+crc
+CFLAGS_crc32-arm64.o := -mcpu=generic+crc
$(obj)/aes-glue-%.o: $(src)/aes-glue.c FORCE
$(call if_changed_rule,cc_o_c)
diff --git a/arch/arm64/include/asm/acpi.h b/arch/arm64/include/asm/acpi.h
index aee323b..0a11cd5 100644
--- a/arch/arm64/include/asm/acpi.h
+++ b/arch/arm64/include/asm/acpi.h
@@ -22,9 +22,9 @@
#define ACPI_MADT_GICC_LENGTH \
(acpi_gbl_FADT.header.revision < 6 ? 76 : 80)
-#define BAD_MADT_GICC_ENTRY(entry, end) \
- (!(entry) || (unsigned long)(entry) + sizeof(*(entry)) > (end) || \
- (entry)->header.length != ACPI_MADT_GICC_LENGTH)
+#define BAD_MADT_GICC_ENTRY(entry, end) \
+ (!(entry) || (entry)->header.length != ACPI_MADT_GICC_LENGTH || \
+ (unsigned long)(entry) + ACPI_MADT_GICC_LENGTH > (end))
/* Basic configuration for ACPI */
#ifdef CONFIG_ACPI
diff --git a/arch/arm64/include/asm/asm-uaccess.h b/arch/arm64/include/asm/asm-uaccess.h
new file mode 100644
index 0000000..be2d234
--- /dev/null
+++ b/arch/arm64/include/asm/asm-uaccess.h
@@ -0,0 +1,13 @@
+#ifndef __ASM_ASM_UACCESS_H
+#define __ASM_ASM_UACCESS_H
+
+/*
+ * Remove the address tag from a virtual address, if present.
+ */
+ .macro clear_address_tag, dst, addr
+ tst \addr, #(1 << 55)
+ bic \dst, \addr, #(0xff << 56)
+ csel \dst, \dst, \addr, eq
+ .endm
+
+#endif
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 02e4b7e..0e67a50 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -235,14 +235,25 @@
.endm
/*
+ * @dst: Result of per_cpu(sym, smp_processor_id())
* @sym: The name of the per-cpu variable
- * @reg: Result of per_cpu(sym, smp_processor_id())
* @tmp: scratch register
*/
- .macro this_cpu_ptr, sym, reg, tmp
- adr_l \reg, \sym
+ .macro adr_this_cpu, dst, sym, tmp
+ adr_l \dst, \sym
mrs \tmp, tpidr_el1
- add \reg, \reg, \tmp
+ add \dst, \dst, \tmp
+ .endm
+
+ /*
+ * @dst: Result of READ_ONCE(per_cpu(sym, smp_processor_id()))
+ * @sym: The name of the per-cpu variable
+ * @tmp: scratch register
+ */
+ .macro ldr_this_cpu dst, sym, tmp
+ adr_l \dst, \sym
+ mrs \tmp, tpidr_el1
+ ldr \dst, [\dst, \tmp]
.endm
/*
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index c5dbc5c..0671711 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -44,23 +44,33 @@
#define smp_store_release(p, v) \
do { \
+ union { typeof(*p) __val; char __c[1]; } __u = \
+ { .__val = (__force typeof(*p)) (v) }; \
compiletime_assert_atomic_type(*p); \
switch (sizeof(*p)) { \
case 1: \
asm volatile ("stlrb %w1, %0" \
- : "=Q" (*p) : "r" (v) : "memory"); \
+ : "=Q" (*p) \
+ : "r" (*(__u8 *)__u.__c) \
+ : "memory"); \
break; \
case 2: \
asm volatile ("stlrh %w1, %0" \
- : "=Q" (*p) : "r" (v) : "memory"); \
+ : "=Q" (*p) \
+ : "r" (*(__u16 *)__u.__c) \
+ : "memory"); \
break; \
case 4: \
asm volatile ("stlr %w1, %0" \
- : "=Q" (*p) : "r" (v) : "memory"); \
+ : "=Q" (*p) \
+ : "r" (*(__u32 *)__u.__c) \
+ : "memory"); \
break; \
case 8: \
asm volatile ("stlr %1, %0" \
- : "=Q" (*p) : "r" (v) : "memory"); \
+ : "=Q" (*p) \
+ : "r" (*(__u64 *)__u.__c) \
+ : "memory"); \
break; \
} \
} while (0)
diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
index 510c7b4..270c6b7 100644
--- a/arch/arm64/include/asm/cmpxchg.h
+++ b/arch/arm64/include/asm/cmpxchg.h
@@ -48,7 +48,7 @@
" swp" #acq_lse #rel #sz "\t%" #w "3, %" #w "0, %2\n" \
" nop\n" \
" " #nop_lse) \
- : "=&r" (ret), "=&r" (tmp), "+Q" (*(u8 *)ptr) \
+ : "=&r" (ret), "=&r" (tmp), "+Q" (*(unsigned long *)ptr) \
: "r" (x) \
: cl); \
\
diff --git a/arch/arm64/include/asm/current.h b/arch/arm64/include/asm/current.h
new file mode 100644
index 0000000..483a6c9d3
--- /dev/null
+++ b/arch/arm64/include/asm/current.h
@@ -0,0 +1,35 @@
+#ifndef __ASM_CURRENT_H
+#define __ASM_CURRENT_H
+
+#include <linux/compiler.h>
+
+#include <asm/sysreg.h>
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+struct task_struct;
+
+/*
+ * We don't use read_sysreg() as we want the compiler to cache the value where
+ * possible.
+ */
+static __always_inline struct task_struct *get_current(void)
+{
+ unsigned long sp_el0;
+
+ asm ("mrs %0, sp_el0" : "=r" (sp_el0));
+
+ return (struct task_struct *)sp_el0;
+}
+#define current get_current()
+#else
+#include <linux/thread_info.h>
+#define get_current() (current_thread_info()->task)
+#define current get_current()
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_CURRENT_H */
+
diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
index 7875c88..cd3dfa3 100644
--- a/arch/arm64/include/asm/elf.h
+++ b/arch/arm64/include/asm/elf.h
@@ -113,12 +113,11 @@
#define ELF_EXEC_PAGESIZE PAGE_SIZE
/*
- * This is the location that an ET_DYN program is loaded if exec'ed. Typical
- * use of this is to invoke "./ld.so someprog" to test out a new version of
- * the loader. We need to make sure that it is out of the way of the program
- * that it will "exec", and that there is sufficient room for the brk.
+ * This is the base location for PIE (ET_DYN with INTERP) loads. On
+ * 64-bit, this is above 4GB to leave the entire 32-bit address
+ * space open for things that want to use the area for 32-bit pointers.
*/
-#define ELF_ET_DYN_BASE (2 * TASK_SIZE_64 / 3)
+#define ELF_ET_DYN_BASE (2 * TASK_SIZE_64 / 3)
#ifndef __ASSEMBLY__
@@ -169,7 +168,8 @@
#ifdef CONFIG_COMPAT
-#define COMPAT_ELF_ET_DYN_BASE (2 * TASK_SIZE_32 / 3)
+/* PIE load location for compat arm. Must match ARM ELF_ET_DYN_BASE. */
+#define COMPAT_ELF_ET_DYN_BASE 0x000400000UL
/* AArch32 registers. */
#define COMPAT_ELF_NGREG 18
diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
index 8a33685..2ce1a02 100644
--- a/arch/arm64/include/asm/percpu.h
+++ b/arch/arm64/include/asm/percpu.h
@@ -16,6 +16,8 @@
#ifndef __ASM_PERCPU_H
#define __ASM_PERCPU_H
+#include <asm/stack_pointer.h>
+
static inline void set_my_cpu_offset(unsigned long off)
{
asm volatile("msr tpidr_el1, %0" :: "r" (off) : "memory");
diff --git a/arch/arm64/include/asm/perf_event.h b/arch/arm64/include/asm/perf_event.h
index 7bd3cdb..91b6be0 100644
--- a/arch/arm64/include/asm/perf_event.h
+++ b/arch/arm64/include/asm/perf_event.h
@@ -17,6 +17,8 @@
#ifndef __ASM_PERF_EVENT_H
#define __ASM_PERF_EVENT_H
+#include <asm/stack_pointer.h>
+
#ifdef CONFIG_PERF_EVENTS
struct pt_regs;
extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
index 2013a4d..4325b36 100644
--- a/arch/arm64/include/asm/smp.h
+++ b/arch/arm64/include/asm/smp.h
@@ -16,11 +16,22 @@
#ifndef __ASM_SMP_H
#define __ASM_SMP_H
+#include <asm/percpu.h>
+
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/thread_info.h>
-#define raw_smp_processor_id() (current_thread_info()->cpu)
+DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
+
+/*
+ * We don't use this_cpu_read(cpu_number) as that has implicit writes to
+ * preempt_count, and associated (compiler) barriers, that we'd like to avoid
+ * the expense of. If we're preemptible, the value can be stale at use anyway.
+ * And we can't use this_cpu_ptr() either, as that winds up recursing back
+ * here under CONFIG_DEBUG_PREEMPT=y.
+ */
+#define raw_smp_processor_id() (*raw_cpu_ptr(&cpu_number))
struct seq_file;
@@ -57,6 +68,9 @@
*/
struct secondary_data {
void *stack;
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+ struct task_struct *task;
+#endif
};
extern struct secondary_data secondary_data;
extern void secondary_entry(void);
diff --git a/arch/arm64/include/asm/stack_pointer.h b/arch/arm64/include/asm/stack_pointer.h
new file mode 100644
index 0000000..ffcdf74
--- /dev/null
+++ b/arch/arm64/include/asm/stack_pointer.h
@@ -0,0 +1,9 @@
+#ifndef __ASM_STACK_POINTER_H
+#define __ASM_STACK_POINTER_H
+
+/*
+ * how to get the current stack pointer from C
+ */
+register unsigned long current_stack_pointer asm ("sp");
+
+#endif /* __ASM_STACK_POINTER_H */
diff --git a/arch/arm64/include/asm/suspend.h b/arch/arm64/include/asm/suspend.h
index 024d623..92d6a62 100644
--- a/arch/arm64/include/asm/suspend.h
+++ b/arch/arm64/include/asm/suspend.h
@@ -1,7 +1,7 @@
#ifndef __ASM_SUSPEND_H
#define __ASM_SUSPEND_H
-#define NR_CTX_REGS 10
+#define NR_CTX_REGS 12
#define NR_CALLEE_SAVED_REGS 12
/*
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index 4bb038e..db6d058 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -36,25 +36,36 @@
struct task_struct;
+#include <asm/stack_pointer.h>
#include <asm/types.h>
typedef unsigned long mm_segment_t;
/*
* low level task data that entry.S needs immediate access to.
- * __switch_to() assumes cpu_context follows immediately after cpu_domain.
*/
struct thread_info {
unsigned long flags; /* low level flags */
mm_segment_t addr_limit; /* address limit */
+#ifndef CONFIG_THREAD_INFO_IN_TASK
struct task_struct *task; /* main task structure */
+#endif
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
u64 ttbr0; /* saved TTBR0_EL1 */
#endif
int preempt_count; /* 0 => preemptable, <0 => bug */
+#ifndef CONFIG_THREAD_INFO_IN_TASK
int cpu; /* cpu */
+#endif
};
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+#define INIT_THREAD_INFO(tsk) \
+{ \
+ .preempt_count = INIT_PREEMPT_COUNT, \
+ .addr_limit = KERNEL_DS, \
+}
+#else
#define INIT_THREAD_INFO(tsk) \
{ \
.task = &tsk, \
@@ -63,14 +74,6 @@
.addr_limit = KERNEL_DS, \
}
-#define init_thread_info (init_thread_union.thread_info)
-#define init_stack (init_thread_union.stack)
-
-/*
- * how to get the current stack pointer from C
- */
-register unsigned long current_stack_pointer asm ("sp");
-
/*
* how to get the thread information struct from C
*/
@@ -88,6 +91,11 @@
return (struct thread_info *)sp_el0;
}
+#define init_thread_info (init_thread_union.thread_info)
+#endif
+
+#define init_stack (init_thread_union.stack)
+
#define thread_saved_pc(tsk) \
((unsigned long)(tsk->thread.cpu_context.pc))
#define thread_saved_sp(tsk) \
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index efafdf3..064cef9 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -27,6 +27,7 @@
/*
* User space memory access functions
*/
+#include <linux/bitops.h>
#include <linux/string.h>
#include <linux/thread_info.h>
@@ -108,16 +109,24 @@
*/
#define __range_ok(addr, size) \
({ \
+ unsigned long __addr = (unsigned long __force)(addr); \
unsigned long flag, roksum; \
__chk_user_ptr(addr); \
asm("adds %1, %1, %3; ccmp %1, %4, #2, cc; cset %0, ls" \
: "=&r" (flag), "=&r" (roksum) \
- : "1" (addr), "Ir" (size), \
+ : "1" (__addr), "Ir" (size), \
"r" (current_thread_info()->addr_limit) \
: "cc"); \
flag; \
})
+/*
+ * When dealing with data aborts, watchpoints, or instruction traps we may end
+ * up with a tagged userland pointer. Clear the tag to get a sane pointer to
+ * pass on to access_ok(), for instance.
+ */
+#define untagged_addr(addr) sign_extend64(addr, 55)
+
#define access_ok(type, addr, size) __range_ok(addr, size)
#define user_addr_max get_fs
diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c
index 884b317e5..10d3642 100644
--- a/arch/arm64/kernel/armv8_deprecated.c
+++ b/arch/arm64/kernel/armv8_deprecated.c
@@ -299,7 +299,8 @@
_ASM_EXTABLE(0b, 4b) \
_ASM_EXTABLE(1b, 4b) \
: "=&r" (res), "+r" (data), "=&r" (temp) \
- : "r" (addr), "i" (-EAGAIN), "i" (-EFAULT) \
+ : "r" ((unsigned long)addr), "i" (-EAGAIN), \
+ "i" (-EFAULT) \
: "memory"); \
uaccess_disable(); \
} while (0)
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index c9ea871..4469bdc 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -35,11 +35,16 @@
{
DEFINE(TSK_ACTIVE_MM, offsetof(struct task_struct, active_mm));
BLANK();
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+ DEFINE(TSK_TI_FLAGS, offsetof(struct task_struct, thread_info.flags));
+ DEFINE(TSK_TI_PREEMPT, offsetof(struct task_struct, thread_info.preempt_count));
+ DEFINE(TSK_TI_ADDR_LIMIT, offsetof(struct task_struct, thread_info.addr_limit));
+ DEFINE(TSK_STACK, offsetof(struct task_struct, stack));
+#else
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit));
- DEFINE(TI_TASK, offsetof(struct thread_info, task));
- DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
+#endif
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
DEFINE(TSK_TI_TTBR0, offsetof(struct thread_info, ttbr0));
#endif
@@ -120,6 +125,11 @@
DEFINE(TZ_MINWEST, offsetof(struct timezone, tz_minuteswest));
DEFINE(TZ_DSTTIME, offsetof(struct timezone, tz_dsttime));
BLANK();
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+ DEFINE(CPU_BOOT_STACK, offsetof(struct secondary_data, stack));
+ DEFINE(CPU_BOOT_TASK, offsetof(struct secondary_data, task));
+ BLANK();
+#endif
#ifdef CONFIG_KVM_ARM_HOST
DEFINE(VCPU_CONTEXT, offsetof(struct kvm_vcpu, arch.ctxt));
DEFINE(CPU_GP_REGS, offsetof(struct kvm_cpu_context, gp_regs));
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index bab6fa1..f7c5d7e 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -32,6 +32,7 @@
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/uaccess.h>
+#include <asm/asm-uaccess.h>
#include <asm/unistd.h>
/*
@@ -92,9 +93,14 @@
.if \el == 0
mrs x21, sp_el0
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+ ldr_this_cpu tsk, __entry_task, x20 // Ensure MDSCR_EL1.SS is clear,
+ ldr x19, [tsk, #TSK_TI_FLAGS] // since we can unmask debug
+#else
mov tsk, sp
and tsk, tsk, #~(THREAD_SIZE - 1) // Ensure MDSCR_EL1.SS is clear,
ldr x19, [tsk, #TI_FLAGS] // since we can unmask debug
+#endif
disable_step_tsk x19, x20 // exceptions when scheduling.
mov x29, xzr // fp pointed to user-space
@@ -102,10 +108,18 @@
add x21, sp, #S_FRAME_SIZE
get_thread_info tsk
/* Save the task's original addr_limit and set USER_DS (TASK_SIZE_64) */
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+ ldr x20, [tsk, #TSK_TI_ADDR_LIMIT]
+#else
ldr x20, [tsk, #TI_ADDR_LIMIT]
+#endif
str x20, [sp, #S_ORIG_ADDR_LIMIT]
mov x20, #TASK_SIZE_64
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+ str x20, [tsk, #TSK_TI_ADDR_LIMIT]
+#else
str x20, [tsk, #TI_ADDR_LIMIT]
+#endif
ALTERNATIVE(nop, SET_PSTATE_UAO(0), ARM64_HAS_UAO, CONFIG_ARM64_UAO)
.endif /* \el == 0 */
mrs x22, elr_el1
@@ -167,7 +181,11 @@
.if \el != 0
/* Restore the task's original addr_limit. */
ldr x20, [sp, #S_ORIG_ADDR_LIMIT]
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+ str x20, [tsk, #TSK_TI_ADDR_LIMIT]
+#else
str x20, [tsk, #TI_ADDR_LIMIT]
+#endif
/* No need to restore UAO, it will be restored from SPSR_EL1 */
.endif
@@ -257,15 +275,22 @@
mov x19, sp // preserve the original sp
/*
- * Compare sp with the current thread_info, if the top
- * ~(THREAD_SIZE - 1) bits match, we are on a task stack, and
- * should switch to the irq stack.
+ * Compare sp with the base of the task stack.
+ * If the top ~(THREAD_SIZE - 1) bits match, we are on a task stack,
+ * and should switch to the irq stack.
*/
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+ ldr x25, [tsk, TSK_STACK]
+ eor x25, x25, x19
+ and x25, x25, #~(THREAD_SIZE - 1)
+ cbnz x25, 9998f
+#else
and x25, x19, #~(THREAD_SIZE - 1)
cmp x25, tsk
b.ne 9998f
+#endif
- this_cpu_ptr irq_stack, x25, x26
+ adr_this_cpu x25, irq_stack, x26
mov x26, #IRQ_STACK_START_SP
add x26, x25, x26
@@ -437,12 +462,13 @@
/*
* Data abort handling
*/
- mrs x0, far_el1
+ mrs x3, far_el1
enable_dbg
// re-enable interrupts if they were enabled in the aborted context
tbnz x23, #7, 1f // PSR_I_BIT
enable_irq
1:
+ clear_address_tag x0, x3
mov x2, sp // struct pt_regs
bl do_mem_abort
@@ -495,9 +521,17 @@
irq_handler
#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+ ldr w24, [tsk, #TSK_TI_PREEMPT] // get preempt count
+#else
ldr w24, [tsk, #TI_PREEMPT] // get preempt count
+#endif
cbnz w24, 1f // preempt count != 0
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+ ldr x0, [tsk, #TSK_TI_FLAGS] // get flags
+#else
ldr x0, [tsk, #TI_FLAGS] // get flags
+#endif
tbz x0, #TIF_NEED_RESCHED, 1f // needs rescheduling?
bl el1_preempt
1:
@@ -512,7 +546,11 @@
el1_preempt:
mov x24, lr
1: bl preempt_schedule_irq // irq en/disable is done inside
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+ ldr x0, [tsk, #TSK_TI_FLAGS] // get new tasks TI_FLAGS
+#else
ldr x0, [tsk, #TI_FLAGS] // get new tasks TI_FLAGS
+#endif
tbnz x0, #TIF_NEED_RESCHED, 1b // needs rescheduling?
ret x24
#endif
@@ -603,7 +641,7 @@
// enable interrupts before calling the main handler
enable_dbg_and_irq
ct_user_exit
- bic x0, x26, #(0xff << 56)
+ clear_address_tag x0, x26
mov x1, x25
mov x2, sp
bl do_mem_abort
@@ -769,8 +807,12 @@
mov v15.16b, v15.16b
#endif
mov sp, x9
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+ msr sp_el0, x1
+#else
and x9, x9, #~(THREAD_SIZE - 1)
msr sp_el0, x9
+#endif
ret
ENDPROC(cpu_switch_to)
@@ -781,7 +823,11 @@
ret_fast_syscall:
disable_irq // disable interrupts
str x0, [sp, #S_X0] // returned x0
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+ ldr x1, [tsk, #TSK_TI_FLAGS] // re-check for syscall tracing
+#else
ldr x1, [tsk, #TI_FLAGS] // re-check for syscall tracing
+#endif
and x2, x1, #_TIF_SYSCALL_WORK
cbnz x2, ret_fast_syscall_trace
and x2, x1, #_TIF_WORK_MASK
@@ -813,7 +859,11 @@
*/
ret_to_user:
disable_irq // disable interrupts
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+ ldr x1, [tsk, #TSK_TI_FLAGS]
+#else
ldr x1, [tsk, #TI_FLAGS]
+#endif
and x2, x1, #_TIF_WORK_MASK
cbnz x2, work_pending
enable_step_tsk x1, x2
@@ -845,7 +895,11 @@
enable_dbg_and_irq
ct_user_exit 1
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+ ldr x16, [tsk, #TSK_TI_FLAGS] // check for syscall hooks
+#else
ldr x16, [tsk, #TI_FLAGS] // check for syscall hooks
+#endif
tst x16, #_TIF_SYSCALL_WORK
b.ne __sys_trace
cmp scno, sc_nr // check upper syscall limit
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index acc1afd..f995dae 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -157,9 +157,11 @@
void fpsimd_flush_thread(void)
{
+ preempt_disable();
memset(¤t->thread.fpsimd_state, 0, sizeof(struct fpsimd_state));
fpsimd_flush_task_state(current);
set_thread_flag(TIF_FOREIGN_FPSTATE);
+ preempt_enable();
}
/*
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index a1c2ac3..0a0cd04 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -418,6 +418,7 @@
.set initial_sp, init_thread_union + THREAD_START_SP
__primary_switched:
mov x28, lr // preserve LR
+
adr_l x8, vectors // load VBAR_EL1 with virtual
msr vbar_el1, x8 // vector table address
isb
@@ -430,10 +431,18 @@
bl __pi_memset
dsb ishst // Make zero page visible to PTW
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+ adrp x4, init_thread_union
+ add sp, x4, #THREAD_SIZE
+ adr_l x5, init_task
+ msr sp_el0, x5 // Save thread_info
+#else
adr_l sp, initial_sp, x4
mov x4, sp
and x4, x4, #~(THREAD_SIZE - 1)
msr sp_el0, x4 // Save thread_info
+#endif
+
str_l x21, __fdt_pointer, x5 // Save FDT pointer
ldr_l x4, kimage_vaddr // Save the offset between
@@ -642,11 +651,18 @@
adr_l x5, vectors
msr vbar_el1, x5
isb
-
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+ adr_l x0, secondary_data
+ ldr x1, [x0, #CPU_BOOT_STACK] // get secondary_data.stack
+ mov sp, x1
+ ldr x2, [x0, #CPU_BOOT_TASK]
+ msr sp_el0, x2
+#else
ldr_l x0, secondary_data // get secondary_data.stack
mov sp, x0
and x0, x0, #~(THREAD_SIZE - 1)
msr sp_el0, x0 // save thread_info
+#endif
mov x29, #0
b secondary_start_kernel
ENDPROC(__secondary_switched)
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
index f4dfd8c..1c694f3 100644
--- a/arch/arm64/kernel/hw_breakpoint.c
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -36,6 +36,7 @@
#include <asm/traps.h>
#include <asm/cputype.h>
#include <asm/system_misc.h>
+#include <asm/uaccess.h>
/* Breakpoint currently in use for each BRP. */
static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]);
diff --git a/arch/arm64/kernel/perf_trace_counters.c b/arch/arm64/kernel/perf_trace_counters.c
index 748ad44..fad69b9 100644
--- a/arch/arm64/kernel/perf_trace_counters.c
+++ b/arch/arm64/kernel/perf_trace_counters.c
@@ -65,7 +65,7 @@
{
u32 cnten_val;
int current_pid;
- u32 cpu = task_thread_info(next)->cpu;
+ u32 cpu = task_cpu(next);
if (tp_pid_state != 1)
return;
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 5a09efa..22d306c 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -45,6 +45,9 @@
#include <linux/personality.h>
#include <linux/notifier.h>
#include <trace/events/power.h>
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+#include <linux/percpu.h>
+#endif
#include <asm/alternative.h>
#include <asm/compat.h>
@@ -393,6 +396,22 @@
}
}
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+/*
+ * We store our current task in sp_el0, which is clobbered by userspace. Keep a
+ * shadow copy so that we can restore this upon entry from userspace.
+ *
+ * This is *only* for exception entry from EL0, and is not valid until we
+ * __switch_to() a user task.
+ */
+DEFINE_PER_CPU(struct task_struct *, __entry_task);
+
+static void entry_task_switch(struct task_struct *next)
+{
+ __this_cpu_write(__entry_task, next);
+}
+#endif
+
/*
* Thread switching.
*/
@@ -405,6 +424,9 @@
tls_thread_switch(next);
hw_breakpoint_thread_switch(next);
contextidr_thread_switch(next);
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+ entry_task_switch(next);
+#endif
uao_thread_switch(next);
/*
@@ -422,27 +444,35 @@
unsigned long get_wchan(struct task_struct *p)
{
struct stackframe frame;
- unsigned long stack_page;
+ unsigned long stack_page, ret = 0;
int count = 0;
if (!p || p == current || p->state == TASK_RUNNING)
return 0;
+ stack_page = (unsigned long)try_get_task_stack(p);
+ if (!stack_page)
+ return 0;
+
frame.fp = thread_saved_fp(p);
frame.sp = thread_saved_sp(p);
frame.pc = thread_saved_pc(p);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
frame.graph = p->curr_ret_stack;
#endif
- stack_page = (unsigned long)task_stack_page(p);
do {
if (frame.sp < stack_page ||
frame.sp >= stack_page + THREAD_SIZE ||
unwind_frame(p, &frame))
- return 0;
- if (!in_sched_functions(frame.pc))
- return frame.pc;
+ goto out;
+ if (!in_sched_functions(frame.pc)) {
+ ret = frame.pc;
+ goto out;
+ }
} while (count ++ < 16);
- return 0;
+
+out:
+ put_task_stack(p);
+ return ret;
}
unsigned long arch_align_stack(unsigned long sp)
diff --git a/arch/arm64/kernel/return_address.c b/arch/arm64/kernel/return_address.c
index 1718706f..12a87f2 100644
--- a/arch/arm64/kernel/return_address.c
+++ b/arch/arm64/kernel/return_address.c
@@ -12,6 +12,7 @@
#include <linux/export.h>
#include <linux/ftrace.h>
+#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>
struct return_address_data {
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 30af178..b8b40d9 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -362,12 +362,16 @@
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
/*
- * Make sure init_thread_info.ttbr0 always generates translation
+ * Make sure thread_info.ttbr0 always generates translation
* faults in case uaccess_enable() is inadvertently called by the init
* thread.
*/
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+ init_task.thread_info.ttbr0 = virt_to_phys(empty_zero_page);
+#else
init_thread_info.ttbr0 = virt_to_phys(empty_zero_page);
#endif
+#endif
#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
index fcc10c5..ff22492 100644
--- a/arch/arm64/kernel/sleep.S
+++ b/arch/arm64/kernel/sleep.S
@@ -158,9 +158,6 @@
/* load sp from context */
ldr x2, [x0, #CPU_CTX_SP]
mov sp, x2
- /* save thread_info */
- and x2, x2, #~(THREAD_SIZE - 1)
- msr sp_el0, x2
/*
* cpu_do_resume expects x0 to contain context address pointer
*/
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index a3a6b2e..6f36c7f 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -58,6 +58,9 @@
#define CREATE_TRACE_POINTS
#include <trace/events/ipi.h>
+DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
+EXPORT_PER_CPU_SYMBOL(cpu_number);
+
/*
* as from 2.5, kernels no longer have an init_tasks structure
* so we need some other way of telling a new secondary core
@@ -97,6 +100,9 @@
* We need to tell the secondary core where to find its stack and the
* page tables.
*/
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+ secondary_data.task = idle;
+#endif
secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
__flush_dcache_area(&secondary_data, sizeof(secondary_data));
@@ -120,6 +126,9 @@
pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
}
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+ secondary_data.task = NULL;
+#endif
secondary_data.stack = NULL;
return ret;
@@ -137,7 +146,10 @@
asmlinkage void secondary_start_kernel(void)
{
struct mm_struct *mm = &init_mm;
- unsigned int cpu = smp_processor_id();
+ unsigned int cpu;
+
+ cpu = task_cpu(current);
+ set_my_cpu_offset(per_cpu_offset(cpu));
/*
* All kernel threads share the same mm context; grab a
@@ -146,8 +158,6 @@
atomic_inc(&mm->mm_count);
current->active_mm = mm;
- set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
-
pr_debug("CPU%u: Booted secondary processor\n", cpu);
/*
@@ -632,6 +642,8 @@
if (max_cpus == 0)
break;
+ per_cpu(cpu_number, cpu) = cpu;
+
if (cpu == smp_processor_id())
continue;
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index cb3eec8..10c982e 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -23,6 +23,7 @@
#include <linux/stacktrace.h>
#include <asm/irq.h>
+#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>
/*
@@ -130,7 +131,6 @@
break;
}
}
-EXPORT_SYMBOL(walk_stackframe);
#ifdef CONFIG_STACKTRACE
struct stack_trace_data {
@@ -162,6 +162,9 @@
struct stack_trace_data data;
struct stackframe frame;
+ if (!try_get_task_stack(tsk))
+ return;
+
data.trace = trace;
data.skip = trace->skip;
@@ -183,6 +186,8 @@
walk_stackframe(tsk, &frame, save_trace, &data);
if (trace->nr_entries < trace->max_entries)
trace->entries[trace->nr_entries++] = ULONG_MAX;
+
+ put_task_stack(tsk);
}
void save_stack_trace(struct stack_trace *trace)
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 6ce8f860..3c76fb9 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -39,6 +39,7 @@
#include <asm/esr.h>
#include <asm/insn.h>
#include <asm/traps.h>
+#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>
#include <asm/exception.h>
#include <asm/system_misc.h>
@@ -154,6 +155,14 @@
unsigned long irq_stack_ptr;
int skip;
+ pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
+
+ if (!tsk)
+ tsk = current;
+
+ if (!try_get_task_stack(tsk))
+ return;
+
/*
* Switching between stacks is valid when tracing current and in
* non-preemptible context.
@@ -224,6 +233,8 @@
stack + sizeof(struct pt_regs), false);
}
}
+
+ put_task_stack(tsk);
}
void show_stack(struct task_struct *tsk, unsigned long *sp)
@@ -239,10 +250,9 @@
#endif
#define S_SMP " SMP"
-static int __die(const char *str, int err, struct thread_info *thread,
- struct pt_regs *regs)
+static int __die(const char *str, int err, struct pt_regs *regs)
{
- struct task_struct *tsk = thread->task;
+ struct task_struct *tsk = current;
static int die_counter;
int ret;
@@ -256,8 +266,9 @@
print_modules();
__show_regs(regs);
- pr_emerg("Process %.*s (pid: %d, stack limit = 0x%pP)\n",
- TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), thread + 1);
+ pr_emerg("Process %.*s (pid: %d, stack limit = 0x%p)\n",
+ TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk),
+ end_of_stack(tsk));
if (!user_mode(regs) || in_interrupt()) {
dump_backtrace(regs, tsk);
@@ -322,7 +333,6 @@
*/
void die(const char *str, struct pt_regs *regs, int err)
{
- struct thread_info *thread = current_thread_info();
enum bug_trap_type bug_type = BUG_TRAP_TYPE_NONE;
unsigned long flags = oops_begin();
int ret;
@@ -332,7 +342,7 @@
if (bug_type != BUG_TRAP_TYPE_NONE)
str = "Oops - BUG";
- ret = __die(str, err, thread, regs);
+ ret = __die(str, err, regs);
oops_end(flags, regs, ret);
}
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index eec3598b..3ff507c 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -1055,8 +1055,8 @@
{
struct sys_reg_params params;
u32 hsr = kvm_vcpu_get_hsr(vcpu);
- int Rt = (hsr >> 5) & 0xf;
- int Rt2 = (hsr >> 10) & 0xf;
+ int Rt = (hsr >> 5) & 0x1f;
+ int Rt2 = (hsr >> 10) & 0x1f;
params.is_aarch32 = true;
params.is_32bit = false;
@@ -1107,7 +1107,7 @@
{
struct sys_reg_params params;
u32 hsr = kvm_vcpu_get_hsr(vcpu);
- int Rt = (hsr >> 5) & 0xf;
+ int Rt = (hsr >> 5) & 0x1f;
params.is_aarch32 = true;
params.is_32bit = true;
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 05dc4e8..813fa5b 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -92,21 +92,21 @@
break;
pud = pud_offset(pgd, addr);
- printk(", *pud=%016llx", pud_val(*pud));
+ pr_cont(", *pud=%016llx", pud_val(*pud));
if (pud_none(*pud) || pud_bad(*pud))
break;
pmd = pmd_offset(pud, addr);
- printk(", *pmd=%016llx", pmd_val(*pmd));
+ pr_cont(", *pmd=%016llx", pmd_val(*pmd));
if (pmd_none(*pmd) || pmd_bad(*pmd))
break;
pte = pte_offset_map(pmd, addr);
- printk(", *pte=%016llx", pte_val(*pte));
+ pr_cont(", *pte=%016llx", pte_val(*pte));
pte_unmap(pte);
} while(0);
- printk("\n");
+ pr_cont("\n");
}
#ifdef CONFIG_ARM64_HW_AFDBM
@@ -374,8 +374,11 @@
* signal first. We do not need to release the mmap_sem because it
* would already be released in __lock_page_or_retry in mm/filemap.c.
*/
- if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+ if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
+ if (!user_mode(regs))
+ goto no_context;
return 0;
+ }
/*
* Major/minor page fault accounting is only done on the initial
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 92ad35f..0123011 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -116,11 +116,14 @@
mrs x8, mdscr_el1
mrs x9, oslsr_el1
mrs x10, sctlr_el1
+ mrs x11, tpidr_el1
+ mrs x12, sp_el0
stp x2, x3, [x0]
stp x4, xzr, [x0, #16]
stp x5, x6, [x0, #32]
stp x7, x8, [x0, #48]
stp x9, x10, [x0, #64]
+ stp x11, x12, [x0, #80]
ret
ENDPROC(cpu_do_suspend)
@@ -135,6 +138,7 @@
ldp x6, x8, [x0, #32]
ldp x9, x10, [x0, #48]
ldp x11, x12, [x0, #64]
+ ldp x13, x14, [x0, #80]
msr tpidr_el0, x2
msr tpidrro_el0, x3
msr contextidr_el1, x4
@@ -148,6 +152,8 @@
msr vbar_el1, x9
msr mdscr_el1, x10
msr sctlr_el1, x12
+ msr tpidr_el1, x13
+ msr sp_el0, x14
/*
* Restore oslsr_el1 by writing oslar_el1
*/
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index b162ad7..6297140 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -728,14 +728,14 @@
int ret;
ret = build_insn(insn, ctx);
-
- if (ctx->image == NULL)
- ctx->offset[i] = ctx->idx;
-
if (ret > 0) {
i++;
+ if (ctx->image == NULL)
+ ctx->offset[i] = ctx->idx;
continue;
}
+ if (ctx->image == NULL)
+ ctx->offset[i] = ctx->idx;
if (ret)
return ret;
}
diff --git a/arch/c6x/kernel/ptrace.c b/arch/c6x/kernel/ptrace.c
index 3c494e8..a511ac1 100644
--- a/arch/c6x/kernel/ptrace.c
+++ b/arch/c6x/kernel/ptrace.c
@@ -69,46 +69,6 @@
0, sizeof(*regs));
}
-static int gpr_set(struct task_struct *target,
- const struct user_regset *regset,
- unsigned int pos, unsigned int count,
- const void *kbuf, const void __user *ubuf)
-{
- int ret;
- struct pt_regs *regs = task_pt_regs(target);
-
- /* Don't copyin TSR or CSR */
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- ®s,
- 0, PT_TSR * sizeof(long));
- if (ret)
- return ret;
-
- ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
- PT_TSR * sizeof(long),
- (PT_TSR + 1) * sizeof(long));
- if (ret)
- return ret;
-
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- ®s,
- (PT_TSR + 1) * sizeof(long),
- PT_CSR * sizeof(long));
- if (ret)
- return ret;
-
- ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
- PT_CSR * sizeof(long),
- (PT_CSR + 1) * sizeof(long));
- if (ret)
- return ret;
-
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
- ®s,
- (PT_CSR + 1) * sizeof(long), -1);
- return ret;
-}
-
enum c6x_regset {
REGSET_GPR,
};
@@ -120,7 +80,6 @@
.size = sizeof(u32),
.align = sizeof(u32),
.get = gpr_get,
- .set = gpr_set
},
};
diff --git a/arch/h8300/kernel/ptrace.c b/arch/h8300/kernel/ptrace.c
index 9207554..0dc1c8f 100644
--- a/arch/h8300/kernel/ptrace.c
+++ b/arch/h8300/kernel/ptrace.c
@@ -95,7 +95,8 @@
long *reg = (long *)®s;
/* build user regs in buffer */
- for (r = 0; r < ARRAY_SIZE(register_offset); r++)
+ BUILD_BUG_ON(sizeof(regs) % sizeof(long) != 0);
+ for (r = 0; r < sizeof(regs) / sizeof(long); r++)
*reg++ = h8300_get_reg(target, r);
return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
@@ -113,7 +114,8 @@
long *reg;
/* build user regs in buffer */
- for (reg = (long *)®s, r = 0; r < ARRAY_SIZE(register_offset); r++)
+ BUILD_BUG_ON(sizeof(regs) % sizeof(long) != 0);
+ for (reg = (long *)®s, r = 0; r < sizeof(regs) / sizeof(long); r++)
*reg++ = h8300_get_reg(target, r);
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
@@ -122,7 +124,7 @@
return ret;
/* write back to pt_regs */
- for (reg = (long *)®s, r = 0; r < ARRAY_SIZE(register_offset); r++)
+ for (reg = (long *)®s, r = 0; r < sizeof(regs) / sizeof(long); r++)
h8300_put_reg(target, r, *reg++);
return 0;
}
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 2c86a4e..7091a36 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -45,7 +45,7 @@
select GENERIC_SMP_IDLE_THREAD
select ARCH_INIT_TASK
select ARCH_TASK_STRUCT_ALLOCATOR
- select ARCH_THREAD_INFO_ALLOCATOR
+ select ARCH_THREAD_STACK_ALLOCATOR
select ARCH_CLOCKSOURCE_DATA
select GENERIC_TIME_VSYSCALL_OLD
select SYSCTL_ARCH_UNALIGN_NO_WARN
diff --git a/arch/ia64/include/asm/thread_info.h b/arch/ia64/include/asm/thread_info.h
index aa995b6..d1212b8 100644
--- a/arch/ia64/include/asm/thread_info.h
+++ b/arch/ia64/include/asm/thread_info.h
@@ -48,15 +48,15 @@
#ifndef ASM_OFFSETS_C
/* how to get the thread information struct from C */
#define current_thread_info() ((struct thread_info *) ((char *) current + IA64_TASK_SIZE))
-#define alloc_thread_info_node(tsk, node) \
- ((struct thread_info *) ((char *) (tsk) + IA64_TASK_SIZE))
+#define alloc_thread_stack_node(tsk, node) \
+ ((unsigned long *) ((char *) (tsk) + IA64_TASK_SIZE))
#define task_thread_info(tsk) ((struct thread_info *) ((char *) (tsk) + IA64_TASK_SIZE))
#else
#define current_thread_info() ((struct thread_info *) 0)
-#define alloc_thread_info_node(tsk, node) ((struct thread_info *) 0)
+#define alloc_thread_stack_node(tsk, node) ((unsigned long *) 0)
#define task_thread_info(tsk) ((struct thread_info *) 0)
#endif
-#define free_thread_info(ti) /* nothing */
+#define free_thread_stack(ti) /* nothing */
#define task_stack_page(tsk) ((void *)(tsk))
#define __HAVE_THREAD_FUNCTIONS
diff --git a/arch/ia64/kernel/init_task.c b/arch/ia64/kernel/init_task.c
index f9efe97..0eaa89f 100644
--- a/arch/ia64/kernel/init_task.c
+++ b/arch/ia64/kernel/init_task.c
@@ -26,6 +26,7 @@
* handled. This is done by having a special ".data..init_task" section...
*/
#define init_thread_info init_task_mem.s.thread_info
+#define init_stack init_task_mem.stack
union {
struct {
diff --git a/arch/metag/include/asm/uaccess.h b/arch/metag/include/asm/uaccess.h
index 273e612..3db3812 100644
--- a/arch/metag/include/asm/uaccess.h
+++ b/arch/metag/include/asm/uaccess.h
@@ -28,24 +28,32 @@
#define segment_eq(a, b) ((a).seg == (b).seg)
-#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
-/*
- * Explicitly allow NULL pointers here. Parts of the kernel such
- * as readv/writev use access_ok to validate pointers, but want
- * to allow NULL pointers for various reasons. NULL pointers are
- * safe to allow through because the first page is not mappable on
- * Meta.
- *
- * We also wish to avoid letting user code access the system area
- * and the kernel half of the address space.
- */
-#define __user_bad(addr, size) (((addr) > 0 && (addr) < META_MEMORY_BASE) || \
- ((addr) > PAGE_OFFSET && \
- (addr) < LINCORE_BASE))
-
static inline int __access_ok(unsigned long addr, unsigned long size)
{
- return __kernel_ok || !__user_bad(addr, size);
+ /*
+ * Allow access to the user mapped memory area, but not the system area
+ * before it. The check extends to the top of the address space when
+ * kernel access is allowed (there's no real reason to user copy to the
+ * system area in any case).
+ */
+ if (likely(addr >= META_MEMORY_BASE && addr < get_fs().seg &&
+ size <= get_fs().seg - addr))
+ return true;
+ /*
+ * Explicitly allow NULL pointers here. Parts of the kernel such
+ * as readv/writev use access_ok to validate pointers, but want
+ * to allow NULL pointers for various reasons. NULL pointers are
+ * safe to allow through because the first page is not mappable on
+ * Meta.
+ */
+ if (!addr)
+ return true;
+ /* Allow access to core code memory area... */
+ if (addr >= LINCORE_CODE_BASE && addr <= LINCORE_CODE_LIMIT &&
+ size <= LINCORE_CODE_LIMIT + 1 - addr)
+ return true;
+ /* ... but no other areas. */
+ return false;
}
#define access_ok(type, addr, size) __access_ok((unsigned long)(addr), \
@@ -186,8 +194,13 @@
extern long __must_check __strncpy_from_user(char *dst, const char __user *src,
long count);
-#define strncpy_from_user(dst, src, count) __strncpy_from_user(dst, src, count)
-
+static inline long
+strncpy_from_user(char *dst, const char __user *src, long count)
+{
+ if (!access_ok(VERIFY_READ, src, 1))
+ return -EFAULT;
+ return __strncpy_from_user(dst, src, count);
+}
/*
* Return the size of a string (including the ending 0)
*
@@ -197,20 +210,21 @@
#define strlen_user(str) strnlen_user(str, 32767)
-extern unsigned long __must_check __copy_user_zeroing(void *to,
- const void __user *from,
- unsigned long n);
+extern unsigned long raw_copy_from_user(void *to, const void __user *from,
+ unsigned long n);
static inline unsigned long
copy_from_user(void *to, const void __user *from, unsigned long n)
{
+ unsigned long res = n;
if (likely(access_ok(VERIFY_READ, from, n)))
- return __copy_user_zeroing(to, from, n);
- memset(to, 0, n);
- return n;
+ res = raw_copy_from_user(to, from, n);
+ if (unlikely(res))
+ memset(to + (n - res), 0, res);
+ return res;
}
-#define __copy_from_user(to, from, n) __copy_user_zeroing(to, from, n)
+#define __copy_from_user(to, from, n) raw_copy_from_user(to, from, n)
#define __copy_from_user_inatomic __copy_from_user
extern unsigned long __must_check __copy_user(void __user *to,
diff --git a/arch/metag/kernel/ptrace.c b/arch/metag/kernel/ptrace.c
index 7563628..5e2dc7d 100644
--- a/arch/metag/kernel/ptrace.c
+++ b/arch/metag/kernel/ptrace.c
@@ -24,6 +24,16 @@
* user_regset definitions.
*/
+static unsigned long user_txstatus(const struct pt_regs *regs)
+{
+ unsigned long data = (unsigned long)regs->ctx.Flags;
+
+ if (regs->ctx.SaveMask & TBICTX_CBUF_BIT)
+ data |= USER_GP_REGS_STATUS_CATCH_BIT;
+
+ return data;
+}
+
int metag_gp_regs_copyout(const struct pt_regs *regs,
unsigned int pos, unsigned int count,
void *kbuf, void __user *ubuf)
@@ -62,9 +72,7 @@
if (ret)
goto out;
/* TXSTATUS */
- data = (unsigned long)regs->ctx.Flags;
- if (regs->ctx.SaveMask & TBICTX_CBUF_BIT)
- data |= USER_GP_REGS_STATUS_CATCH_BIT;
+ data = user_txstatus(regs);
ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
&data, 4*25, 4*26);
if (ret)
@@ -119,6 +127,7 @@
if (ret)
goto out;
/* TXSTATUS */
+ data = user_txstatus(regs);
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&data, 4*25, 4*26);
if (ret)
@@ -244,6 +253,8 @@
unsigned long long *ptr;
int ret, i;
+ if (count < 4*13)
+ return -EINVAL;
/* Read the entire pipeline before making any changes */
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&rp, 0, 4*13);
@@ -303,7 +314,7 @@
const void *kbuf, const void __user *ubuf)
{
int ret;
- void __user *tls;
+ void __user *tls = target->thread.tls_ptr;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
if (ret)
diff --git a/arch/metag/lib/usercopy.c b/arch/metag/lib/usercopy.c
index b3ebfe9..2792fc6 100644
--- a/arch/metag/lib/usercopy.c
+++ b/arch/metag/lib/usercopy.c
@@ -29,7 +29,6 @@
COPY \
"1:\n" \
" .section .fixup,\"ax\"\n" \
- " MOV D1Ar1,#0\n" \
FIXUP \
" MOVT D1Ar1,#HI(1b)\n" \
" JUMP D1Ar1,#LO(1b)\n" \
@@ -260,27 +259,31 @@
"MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
"22:\n" \
"MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
- "SUB %3, %3, #32\n" \
"23:\n" \
- "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
+ "SUB %3, %3, #32\n" \
"24:\n" \
+ "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
+ "25:\n" \
"MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+ "26:\n" \
"SUB %3, %3, #32\n" \
"DCACHE [%1+#-64], D0Ar6\n" \
"BR $Lloop"id"\n" \
\
"MOV RAPF, %1\n" \
- "25:\n" \
- "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
- "26:\n" \
- "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
- "SUB %3, %3, #32\n" \
"27:\n" \
"MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
"28:\n" \
"MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
- "SUB %0, %0, #8\n" \
"29:\n" \
+ "SUB %3, %3, #32\n" \
+ "30:\n" \
+ "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
+ "31:\n" \
+ "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+ "32:\n" \
+ "SUB %0, %0, #8\n" \
+ "33:\n" \
"SETL [%0++], D0.7, D1.7\n" \
"SUB %3, %3, #32\n" \
"1:" \
@@ -312,11 +315,15 @@
" .long 26b,3b\n" \
" .long 27b,3b\n" \
" .long 28b,3b\n" \
- " .long 29b,4b\n" \
+ " .long 29b,3b\n" \
+ " .long 30b,3b\n" \
+ " .long 31b,3b\n" \
+ " .long 32b,3b\n" \
+ " .long 33b,4b\n" \
" .previous\n" \
: "=r" (to), "=r" (from), "=r" (ret), "=d" (n) \
: "0" (to), "1" (from), "2" (ret), "3" (n) \
- : "D1Ar1", "D0Ar2", "memory")
+ : "D1Ar1", "D0Ar2", "cc", "memory")
/* rewind 'to' and 'from' pointers when a fault occurs
*
@@ -342,7 +349,7 @@
#define __asm_copy_to_user_64bit_rapf_loop(to, from, ret, n, id)\
__asm_copy_user_64bit_rapf_loop(to, from, ret, n, id, \
"LSR D0Ar2, D0Ar2, #8\n" \
- "AND D0Ar2, D0Ar2, #0x7\n" \
+ "ANDS D0Ar2, D0Ar2, #0x7\n" \
"ADDZ D0Ar2, D0Ar2, #4\n" \
"SUB D0Ar2, D0Ar2, #1\n" \
"MOV D1Ar1, #4\n" \
@@ -403,47 +410,55 @@
"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
"22:\n" \
"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
- "SUB %3, %3, #16\n" \
"23:\n" \
- "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
- "24:\n" \
- "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
"SUB %3, %3, #16\n" \
- "25:\n" \
+ "24:\n" \
"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
- "26:\n" \
+ "25:\n" \
"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+ "26:\n" \
"SUB %3, %3, #16\n" \
"27:\n" \
"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
"28:\n" \
"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+ "29:\n" \
+ "SUB %3, %3, #16\n" \
+ "30:\n" \
+ "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
+ "31:\n" \
+ "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+ "32:\n" \
"SUB %3, %3, #16\n" \
"DCACHE [%1+#-64], D0Ar6\n" \
"BR $Lloop"id"\n" \
\
"MOV RAPF, %1\n" \
- "29:\n" \
- "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
- "30:\n" \
- "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
- "SUB %3, %3, #16\n" \
- "31:\n" \
- "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
- "32:\n" \
- "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
- "SUB %3, %3, #16\n" \
"33:\n" \
"MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
"34:\n" \
"MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
- "SUB %3, %3, #16\n" \
"35:\n" \
- "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
+ "SUB %3, %3, #16\n" \
"36:\n" \
- "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
- "SUB %0, %0, #4\n" \
+ "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
"37:\n" \
+ "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+ "38:\n" \
+ "SUB %3, %3, #16\n" \
+ "39:\n" \
+ "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
+ "40:\n" \
+ "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+ "41:\n" \
+ "SUB %3, %3, #16\n" \
+ "42:\n" \
+ "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \
+ "43:\n" \
+ "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \
+ "44:\n" \
+ "SUB %0, %0, #4\n" \
+ "45:\n" \
"SETD [%0++], D0.7\n" \
"SUB %3, %3, #16\n" \
"1:" \
@@ -483,11 +498,19 @@
" .long 34b,3b\n" \
" .long 35b,3b\n" \
" .long 36b,3b\n" \
- " .long 37b,4b\n" \
+ " .long 37b,3b\n" \
+ " .long 38b,3b\n" \
+ " .long 39b,3b\n" \
+ " .long 40b,3b\n" \
+ " .long 41b,3b\n" \
+ " .long 42b,3b\n" \
+ " .long 43b,3b\n" \
+ " .long 44b,3b\n" \
+ " .long 45b,4b\n" \
" .previous\n" \
: "=r" (to), "=r" (from), "=r" (ret), "=d" (n) \
: "0" (to), "1" (from), "2" (ret), "3" (n) \
- : "D1Ar1", "D0Ar2", "memory")
+ : "D1Ar1", "D0Ar2", "cc", "memory")
/* rewind 'to' and 'from' pointers when a fault occurs
*
@@ -513,7 +536,7 @@
#define __asm_copy_to_user_32bit_rapf_loop(to, from, ret, n, id)\
__asm_copy_user_32bit_rapf_loop(to, from, ret, n, id, \
"LSR D0Ar2, D0Ar2, #8\n" \
- "AND D0Ar2, D0Ar2, #0x7\n" \
+ "ANDS D0Ar2, D0Ar2, #0x7\n" \
"ADDZ D0Ar2, D0Ar2, #4\n" \
"SUB D0Ar2, D0Ar2, #1\n" \
"MOV D1Ar1, #4\n" \
@@ -538,23 +561,31 @@
if ((unsigned long) src & 1) {
__asm_copy_to_user_1(dst, src, retn);
n--;
+ if (retn)
+ return retn + n;
}
if ((unsigned long) dst & 1) {
/* Worst case - byte copy */
while (n > 0) {
__asm_copy_to_user_1(dst, src, retn);
n--;
+ if (retn)
+ return retn + n;
}
}
if (((unsigned long) src & 2) && n >= 2) {
__asm_copy_to_user_2(dst, src, retn);
n -= 2;
+ if (retn)
+ return retn + n;
}
if ((unsigned long) dst & 2) {
/* Second worst case - word copy */
while (n >= 2) {
__asm_copy_to_user_2(dst, src, retn);
n -= 2;
+ if (retn)
+ return retn + n;
}
}
@@ -569,6 +600,8 @@
while (n >= 8) {
__asm_copy_to_user_8x64(dst, src, retn);
n -= 8;
+ if (retn)
+ return retn + n;
}
}
if (n >= RAPF_MIN_BUF_SIZE) {
@@ -581,6 +614,8 @@
while (n >= 8) {
__asm_copy_to_user_8x64(dst, src, retn);
n -= 8;
+ if (retn)
+ return retn + n;
}
}
#endif
@@ -588,11 +623,15 @@
while (n >= 16) {
__asm_copy_to_user_16(dst, src, retn);
n -= 16;
+ if (retn)
+ return retn + n;
}
while (n >= 4) {
__asm_copy_to_user_4(dst, src, retn);
n -= 4;
+ if (retn)
+ return retn + n;
}
switch (n) {
@@ -609,6 +648,10 @@
break;
}
+ /*
+ * If we get here, retn correctly reflects the number of failing
+ * bytes.
+ */
return retn;
}
EXPORT_SYMBOL(__copy_user);
@@ -617,16 +660,14 @@
__asm_copy_user_cont(to, from, ret, \
" GETB D1Ar1,[%1++]\n" \
"2: SETB [%0++],D1Ar1\n", \
- "3: ADD %2,%2,#1\n" \
- " SETB [%0++],D1Ar1\n", \
+ "3: ADD %2,%2,#1\n", \
" .long 2b,3b\n")
#define __asm_copy_from_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
__asm_copy_user_cont(to, from, ret, \
" GETW D1Ar1,[%1++]\n" \
"2: SETW [%0++],D1Ar1\n" COPY, \
- "3: ADD %2,%2,#2\n" \
- " SETW [%0++],D1Ar1\n" FIXUP, \
+ "3: ADD %2,%2,#2\n" FIXUP, \
" .long 2b,3b\n" TENTRY)
#define __asm_copy_from_user_2(to, from, ret) \
@@ -636,145 +677,26 @@
__asm_copy_from_user_2x_cont(to, from, ret, \
" GETB D1Ar1,[%1++]\n" \
"4: SETB [%0++],D1Ar1\n", \
- "5: ADD %2,%2,#1\n" \
- " SETB [%0++],D1Ar1\n", \
+ "5: ADD %2,%2,#1\n", \
" .long 4b,5b\n")
#define __asm_copy_from_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
__asm_copy_user_cont(to, from, ret, \
" GETD D1Ar1,[%1++]\n" \
"2: SETD [%0++],D1Ar1\n" COPY, \
- "3: ADD %2,%2,#4\n" \
- " SETD [%0++],D1Ar1\n" FIXUP, \
+ "3: ADD %2,%2,#4\n" FIXUP, \
" .long 2b,3b\n" TENTRY)
#define __asm_copy_from_user_4(to, from, ret) \
__asm_copy_from_user_4x_cont(to, from, ret, "", "", "")
-#define __asm_copy_from_user_5(to, from, ret) \
- __asm_copy_from_user_4x_cont(to, from, ret, \
- " GETB D1Ar1,[%1++]\n" \
- "4: SETB [%0++],D1Ar1\n", \
- "5: ADD %2,%2,#1\n" \
- " SETB [%0++],D1Ar1\n", \
- " .long 4b,5b\n")
-
-#define __asm_copy_from_user_6x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
- __asm_copy_from_user_4x_cont(to, from, ret, \
- " GETW D1Ar1,[%1++]\n" \
- "4: SETW [%0++],D1Ar1\n" COPY, \
- "5: ADD %2,%2,#2\n" \
- " SETW [%0++],D1Ar1\n" FIXUP, \
- " .long 4b,5b\n" TENTRY)
-
-#define __asm_copy_from_user_6(to, from, ret) \
- __asm_copy_from_user_6x_cont(to, from, ret, "", "", "")
-
-#define __asm_copy_from_user_7(to, from, ret) \
- __asm_copy_from_user_6x_cont(to, from, ret, \
- " GETB D1Ar1,[%1++]\n" \
- "6: SETB [%0++],D1Ar1\n", \
- "7: ADD %2,%2,#1\n" \
- " SETB [%0++],D1Ar1\n", \
- " .long 6b,7b\n")
-
-#define __asm_copy_from_user_8x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
- __asm_copy_from_user_4x_cont(to, from, ret, \
- " GETD D1Ar1,[%1++]\n" \
- "4: SETD [%0++],D1Ar1\n" COPY, \
- "5: ADD %2,%2,#4\n" \
- " SETD [%0++],D1Ar1\n" FIXUP, \
- " .long 4b,5b\n" TENTRY)
-
-#define __asm_copy_from_user_8(to, from, ret) \
- __asm_copy_from_user_8x_cont(to, from, ret, "", "", "")
-
-#define __asm_copy_from_user_9(to, from, ret) \
- __asm_copy_from_user_8x_cont(to, from, ret, \
- " GETB D1Ar1,[%1++]\n" \
- "6: SETB [%0++],D1Ar1\n", \
- "7: ADD %2,%2,#1\n" \
- " SETB [%0++],D1Ar1\n", \
- " .long 6b,7b\n")
-
-#define __asm_copy_from_user_10x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
- __asm_copy_from_user_8x_cont(to, from, ret, \
- " GETW D1Ar1,[%1++]\n" \
- "6: SETW [%0++],D1Ar1\n" COPY, \
- "7: ADD %2,%2,#2\n" \
- " SETW [%0++],D1Ar1\n" FIXUP, \
- " .long 6b,7b\n" TENTRY)
-
-#define __asm_copy_from_user_10(to, from, ret) \
- __asm_copy_from_user_10x_cont(to, from, ret, "", "", "")
-
-#define __asm_copy_from_user_11(to, from, ret) \
- __asm_copy_from_user_10x_cont(to, from, ret, \
- " GETB D1Ar1,[%1++]\n" \
- "8: SETB [%0++],D1Ar1\n", \
- "9: ADD %2,%2,#1\n" \
- " SETB [%0++],D1Ar1\n", \
- " .long 8b,9b\n")
-
-#define __asm_copy_from_user_12x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
- __asm_copy_from_user_8x_cont(to, from, ret, \
- " GETD D1Ar1,[%1++]\n" \
- "6: SETD [%0++],D1Ar1\n" COPY, \
- "7: ADD %2,%2,#4\n" \
- " SETD [%0++],D1Ar1\n" FIXUP, \
- " .long 6b,7b\n" TENTRY)
-
-#define __asm_copy_from_user_12(to, from, ret) \
- __asm_copy_from_user_12x_cont(to, from, ret, "", "", "")
-
-#define __asm_copy_from_user_13(to, from, ret) \
- __asm_copy_from_user_12x_cont(to, from, ret, \
- " GETB D1Ar1,[%1++]\n" \
- "8: SETB [%0++],D1Ar1\n", \
- "9: ADD %2,%2,#1\n" \
- " SETB [%0++],D1Ar1\n", \
- " .long 8b,9b\n")
-
-#define __asm_copy_from_user_14x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
- __asm_copy_from_user_12x_cont(to, from, ret, \
- " GETW D1Ar1,[%1++]\n" \
- "8: SETW [%0++],D1Ar1\n" COPY, \
- "9: ADD %2,%2,#2\n" \
- " SETW [%0++],D1Ar1\n" FIXUP, \
- " .long 8b,9b\n" TENTRY)
-
-#define __asm_copy_from_user_14(to, from, ret) \
- __asm_copy_from_user_14x_cont(to, from, ret, "", "", "")
-
-#define __asm_copy_from_user_15(to, from, ret) \
- __asm_copy_from_user_14x_cont(to, from, ret, \
- " GETB D1Ar1,[%1++]\n" \
- "10: SETB [%0++],D1Ar1\n", \
- "11: ADD %2,%2,#1\n" \
- " SETB [%0++],D1Ar1\n", \
- " .long 10b,11b\n")
-
-#define __asm_copy_from_user_16x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
- __asm_copy_from_user_12x_cont(to, from, ret, \
- " GETD D1Ar1,[%1++]\n" \
- "8: SETD [%0++],D1Ar1\n" COPY, \
- "9: ADD %2,%2,#4\n" \
- " SETD [%0++],D1Ar1\n" FIXUP, \
- " .long 8b,9b\n" TENTRY)
-
-#define __asm_copy_from_user_16(to, from, ret) \
- __asm_copy_from_user_16x_cont(to, from, ret, "", "", "")
-
#define __asm_copy_from_user_8x64(to, from, ret) \
asm volatile ( \
" GETL D0Ar2,D1Ar1,[%1++]\n" \
"2: SETL [%0++],D0Ar2,D1Ar1\n" \
"1:\n" \
" .section .fixup,\"ax\"\n" \
- " MOV D1Ar1,#0\n" \
- " MOV D0Ar2,#0\n" \
"3: ADD %2,%2,#8\n" \
- " SETL [%0++],D0Ar2,D1Ar1\n" \
" MOVT D0Ar2,#HI(1b)\n" \
" JUMP D0Ar2,#LO(1b)\n" \
" .previous\n" \
@@ -789,36 +711,57 @@
*
* Rationale:
* A fault occurs while reading from user buffer, which is the
- * source. Since the fault is at a single address, we only
- * need to rewind by 8 bytes.
+ * source.
* Since we don't write to kernel buffer until we read first,
* the kernel buffer is at the right state and needn't be
- * corrected.
+ * corrected, but the source must be rewound to the beginning of
+ * the block, which is LSM_STEP*8 bytes.
+ * LSM_STEP is bits 10:8 in TXSTATUS which is already read
+ * and stored in D0Ar2
+ *
+ * NOTE: If a fault occurs at the last operation in M{G,S}ETL
+ * LSM_STEP will be 0. ie: we do 4 writes in our case, if
+ * a fault happens at the 4th write, LSM_STEP will be 0
+ * instead of 4. The code copes with that.
*/
#define __asm_copy_from_user_64bit_rapf_loop(to, from, ret, n, id) \
__asm_copy_user_64bit_rapf_loop(to, from, ret, n, id, \
- "SUB %1, %1, #8\n")
+ "LSR D0Ar2, D0Ar2, #5\n" \
+ "ANDS D0Ar2, D0Ar2, #0x38\n" \
+ "ADDZ D0Ar2, D0Ar2, #32\n" \
+ "SUB %1, %1, D0Ar2\n")
/* rewind 'from' pointer when a fault occurs
*
* Rationale:
* A fault occurs while reading from user buffer, which is the
- * source. Since the fault is at a single address, we only
- * need to rewind by 4 bytes.
+ * source.
* Since we don't write to kernel buffer until we read first,
* the kernel buffer is at the right state and needn't be
- * corrected.
+ * corrected, but the source must be rewound to the beginning of
+ * the block, which is LSM_STEP*4 bytes.
+ * LSM_STEP is bits 10:8 in TXSTATUS which is already read
+ * and stored in D0Ar2
+ *
+ * NOTE: If a fault occurs at the last operation in M{G,S}ETL
+ * LSM_STEP will be 0. ie: we do 4 writes in our case, if
+ * a fault happens at the 4th write, LSM_STEP will be 0
+ * instead of 4. The code copes with that.
*/
#define __asm_copy_from_user_32bit_rapf_loop(to, from, ret, n, id) \
__asm_copy_user_32bit_rapf_loop(to, from, ret, n, id, \
- "SUB %1, %1, #4\n")
+ "LSR D0Ar2, D0Ar2, #6\n" \
+ "ANDS D0Ar2, D0Ar2, #0x1c\n" \
+ "ADDZ D0Ar2, D0Ar2, #16\n" \
+ "SUB %1, %1, D0Ar2\n")
-/* Copy from user to kernel, zeroing the bytes that were inaccessible in
- userland. The return-value is the number of bytes that were
- inaccessible. */
-unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
- unsigned long n)
+/*
+ * Copy from user to kernel. The return-value is the number of bytes that were
+ * inaccessible.
+ */
+unsigned long raw_copy_from_user(void *pdst, const void __user *psrc,
+ unsigned long n)
{
register char *dst asm ("A0.2") = pdst;
register const char __user *src asm ("A1.2") = psrc;
@@ -830,6 +773,8 @@
if ((unsigned long) src & 1) {
__asm_copy_from_user_1(dst, src, retn);
n--;
+ if (retn)
+ return retn + n;
}
if ((unsigned long) dst & 1) {
/* Worst case - byte copy */
@@ -837,12 +782,14 @@
__asm_copy_from_user_1(dst, src, retn);
n--;
if (retn)
- goto copy_exception_bytes;
+ return retn + n;
}
}
if (((unsigned long) src & 2) && n >= 2) {
__asm_copy_from_user_2(dst, src, retn);
n -= 2;
+ if (retn)
+ return retn + n;
}
if ((unsigned long) dst & 2) {
/* Second worst case - word copy */
@@ -850,16 +797,10 @@
__asm_copy_from_user_2(dst, src, retn);
n -= 2;
if (retn)
- goto copy_exception_bytes;
+ return retn + n;
}
}
- /* We only need one check after the unalignment-adjustments,
- because if both adjustments were done, either both or
- neither reference had an exception. */
- if (retn != 0)
- goto copy_exception_bytes;
-
#ifdef USE_RAPF
/* 64 bit copy loop */
if (!(((unsigned long) src | (unsigned long) dst) & 7)) {
@@ -872,7 +813,7 @@
__asm_copy_from_user_8x64(dst, src, retn);
n -= 8;
if (retn)
- goto copy_exception_bytes;
+ return retn + n;
}
}
@@ -888,7 +829,7 @@
__asm_copy_from_user_8x64(dst, src, retn);
n -= 8;
if (retn)
- goto copy_exception_bytes;
+ return retn + n;
}
}
#endif
@@ -898,7 +839,7 @@
n -= 4;
if (retn)
- goto copy_exception_bytes;
+ return retn + n;
}
/* If we get here, there were no memory read faults. */
@@ -924,21 +865,8 @@
/* If we get here, retn correctly reflects the number of failing
bytes. */
return retn;
-
- copy_exception_bytes:
- /* We already have "retn" bytes cleared, and need to clear the
- remaining "n" bytes. A non-optimized simple byte-for-byte in-line
- memset is preferred here, since this isn't speed-critical code and
- we'd rather have this a leaf-function than calling memset. */
- {
- char *endp;
- for (endp = dst + n; dst < endp; dst++)
- *dst = 0;
- }
-
- return retn + n;
}
-EXPORT_SYMBOL(__copy_user_zeroing);
+EXPORT_SYMBOL(raw_copy_from_user);
#define __asm_clear_8x64(to, ret) \
asm volatile ( \
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index db45961..8b0424a 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -9,6 +9,7 @@
select HAVE_CONTEXT_TRACKING
select HAVE_GENERIC_DMA_COHERENT
select HAVE_IDE
+ select HAVE_IRQ_EXIT_ON_IRQ_STACK
select HAVE_OPROFILE
select HAVE_PERF_EVENTS
select PERF_USE_VMALLOC
@@ -1463,7 +1464,7 @@
select CPU_SUPPORTS_HIGHMEM
select CPU_SUPPORTS_MSA
select GENERIC_CSUM
- select MIPS_O32_FP64_SUPPORT if MIPS32_O32
+ select MIPS_O32_FP64_SUPPORT if 32BIT || MIPS32_O32
help
Choose this option to build a kernel for release 6 or later of the
MIPS64 architecture. New MIPS processors, starting with the Warrior
diff --git a/arch/mips/ath79/common.c b/arch/mips/ath79/common.c
index 3cedd1f..8ae4067 100644
--- a/arch/mips/ath79/common.c
+++ b/arch/mips/ath79/common.c
@@ -76,14 +76,14 @@
{
BUG_ON(!ath79_ddr_pci_win_base);
- __raw_writel(AR71XX_PCI_WIN0_OFFS, ath79_ddr_pci_win_base + 0);
- __raw_writel(AR71XX_PCI_WIN1_OFFS, ath79_ddr_pci_win_base + 1);
- __raw_writel(AR71XX_PCI_WIN2_OFFS, ath79_ddr_pci_win_base + 2);
- __raw_writel(AR71XX_PCI_WIN3_OFFS, ath79_ddr_pci_win_base + 3);
- __raw_writel(AR71XX_PCI_WIN4_OFFS, ath79_ddr_pci_win_base + 4);
- __raw_writel(AR71XX_PCI_WIN5_OFFS, ath79_ddr_pci_win_base + 5);
- __raw_writel(AR71XX_PCI_WIN6_OFFS, ath79_ddr_pci_win_base + 6);
- __raw_writel(AR71XX_PCI_WIN7_OFFS, ath79_ddr_pci_win_base + 7);
+ __raw_writel(AR71XX_PCI_WIN0_OFFS, ath79_ddr_pci_win_base + 0x0);
+ __raw_writel(AR71XX_PCI_WIN1_OFFS, ath79_ddr_pci_win_base + 0x4);
+ __raw_writel(AR71XX_PCI_WIN2_OFFS, ath79_ddr_pci_win_base + 0x8);
+ __raw_writel(AR71XX_PCI_WIN3_OFFS, ath79_ddr_pci_win_base + 0xc);
+ __raw_writel(AR71XX_PCI_WIN4_OFFS, ath79_ddr_pci_win_base + 0x10);
+ __raw_writel(AR71XX_PCI_WIN5_OFFS, ath79_ddr_pci_win_base + 0x14);
+ __raw_writel(AR71XX_PCI_WIN6_OFFS, ath79_ddr_pci_win_base + 0x18);
+ __raw_writel(AR71XX_PCI_WIN7_OFFS, ath79_ddr_pci_win_base + 0x1c);
}
EXPORT_SYMBOL_GPL(ath79_ddr_set_pci_windows);
diff --git a/arch/mips/include/asm/branch.h b/arch/mips/include/asm/branch.h
index de781cf..da80878 100644
--- a/arch/mips/include/asm/branch.h
+++ b/arch/mips/include/asm/branch.h
@@ -74,10 +74,7 @@
return __microMIPS_compute_return_epc(regs);
if (cpu_has_mips16)
return __MIPS16e_compute_return_epc(regs);
- return regs->cp0_epc;
- }
-
- if (!delay_slot(regs)) {
+ } else if (!delay_slot(regs)) {
regs->cp0_epc += 4;
return 0;
}
diff --git a/arch/mips/include/asm/irq.h b/arch/mips/include/asm/irq.h
index 15e0fec..ebb9efb 100644
--- a/arch/mips/include/asm/irq.h
+++ b/arch/mips/include/asm/irq.h
@@ -17,6 +17,18 @@
#include <irq.h>
+#define IRQ_STACK_SIZE THREAD_SIZE
+
+extern void *irq_stack[NR_CPUS];
+
+static inline bool on_irq_stack(int cpu, unsigned long sp)
+{
+ unsigned long low = (unsigned long)irq_stack[cpu];
+ unsigned long high = low + IRQ_STACK_SIZE;
+
+ return (low <= sp && sp <= high);
+}
+
#ifdef CONFIG_I8259
static inline int irq_canonicalize(int irq)
{
diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h
index 40196be..2365ce0 100644
--- a/arch/mips/include/asm/spinlock.h
+++ b/arch/mips/include/asm/spinlock.h
@@ -112,7 +112,7 @@
" andi %[ticket], %[ticket], 0xffff \n"
" bne %[ticket], %[my_ticket], 4f \n"
" subu %[ticket], %[my_ticket], %[ticket] \n"
- "2: \n"
+ "2: .insn \n"
" .subsection 2 \n"
"4: andi %[ticket], %[ticket], 0xffff \n"
" sll %[ticket], 5 \n"
@@ -187,7 +187,7 @@
" sc %[ticket], %[ticket_ptr] \n"
" beqz %[ticket], 1b \n"
" li %[ticket], 1 \n"
- "2: \n"
+ "2: .insn \n"
" .subsection 2 \n"
"3: b 2b \n"
" li %[ticket], 0 \n"
@@ -367,7 +367,7 @@
" .set reorder \n"
__WEAK_LLSC_MB
" li %2, 1 \n"
- "2: \n"
+ "2: .insn \n"
: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
: GCC_OFF_SMALL_ASM() (rw->lock)
: "memory");
@@ -407,7 +407,7 @@
" lui %1, 0x8000 \n"
" sc %1, %0 \n"
" li %2, 1 \n"
- "2: \n"
+ "2: .insn \n"
: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp),
"=&r" (ret)
: GCC_OFF_SMALL_ASM() (rw->lock)
diff --git a/arch/mips/include/asm/stackframe.h b/arch/mips/include/asm/stackframe.h
index a71da57..5347f13 100644
--- a/arch/mips/include/asm/stackframe.h
+++ b/arch/mips/include/asm/stackframe.h
@@ -216,12 +216,19 @@
LONG_S $25, PT_R25(sp)
LONG_S $28, PT_R28(sp)
LONG_S $31, PT_R31(sp)
+
+ /* Set thread_info if we're coming from user mode */
+ mfc0 k0, CP0_STATUS
+ sll k0, 3 /* extract cu0 bit */
+ bltz k0, 9f
+
ori $28, sp, _THREAD_MASK
xori $28, _THREAD_MASK
#ifdef CONFIG_CPU_CAVIUM_OCTEON
.set mips64
pref 0, 0($28) /* Prefetch the current pointer */
#endif
+9:
.set pop
.endm
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index 154e203..ec053ce 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -101,6 +101,7 @@
OFFSET(TI_REGS, thread_info, regs);
DEFINE(_THREAD_SIZE, THREAD_SIZE);
DEFINE(_THREAD_MASK, THREAD_MASK);
+ DEFINE(_IRQ_STACK_SIZE, IRQ_STACK_SIZE);
BLANK();
}
diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c
index d8f9b35..71e8f4c 100644
--- a/arch/mips/kernel/branch.c
+++ b/arch/mips/kernel/branch.c
@@ -399,7 +399,7 @@
*
* @regs: Pointer to pt_regs
* @insn: branch instruction to decode
- * @returns: -EFAULT on error and forces SIGBUS, and on success
+ * @returns: -EFAULT on error and forces SIGILL, and on success
* returns 0 or BRANCH_LIKELY_TAKEN as appropriate after
* evaluating the branch.
*
@@ -431,7 +431,7 @@
/* Fall through */
case jr_op:
if (NO_R6EMU && insn.r_format.func == jr_op)
- goto sigill_r6;
+ goto sigill_r2r6;
regs->cp0_epc = regs->regs[insn.r_format.rs];
break;
}
@@ -446,7 +446,7 @@
switch (insn.i_format.rt) {
case bltzl_op:
if (NO_R6EMU)
- goto sigill_r6;
+ goto sigill_r2r6;
case bltz_op:
if ((long)regs->regs[insn.i_format.rs] < 0) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
@@ -459,7 +459,7 @@
case bgezl_op:
if (NO_R6EMU)
- goto sigill_r6;
+ goto sigill_r2r6;
case bgez_op:
if ((long)regs->regs[insn.i_format.rs] >= 0) {
epc = epc + 4 + (insn.i_format.simmediate << 2);
@@ -473,10 +473,8 @@
case bltzal_op:
case bltzall_op:
if (NO_R6EMU && (insn.i_format.rs ||
- insn.i_format.rt == bltzall_op)) {
- ret = -SIGILL;
- break;
- }
+ insn.i_format.rt == bltzall_op))
+ goto sigill_r2r6;
regs->regs[31] = epc + 8;
/*
* OK we are here either because we hit a NAL
@@ -507,10 +505,8 @@
case bgezal_op:
case bgezall_op:
if (NO_R6EMU && (insn.i_format.rs ||
- insn.i_format.rt == bgezall_op)) {
- ret = -SIGILL;
- break;
- }
+ insn.i_format.rt == bgezall_op))
+ goto sigill_r2r6;
regs->regs[31] = epc + 8;
/*
* OK we are here either because we hit a BAL
@@ -556,6 +552,7 @@
/*
* These are unconditional and in j_format.
*/
+ case jalx_op:
case jal_op:
regs->regs[31] = regs->cp0_epc + 8;
case j_op:
@@ -573,7 +570,7 @@
*/
case beql_op:
if (NO_R6EMU)
- goto sigill_r6;
+ goto sigill_r2r6;
case beq_op:
if (regs->regs[insn.i_format.rs] ==
regs->regs[insn.i_format.rt]) {
@@ -587,7 +584,7 @@
case bnel_op:
if (NO_R6EMU)
- goto sigill_r6;
+ goto sigill_r2r6;
case bne_op:
if (regs->regs[insn.i_format.rs] !=
regs->regs[insn.i_format.rt]) {
@@ -601,7 +598,7 @@
case blezl_op: /* not really i_format */
if (!insn.i_format.rt && NO_R6EMU)
- goto sigill_r6;
+ goto sigill_r2r6;
case blez_op:
/*
* Compact branches for R6 for the
@@ -636,7 +633,7 @@
case bgtzl_op:
if (!insn.i_format.rt && NO_R6EMU)
- goto sigill_r6;
+ goto sigill_r2r6;
case bgtz_op:
/*
* Compact branches for R6 for the
@@ -816,8 +813,10 @@
break;
}
/* Compact branch: BNEZC || JIALC */
- if (insn.i_format.rs)
+ if (!insn.i_format.rs) {
+ /* JIALC: set $31/ra */
regs->regs[31] = epc + 4;
+ }
regs->cp0_epc += 8;
break;
#endif
@@ -841,11 +840,12 @@
return ret;
sigill_dsp:
- printk("%s: DSP branch but not DSP ASE - sending SIGBUS.\n", current->comm);
- force_sig(SIGBUS, current);
+ pr_info("%s: DSP branch but not DSP ASE - sending SIGILL.\n",
+ current->comm);
+ force_sig(SIGILL, current);
return -EFAULT;
-sigill_r6:
- pr_info("%s: R2 branch but r2-to-r6 emulator is not preset - sending SIGILL.\n",
+sigill_r2r6:
+ pr_info("%s: R2 branch but r2-to-r6 emulator is not present - sending SIGILL.\n",
current->comm);
force_sig(SIGILL, current);
return -EFAULT;
diff --git a/arch/mips/kernel/crash.c b/arch/mips/kernel/crash.c
index d434d5d..610f0f3 100644
--- a/arch/mips/kernel/crash.c
+++ b/arch/mips/kernel/crash.c
@@ -14,12 +14,22 @@
static cpumask_t cpus_in_crash = CPU_MASK_NONE;
#ifdef CONFIG_SMP
-static void crash_shutdown_secondary(void *ignore)
+static void crash_shutdown_secondary(void *passed_regs)
{
- struct pt_regs *regs;
+ struct pt_regs *regs = passed_regs;
int cpu = smp_processor_id();
- regs = task_pt_regs(current);
+ /*
+ * If we are passed registers, use those. Otherwise get the
+ * regs from the last interrupt, which should be correct, as
+ * we are in an interrupt. But if the regs are not there,
+ * pull them from the top of the stack. They are probably
+ * wrong, but we need something to keep from crashing again.
+ */
+ if (!regs)
+ regs = get_irq_regs();
+ if (!regs)
+ regs = task_pt_regs(current);
if (!cpu_online(cpu))
return;
diff --git a/arch/mips/kernel/elf.c b/arch/mips/kernel/elf.c
index 4a4d9e0..3afffc3 100644
--- a/arch/mips/kernel/elf.c
+++ b/arch/mips/kernel/elf.c
@@ -206,7 +206,7 @@
else if ((prog_req.fr1 && prog_req.frdefault) ||
(prog_req.single && !prog_req.frdefault))
/* Make sure 64-bit MIPS III/IV/64R1 will not pick FR1 */
- state->overall_fp_mode = ((current_cpu_data.fpu_id & MIPS_FPIR_F64) &&
+ state->overall_fp_mode = ((raw_current_cpu_data.fpu_id & MIPS_FPIR_F64) &&
cpu_has_mips_r2_r6) ?
FP_FR1 : FP_FR0;
else if (prog_req.fr1)
diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S
index 7791840..db07793 100644
--- a/arch/mips/kernel/entry.S
+++ b/arch/mips/kernel/entry.S
@@ -11,6 +11,7 @@
#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/compiler.h>
+#include <asm/irqflags.h>
#include <asm/regdef.h>
#include <asm/mipsregs.h>
#include <asm/stackframe.h>
@@ -137,6 +138,7 @@
andi t0, a2, _TIF_NEED_RESCHED # a2 is preloaded with TI_FLAGS
beqz t0, work_notifysig
work_resched:
+ TRACE_IRQS_OFF
jal schedule
local_irq_disable # make sure need_resched and
@@ -173,6 +175,7 @@
beqz t0, work_pending # trace bit set?
local_irq_enable # could let syscall_trace_leave()
# call schedule() instead
+ TRACE_IRQS_ON
move a0, sp
jal syscall_trace_leave
b resume_userspace
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index baa7b6f..619e30e 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -188,9 +188,44 @@
LONG_L s0, TI_REGS($28)
LONG_S sp, TI_REGS($28)
- PTR_LA ra, ret_from_irq
- PTR_LA v0, plat_irq_dispatch
- jr v0
+
+ /*
+ * SAVE_ALL ensures we are using a valid kernel stack for the thread.
+ * Check if we are already using the IRQ stack.
+ */
+ move s1, sp # Preserve the sp
+
+ /* Get IRQ stack for this CPU */
+ ASM_CPUID_MFC0 k0, ASM_SMP_CPUID_REG
+#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
+ lui k1, %hi(irq_stack)
+#else
+ lui k1, %highest(irq_stack)
+ daddiu k1, %higher(irq_stack)
+ dsll k1, 16
+ daddiu k1, %hi(irq_stack)
+ dsll k1, 16
+#endif
+ LONG_SRL k0, SMP_CPUID_PTRSHIFT
+ LONG_ADDU k1, k0
+ LONG_L t0, %lo(irq_stack)(k1)
+
+ # Check if already on IRQ stack
+ PTR_LI t1, ~(_THREAD_SIZE-1)
+ and t1, t1, sp
+ beq t0, t1, 2f
+
+ /* Switch to IRQ stack */
+ li t1, _IRQ_STACK_SIZE
+ PTR_ADD sp, t0, t1
+
+2:
+ jal plat_irq_dispatch
+
+ /* Restore sp */
+ move sp, s1
+
+ j ret_from_irq
#ifdef CONFIG_CPU_MICROMIPS
nop
#endif
@@ -263,8 +298,44 @@
LONG_L s0, TI_REGS($28)
LONG_S sp, TI_REGS($28)
- PTR_LA ra, ret_from_irq
- jr v0
+
+ /*
+ * SAVE_ALL ensures we are using a valid kernel stack for the thread.
+ * Check if we are already using the IRQ stack.
+ */
+ move s1, sp # Preserve the sp
+
+ /* Get IRQ stack for this CPU */
+ ASM_CPUID_MFC0 k0, ASM_SMP_CPUID_REG
+#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
+ lui k1, %hi(irq_stack)
+#else
+ lui k1, %highest(irq_stack)
+ daddiu k1, %higher(irq_stack)
+ dsll k1, 16
+ daddiu k1, %hi(irq_stack)
+ dsll k1, 16
+#endif
+ LONG_SRL k0, SMP_CPUID_PTRSHIFT
+ LONG_ADDU k1, k0
+ LONG_L t0, %lo(irq_stack)(k1)
+
+ # Check if already on IRQ stack
+ PTR_LI t1, ~(_THREAD_SIZE-1)
+ and t1, t1, sp
+ beq t0, t1, 2f
+
+ /* Switch to IRQ stack */
+ li t1, _IRQ_STACK_SIZE
+ PTR_ADD sp, t0, t1
+
+2:
+ jalr v0
+
+ /* Restore sp */
+ move sp, s1
+
+ j ret_from_irq
END(except_vec_vi_handler)
/*
diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
index 8eb5af8..dc1180a 100644
--- a/arch/mips/kernel/irq.c
+++ b/arch/mips/kernel/irq.c
@@ -25,6 +25,8 @@
#include <linux/atomic.h>
#include <asm/uaccess.h>
+void *irq_stack[NR_CPUS];
+
/*
* 'what should we do if we get a hw irq event on an illegal vector'.
* each architecture has to answer this themselves.
@@ -55,6 +57,15 @@
irq_set_noprobe(i);
arch_init_irq();
+
+ for_each_possible_cpu(i) {
+ int irq_pages = IRQ_STACK_SIZE / PAGE_SIZE;
+ void *s = (void *)__get_free_pages(GFP_KERNEL, irq_pages);
+
+ irq_stack[i] = s;
+ pr_debug("CPU%d IRQ stack at 0x%p - 0x%p\n", i,
+ irq_stack[i], irq_stack[i] + IRQ_STACK_SIZE);
+ }
}
#ifdef CONFIG_DEBUG_STACKOVERFLOW
diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c
index de63d36..732d617 100644
--- a/arch/mips/kernel/kgdb.c
+++ b/arch/mips/kernel/kgdb.c
@@ -244,9 +244,6 @@
void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
{
int reg;
- struct thread_info *ti = task_thread_info(p);
- unsigned long ksp = (unsigned long)ti + THREAD_SIZE - 32;
- struct pt_regs *regs = (struct pt_regs *)ksp - 1;
#if (KGDB_GDB_REG_SIZE == 32)
u32 *ptr = (u32 *)gdb_regs;
#else
@@ -254,25 +251,46 @@
#endif
for (reg = 0; reg < 16; reg++)
- *(ptr++) = regs->regs[reg];
+ *(ptr++) = 0;
/* S0 - S7 */
- for (reg = 16; reg < 24; reg++)
- *(ptr++) = regs->regs[reg];
+ *(ptr++) = p->thread.reg16;
+ *(ptr++) = p->thread.reg17;
+ *(ptr++) = p->thread.reg18;
+ *(ptr++) = p->thread.reg19;
+ *(ptr++) = p->thread.reg20;
+ *(ptr++) = p->thread.reg21;
+ *(ptr++) = p->thread.reg22;
+ *(ptr++) = p->thread.reg23;
for (reg = 24; reg < 28; reg++)
*(ptr++) = 0;
/* GP, SP, FP, RA */
- for (reg = 28; reg < 32; reg++)
- *(ptr++) = regs->regs[reg];
+ *(ptr++) = (long)p;
+ *(ptr++) = p->thread.reg29;
+ *(ptr++) = p->thread.reg30;
+ *(ptr++) = p->thread.reg31;
- *(ptr++) = regs->cp0_status;
- *(ptr++) = regs->lo;
- *(ptr++) = regs->hi;
- *(ptr++) = regs->cp0_badvaddr;
- *(ptr++) = regs->cp0_cause;
- *(ptr++) = regs->cp0_epc;
+ *(ptr++) = p->thread.cp0_status;
+
+ /* lo, hi */
+ *(ptr++) = 0;
+ *(ptr++) = 0;
+
+ /*
+ * BadVAddr, Cause
+ * Ideally these would come from the last exception frame up the stack
+ * but that requires unwinding, otherwise we can't know much for sure.
+ */
+ *(ptr++) = 0;
+ *(ptr++) = 0;
+
+ /*
+ * PC
+ * use return address (RA), i.e. the moment after return from resume()
+ */
+ *(ptr++) = p->thread.reg31;
}
void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
diff --git a/arch/mips/kernel/mips-r2-to-r6-emul.c b/arch/mips/kernel/mips-r2-to-r6-emul.c
index af27334..e338406 100644
--- a/arch/mips/kernel/mips-r2-to-r6-emul.c
+++ b/arch/mips/kernel/mips-r2-to-r6-emul.c
@@ -434,8 +434,8 @@
rs = regs->regs[MIPSInst_RS(ir)];
res = (u64)rt * (u64)rs;
rt = res;
- regs->lo = (s64)rt;
- regs->hi = (s64)(res >> 32);
+ regs->lo = (s64)(s32)rt;
+ regs->hi = (s64)(s32)(res >> 32);
MIPS_R2_STATS(muls);
@@ -671,9 +671,9 @@
res += ((((s64)rt) << 32) | (u32)rs);
rt = res;
- regs->lo = (s64)rt;
+ regs->lo = (s64)(s32)rt;
rs = res >> 32;
- regs->hi = (s64)rs;
+ regs->hi = (s64)(s32)rs;
MIPS_R2_STATS(dsps);
@@ -729,9 +729,9 @@
res = ((((s64)rt) << 32) | (u32)rs) - res;
rt = res;
- regs->lo = (s64)rt;
+ regs->lo = (s64)(s32)rt;
rs = res >> 32;
- regs->hi = (s64)rs;
+ regs->hi = (s64)(s32)rs;
MIPS_R2_STATS(dsps);
diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c
index f63a289..0b3e58a 100644
--- a/arch/mips/kernel/pm-cps.c
+++ b/arch/mips/kernel/pm-cps.c
@@ -55,7 +55,6 @@
* state. Actually per-core rather than per-CPU.
*/
static DEFINE_PER_CPU_ALIGNED(u32*, ready_count);
-static DEFINE_PER_CPU_ALIGNED(void*, ready_count_alloc);
/* Indicates online CPUs coupled with the current CPU */
static DEFINE_PER_CPU_ALIGNED(cpumask_t, online_coupled);
@@ -625,7 +624,6 @@
{
enum cps_pm_state state;
unsigned core = cpu_data[cpu].core;
- unsigned dlinesz = cpu_data[cpu].dcache.linesz;
void *entry_fn, *core_rc;
for (state = CPS_PM_NC_WAIT; state < CPS_PM_STATE_COUNT; state++) {
@@ -645,16 +643,11 @@
}
if (!per_cpu(ready_count, core)) {
- core_rc = kmalloc(dlinesz * 2, GFP_KERNEL);
+ core_rc = kmalloc(sizeof(u32), GFP_KERNEL);
if (!core_rc) {
pr_err("Failed allocate core %u ready_count\n", core);
return -ENOMEM;
}
- per_cpu(ready_count_alloc, core) = core_rc;
-
- /* Ensure ready_count is aligned to a cacheline boundary */
- core_rc += dlinesz - 1;
- core_rc = (void *)((unsigned long)core_rc & ~(dlinesz - 1));
per_cpu(ready_count, core) = core_rc;
}
diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c
index 298b2b7..f1fab6f 100644
--- a/arch/mips/kernel/proc.c
+++ b/arch/mips/kernel/proc.c
@@ -83,7 +83,7 @@
}
seq_printf(m, "isa\t\t\t:");
- if (cpu_has_mips_r1)
+ if (cpu_has_mips_1)
seq_printf(m, " mips1");
if (cpu_has_mips_2)
seq_printf(m, "%s", " mips2");
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index fc537d1..8c26eca 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -32,6 +32,7 @@
#include <asm/cpu.h>
#include <asm/dsp.h>
#include <asm/fpu.h>
+#include <asm/irq.h>
#include <asm/msa.h>
#include <asm/pgtable.h>
#include <asm/mipsregs.h>
@@ -552,7 +553,19 @@
unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
unsigned long pc, unsigned long *ra)
{
- unsigned long stack_page = (unsigned long)task_stack_page(task);
+ unsigned long stack_page = 0;
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ if (on_irq_stack(cpu, *sp)) {
+ stack_page = (unsigned long)irq_stack[cpu];
+ break;
+ }
+ }
+
+ if (!stack_page)
+ stack_page = (unsigned long)task_stack_page(task);
+
return unwind_stack_by_address(stack_page, sp, pc, ra);
}
#endif
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index 74d5815..24c115a 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -485,7 +485,8 @@
&target->thread.fpu,
0, sizeof(elf_fpregset_t));
- for (i = 0; i < NUM_FPU_REGS; i++) {
+ BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
+ for (i = 0; i < NUM_FPU_REGS && count >= sizeof(elf_fpreg_t); i++) {
err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&fpr_val, i * sizeof(elf_fpreg_t),
(i + 1) * sizeof(elf_fpreg_t));
@@ -926,7 +927,7 @@
audit_syscall_exit(regs);
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
- trace_sys_exit(regs, regs->regs[2]);
+ trace_sys_exit(regs, regs_return_value(regs));
if (test_thread_flag(TIF_SYSCALL_TRACE))
tracehook_report_syscall_exit(regs, 0);
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index 2d23c83..29b0c5f 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -372,7 +372,7 @@
PTR sys_writev
PTR sys_cacheflush
PTR sys_cachectl
- PTR sys_sysmips
+ PTR __sys_sysmips
PTR sys_ni_syscall /* 4150 */
PTR sys_getsid
PTR sys_fdatasync
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index deac633..a6323a9 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -312,7 +312,7 @@
PTR sys_sched_getaffinity
PTR sys_cacheflush
PTR sys_cachectl
- PTR sys_sysmips
+ PTR __sys_sysmips
PTR sys_io_setup /* 5200 */
PTR sys_io_destroy
PTR sys_io_getevents
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index ee93d5f..e0fdca8 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -298,7 +298,7 @@
PTR compat_sys_sched_getaffinity
PTR sys_cacheflush
PTR sys_cachectl
- PTR sys_sysmips
+ PTR __sys_sysmips
PTR compat_sys_io_setup /* 6200 */
PTR sys_io_destroy
PTR compat_sys_io_getevents
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index b77052e..87c6971 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -367,7 +367,7 @@
PTR compat_sys_writev
PTR sys_cacheflush
PTR sys_cachectl
- PTR sys_sysmips
+ PTR __sys_sysmips
PTR sys_ni_syscall /* 4150 */
PTR sys_getsid
PTR sys_fdatasync
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
index 53a7ef9..4234b2d 100644
--- a/arch/mips/kernel/syscall.c
+++ b/arch/mips/kernel/syscall.c
@@ -28,6 +28,7 @@
#include <linux/elf.h>
#include <asm/asm.h>
+#include <asm/asm-eva.h>
#include <asm/branch.h>
#include <asm/cachectl.h>
#include <asm/cacheflush.h>
@@ -138,10 +139,12 @@
__asm__ __volatile__ (
" .set "MIPS_ISA_ARCH_LEVEL" \n"
" li %[err], 0 \n"
- "1: ll %[old], (%[addr]) \n"
+ "1: \n"
+ user_ll("%[old]", "(%[addr])")
" move %[tmp], %[new] \n"
- "2: sc %[tmp], (%[addr]) \n"
- " bnez %[tmp], 4f \n"
+ "2: \n"
+ user_sc("%[tmp]", "(%[addr])")
+ " beqz %[tmp], 4f \n"
"3: \n"
" .insn \n"
" .subsection 2 \n"
@@ -199,6 +202,12 @@
unreachable();
}
+/*
+ * mips_atomic_set() normally returns directly via syscall_exit potentially
+ * clobbering static registers, so be sure to preserve them.
+ */
+save_static_function(sys_sysmips);
+
SYSCALL_DEFINE3(sysmips, long, cmd, long, arg1, long, arg2)
{
switch (cmd) {
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 99a4022..31ca2ed 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -194,6 +194,8 @@
{
struct pt_regs regs;
mm_segment_t old_fs = get_fs();
+
+ regs.cp0_status = KSU_KERNEL;
if (sp) {
regs.regs[29] = (unsigned long)sp;
regs.regs[31] = 0;
diff --git a/arch/mips/lantiq/xway/sysctrl.c b/arch/mips/lantiq/xway/sysctrl.c
index 3e390a4..daf580c 100644
--- a/arch/mips/lantiq/xway/sysctrl.c
+++ b/arch/mips/lantiq/xway/sysctrl.c
@@ -467,7 +467,7 @@
if (!np_xbar)
panic("Failed to load xbar nodes from devicetree");
- if (of_address_to_resource(np_pmu, 0, &res_xbar))
+ if (of_address_to_resource(np_xbar, 0, &res_xbar))
panic("Failed to get xbar resources");
if (request_mem_region(res_xbar.start, resource_size(&res_xbar),
res_xbar.name) < 0)
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
index 734a2c7..6da2e4a6 100644
--- a/arch/mips/math-emu/cp1emu.c
+++ b/arch/mips/math-emu/cp1emu.c
@@ -2496,6 +2496,35 @@
return 0;
}
+/*
+ * Emulate FPU instructions.
+ *
+ * If we use FPU hardware, then we have been typically called to handle
+ * an unimplemented operation, such as where an operand is a NaN or
+ * denormalized. In that case exit the emulation loop after a single
+ * iteration so as to let hardware execute any subsequent instructions.
+ *
+ * If we have no FPU hardware or it has been disabled, then continue
+ * emulating floating-point instructions until one of these conditions
+ * has occurred:
+ *
+ * - a non-FPU instruction has been encountered,
+ *
+ * - an attempt to emulate has ended with a signal,
+ *
+ * - the ISA mode has been switched.
+ *
+ * We need to terminate the emulation loop if we got switched to the
+ * MIPS16 mode, whether supported or not, so that we do not attempt
+ * to emulate a MIPS16 instruction as a regular MIPS FPU instruction.
+ * Similarly if we got switched to the microMIPS mode and only the
+ * regular MIPS mode is supported, so that we do not attempt to emulate
+ * a microMIPS instruction as a regular MIPS FPU instruction. Or if
+ * we got switched to the regular MIPS mode and only the microMIPS mode
+ * is supported, so that we do not attempt to emulate a regular MIPS
+ * instruction that should cause an Address Error exception instead.
+ * For simplicity we always terminate upon an ISA mode switch.
+ */
int fpu_emulator_cop1Handler(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
int has_fpu, void *__user *fault_addr)
{
@@ -2581,6 +2610,15 @@
break;
if (sig)
break;
+ /*
+ * We have to check for the ISA bit explicitly here,
+ * because `get_isa16_mode' may return 0 if support
+ * for code compression has been globally disabled,
+ * or otherwise we may produce the wrong signal or
+ * even proceed successfully where we must not.
+ */
+ if ((xcp->cp0_epc ^ prevepc) & 0x1)
+ break;
cond_resched();
} while (xcp->cp0_epc > prevepc);
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 29f73e0..63b7d6f 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -757,7 +757,8 @@
static void build_huge_handler_tail(u32 **p, struct uasm_reloc **r,
struct uasm_label **l,
unsigned int pte,
- unsigned int ptr)
+ unsigned int ptr,
+ unsigned int flush)
{
#ifdef CONFIG_SMP
UASM_i_SC(p, pte, 0, ptr);
@@ -766,6 +767,22 @@
#else
UASM_i_SW(p, pte, 0, ptr);
#endif
+ if (cpu_has_ftlb && flush) {
+ BUG_ON(!cpu_has_tlbinv);
+
+ UASM_i_MFC0(p, ptr, C0_ENTRYHI);
+ uasm_i_ori(p, ptr, ptr, MIPS_ENTRYHI_EHINV);
+ UASM_i_MTC0(p, ptr, C0_ENTRYHI);
+ build_tlb_write_entry(p, l, r, tlb_indexed);
+
+ uasm_i_xori(p, ptr, ptr, MIPS_ENTRYHI_EHINV);
+ UASM_i_MTC0(p, ptr, C0_ENTRYHI);
+ build_huge_update_entries(p, pte, ptr);
+ build_huge_tlb_write_entry(p, l, r, pte, tlb_random, 0);
+
+ return;
+ }
+
build_huge_update_entries(p, pte, ptr);
build_huge_tlb_write_entry(p, l, r, pte, tlb_indexed, 0);
}
@@ -2082,7 +2099,7 @@
uasm_l_tlbl_goaround2(&l, p);
}
uasm_i_ori(&p, wr.r1, wr.r1, (_PAGE_ACCESSED | _PAGE_VALID));
- build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
+ build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 1);
#endif
uasm_l_nopage_tlbl(&l, p);
@@ -2137,7 +2154,7 @@
build_tlb_probe_entry(&p);
uasm_i_ori(&p, wr.r1, wr.r1,
_PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
- build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
+ build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 1);
#endif
uasm_l_nopage_tlbs(&l, p);
@@ -2193,7 +2210,7 @@
build_tlb_probe_entry(&p);
uasm_i_ori(&p, wr.r1, wr.r1,
_PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
- build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
+ build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 0);
#endif
uasm_l_nopage_tlbm(&l, p);
diff --git a/arch/mips/ralink/mt7620.c b/arch/mips/ralink/mt7620.c
index dfb04fc..48d6349 100644
--- a/arch/mips/ralink/mt7620.c
+++ b/arch/mips/ralink/mt7620.c
@@ -107,31 +107,31 @@
};
static struct rt2880_pmx_func pwm1_grp_mt7628[] = {
- FUNC("sdcx", 3, 19, 1),
+ FUNC("sdxc d6", 3, 19, 1),
FUNC("utif", 2, 19, 1),
FUNC("gpio", 1, 19, 1),
- FUNC("pwm", 0, 19, 1),
+ FUNC("pwm1", 0, 19, 1),
};
static struct rt2880_pmx_func pwm0_grp_mt7628[] = {
- FUNC("sdcx", 3, 18, 1),
+ FUNC("sdxc d7", 3, 18, 1),
FUNC("utif", 2, 18, 1),
FUNC("gpio", 1, 18, 1),
- FUNC("pwm", 0, 18, 1),
+ FUNC("pwm0", 0, 18, 1),
};
static struct rt2880_pmx_func uart2_grp_mt7628[] = {
- FUNC("sdcx", 3, 20, 2),
+ FUNC("sdxc d5 d4", 3, 20, 2),
FUNC("pwm", 2, 20, 2),
FUNC("gpio", 1, 20, 2),
- FUNC("uart", 0, 20, 2),
+ FUNC("uart2", 0, 20, 2),
};
static struct rt2880_pmx_func uart1_grp_mt7628[] = {
- FUNC("sdcx", 3, 45, 2),
+ FUNC("sw_r", 3, 45, 2),
FUNC("pwm", 2, 45, 2),
FUNC("gpio", 1, 45, 2),
- FUNC("uart", 0, 45, 2),
+ FUNC("uart1", 0, 45, 2),
};
static struct rt2880_pmx_func i2c_grp_mt7628[] = {
@@ -143,21 +143,21 @@
static struct rt2880_pmx_func refclk_grp_mt7628[] = { FUNC("reclk", 0, 36, 1) };
static struct rt2880_pmx_func perst_grp_mt7628[] = { FUNC("perst", 0, 37, 1) };
-static struct rt2880_pmx_func wdt_grp_mt7628[] = { FUNC("wdt", 0, 15, 38) };
+static struct rt2880_pmx_func wdt_grp_mt7628[] = { FUNC("wdt", 0, 38, 1) };
static struct rt2880_pmx_func spi_grp_mt7628[] = { FUNC("spi", 0, 7, 4) };
static struct rt2880_pmx_func sd_mode_grp_mt7628[] = {
FUNC("jtag", 3, 22, 8),
FUNC("utif", 2, 22, 8),
FUNC("gpio", 1, 22, 8),
- FUNC("sdcx", 0, 22, 8),
+ FUNC("sdxc", 0, 22, 8),
};
static struct rt2880_pmx_func uart0_grp_mt7628[] = {
FUNC("-", 3, 12, 2),
FUNC("-", 2, 12, 2),
FUNC("gpio", 1, 12, 2),
- FUNC("uart", 0, 12, 2),
+ FUNC("uart0", 0, 12, 2),
};
static struct rt2880_pmx_func i2s_grp_mt7628[] = {
@@ -171,7 +171,7 @@
FUNC("-", 3, 6, 1),
FUNC("refclk", 2, 6, 1),
FUNC("gpio", 1, 6, 1),
- FUNC("spi", 0, 6, 1),
+ FUNC("spi cs1", 0, 6, 1),
};
static struct rt2880_pmx_func spis_grp_mt7628[] = {
@@ -188,28 +188,44 @@
FUNC("gpio", 0, 11, 1),
};
-#define MT7628_GPIO_MODE_MASK 0x3
+static struct rt2880_pmx_func wled_kn_grp_mt7628[] = {
+ FUNC("rsvd", 3, 35, 1),
+ FUNC("rsvd", 2, 35, 1),
+ FUNC("gpio", 1, 35, 1),
+ FUNC("wled_kn", 0, 35, 1),
+};
-#define MT7628_GPIO_MODE_PWM1 30
-#define MT7628_GPIO_MODE_PWM0 28
-#define MT7628_GPIO_MODE_UART2 26
-#define MT7628_GPIO_MODE_UART1 24
-#define MT7628_GPIO_MODE_I2C 20
-#define MT7628_GPIO_MODE_REFCLK 18
-#define MT7628_GPIO_MODE_PERST 16
-#define MT7628_GPIO_MODE_WDT 14
-#define MT7628_GPIO_MODE_SPI 12
-#define MT7628_GPIO_MODE_SDMODE 10
-#define MT7628_GPIO_MODE_UART0 8
-#define MT7628_GPIO_MODE_I2S 6
-#define MT7628_GPIO_MODE_CS1 4
-#define MT7628_GPIO_MODE_SPIS 2
-#define MT7628_GPIO_MODE_GPIO 0
+static struct rt2880_pmx_func wled_an_grp_mt7628[] = {
+ FUNC("rsvd", 3, 44, 1),
+ FUNC("rsvd", 2, 44, 1),
+ FUNC("gpio", 1, 44, 1),
+ FUNC("wled_an", 0, 44, 1),
+};
+
+#define MT7628_GPIO_MODE_MASK 0x3
+
+#define MT7628_GPIO_MODE_WLED_KN 48
+#define MT7628_GPIO_MODE_WLED_AN 32
+#define MT7628_GPIO_MODE_PWM1 30
+#define MT7628_GPIO_MODE_PWM0 28
+#define MT7628_GPIO_MODE_UART2 26
+#define MT7628_GPIO_MODE_UART1 24
+#define MT7628_GPIO_MODE_I2C 20
+#define MT7628_GPIO_MODE_REFCLK 18
+#define MT7628_GPIO_MODE_PERST 16
+#define MT7628_GPIO_MODE_WDT 14
+#define MT7628_GPIO_MODE_SPI 12
+#define MT7628_GPIO_MODE_SDMODE 10
+#define MT7628_GPIO_MODE_UART0 8
+#define MT7628_GPIO_MODE_I2S 6
+#define MT7628_GPIO_MODE_CS1 4
+#define MT7628_GPIO_MODE_SPIS 2
+#define MT7628_GPIO_MODE_GPIO 0
static struct rt2880_pmx_group mt7628an_pinmux_data[] = {
- GRP_G("pmw1", pwm1_grp_mt7628, MT7628_GPIO_MODE_MASK,
+ GRP_G("pwm1", pwm1_grp_mt7628, MT7628_GPIO_MODE_MASK,
1, MT7628_GPIO_MODE_PWM1),
- GRP_G("pmw1", pwm0_grp_mt7628, MT7628_GPIO_MODE_MASK,
+ GRP_G("pwm0", pwm0_grp_mt7628, MT7628_GPIO_MODE_MASK,
1, MT7628_GPIO_MODE_PWM0),
GRP_G("uart2", uart2_grp_mt7628, MT7628_GPIO_MODE_MASK,
1, MT7628_GPIO_MODE_UART2),
@@ -233,6 +249,10 @@
1, MT7628_GPIO_MODE_SPIS),
GRP_G("gpio", gpio_grp_mt7628, MT7628_GPIO_MODE_MASK,
1, MT7628_GPIO_MODE_GPIO),
+ GRP_G("wled_an", wled_an_grp_mt7628, MT7628_GPIO_MODE_MASK,
+ 1, MT7628_GPIO_MODE_WLED_AN),
+ GRP_G("wled_kn", wled_kn_grp_mt7628, MT7628_GPIO_MODE_MASK,
+ 1, MT7628_GPIO_MODE_WLED_KN),
{ 0 }
};
@@ -439,7 +459,7 @@
ralink_clk_add("10000c00.uartlite", periph_rate);
ralink_clk_add("10180000.wmac", xtal_rate);
- if (IS_ENABLED(CONFIG_USB) && is_mt76x8()) {
+ if (IS_ENABLED(CONFIG_USB) && !is_mt76x8()) {
/*
* When the CPU goes into sleep mode, the BUS clock will be
* too low for USB to function properly. Adjust the busses
diff --git a/arch/mips/ralink/rt288x.c b/arch/mips/ralink/rt288x.c
index 15506a1..9dd6774 100644
--- a/arch/mips/ralink/rt288x.c
+++ b/arch/mips/ralink/rt288x.c
@@ -109,5 +109,5 @@
soc_info->mem_size_max = RT2880_MEM_SIZE_MAX;
rt2880_pinmux_data = rt2880_pinmux_data_act;
- ralink_soc == RT2880_SOC;
+ ralink_soc = RT2880_SOC;
}
diff --git a/arch/mips/ralink/rt3883.c b/arch/mips/ralink/rt3883.c
index f42834c..3c57509 100644
--- a/arch/mips/ralink/rt3883.c
+++ b/arch/mips/ralink/rt3883.c
@@ -36,7 +36,7 @@
static struct rt2880_pmx_func jtag_func[] = { FUNC("jtag", 0, 17, 5) };
static struct rt2880_pmx_func mdio_func[] = { FUNC("mdio", 0, 22, 2) };
static struct rt2880_pmx_func lna_a_func[] = { FUNC("lna a", 0, 32, 3) };
-static struct rt2880_pmx_func lna_g_func[] = { FUNC("lna a", 0, 35, 3) };
+static struct rt2880_pmx_func lna_g_func[] = { FUNC("lna g", 0, 35, 3) };
static struct rt2880_pmx_func pci_func[] = {
FUNC("pci-dev", 0, 40, 32),
FUNC("pci-host2", 1, 40, 32),
@@ -44,7 +44,7 @@
FUNC("pci-fnc", 3, 40, 32)
};
static struct rt2880_pmx_func ge1_func[] = { FUNC("ge1", 0, 72, 12) };
-static struct rt2880_pmx_func ge2_func[] = { FUNC("ge1", 0, 84, 12) };
+static struct rt2880_pmx_func ge2_func[] = { FUNC("ge2", 0, 84, 12) };
static struct rt2880_pmx_group rt3883_pinmux_data[] = {
GRP("i2c", i2c_func, 1, RT3883_GPIO_MODE_I2C),
diff --git a/arch/mn10300/include/asm/thread_info.h b/arch/mn10300/include/asm/thread_info.h
index 4861a78..f5f90bb 100644
--- a/arch/mn10300/include/asm/thread_info.h
+++ b/arch/mn10300/include/asm/thread_info.h
@@ -115,7 +115,7 @@
}
#ifndef CONFIG_KGDB
-void arch_release_thread_info(struct thread_info *ti);
+void arch_release_thread_stack(unsigned long *stack);
#endif
#define get_thread_info(ti) get_task_struct((ti)->task)
#define put_thread_info(ti) put_task_struct((ti)->task)
diff --git a/arch/mn10300/kernel/kgdb.c b/arch/mn10300/kernel/kgdb.c
index 9977082..2d7986c 100644
--- a/arch/mn10300/kernel/kgdb.c
+++ b/arch/mn10300/kernel/kgdb.c
@@ -397,8 +397,9 @@
* single-step state is cleared. At this point the breakpoints should have
* been removed by __switch_to().
*/
-void arch_release_thread_info(struct thread_info *ti)
+void arch_release_thread_stack(unsigned long *stack)
{
+ struct thread_info *ti = (void *)stack;
if (kgdb_sstep_thread == ti) {
kgdb_sstep_thread = NULL;
diff --git a/arch/nios2/kernel/prom.c b/arch/nios2/kernel/prom.c
index 718dd19..de73beb 100644
--- a/arch/nios2/kernel/prom.c
+++ b/arch/nios2/kernel/prom.c
@@ -48,6 +48,13 @@
return alloc_bootmem_align(size, align);
}
+int __init early_init_dt_reserve_memory_arch(phys_addr_t base, phys_addr_t size,
+ bool nomap)
+{
+ reserve_bootmem(base, size, BOOTMEM_DEFAULT);
+ return 0;
+}
+
void __init early_init_devtree(void *params)
{
__be32 *dtb = (u32 *)__dtb_start;
diff --git a/arch/nios2/kernel/setup.c b/arch/nios2/kernel/setup.c
index a4ff86d..6c4e351 100644
--- a/arch/nios2/kernel/setup.c
+++ b/arch/nios2/kernel/setup.c
@@ -195,6 +195,9 @@
}
#endif /* CONFIG_BLK_DEV_INITRD */
+ early_init_fdt_reserve_self();
+ early_init_fdt_scan_reserved_mem();
+
unflatten_and_copy_device_tree();
setup_cpuinfo();
diff --git a/arch/openrisc/kernel/vmlinux.lds.S b/arch/openrisc/kernel/vmlinux.lds.S
index d936de4..341fc08 100644
--- a/arch/openrisc/kernel/vmlinux.lds.S
+++ b/arch/openrisc/kernel/vmlinux.lds.S
@@ -38,6 +38,8 @@
/* Read-only sections, merged into text segment: */
. = LOAD_BASE ;
+ _text = .;
+
/* _s_kernel_ro must be page aligned */
. = ALIGN(PAGE_SIZE);
_s_kernel_ro = .;
diff --git a/arch/parisc/include/asm/dma-mapping.h b/arch/parisc/include/asm/dma-mapping.h
index d8d60a5..f537252 100644
--- a/arch/parisc/include/asm/dma-mapping.h
+++ b/arch/parisc/include/asm/dma-mapping.h
@@ -39,6 +39,8 @@
** flush/purge and allocate "regular" cacheable pages for everything.
*/
+#define DMA_ERROR_CODE (~(dma_addr_t)0)
+
#ifdef CONFIG_PA11
extern struct hppa_dma_ops pcxl_dma_ops;
extern struct hppa_dma_ops pcx_dma_ops;
@@ -209,12 +211,13 @@
break;
}
}
- BUG_ON(!dev->platform_data);
return dev->platform_data;
}
-
-#define GET_IOC(dev) (HBA_DATA(parisc_walk_tree(dev))->iommu)
-
+
+#define GET_IOC(dev) ({ \
+ void *__pdata = parisc_walk_tree(dev); \
+ __pdata ? HBA_DATA(__pdata)->iommu : NULL; \
+})
#ifdef CONFIG_IOMMU_CCIO
struct parisc_device;
diff --git a/arch/parisc/include/asm/mmu_context.h b/arch/parisc/include/asm/mmu_context.h
index 59be257..a812262 100644
--- a/arch/parisc/include/asm/mmu_context.h
+++ b/arch/parisc/include/asm/mmu_context.h
@@ -49,15 +49,26 @@
mtctl(__space_to_prot(context), 8);
}
-static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk)
+static inline void switch_mm_irqs_off(struct mm_struct *prev,
+ struct mm_struct *next, struct task_struct *tsk)
{
-
if (prev != next) {
mtctl(__pa(next->pgd), 25);
load_context(next->context);
}
}
+static inline void switch_mm(struct mm_struct *prev,
+ struct mm_struct *next, struct task_struct *tsk)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ switch_mm_irqs_off(prev, next, tsk);
+ local_irq_restore(flags);
+}
+#define switch_mm_irqs_off switch_mm_irqs_off
+
#define deactivate_mm(tsk,mm) do { } while (0)
static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
index d4ffcfb..041e1f9 100644
--- a/arch/parisc/kernel/syscall_table.S
+++ b/arch/parisc/kernel/syscall_table.S
@@ -361,7 +361,7 @@
ENTRY_SAME(ni_syscall) /* 263: reserved for vserver */
ENTRY_SAME(add_key)
ENTRY_SAME(request_key) /* 265 */
- ENTRY_SAME(keyctl)
+ ENTRY_COMP(keyctl)
ENTRY_SAME(ioprio_set)
ENTRY_SAME(ioprio_get)
ENTRY_SAME(inotify_init)
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index 16dbe81..2f33a67 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -298,7 +298,7 @@
case 15: /* Data TLB miss fault/Data page fault */
/* send SIGSEGV when outside of vma */
if (!vma ||
- address < vma->vm_start || address > vma->vm_end) {
+ address < vma->vm_start || address >= vma->vm_end) {
si.si_signo = SIGSEGV;
si.si_code = SEGV_MAPERR;
break;
diff --git a/arch/powerpc/boot/zImage.lds.S b/arch/powerpc/boot/zImage.lds.S
index 861e721..f080abf 100644
--- a/arch/powerpc/boot/zImage.lds.S
+++ b/arch/powerpc/boot/zImage.lds.S
@@ -68,6 +68,7 @@
}
#ifdef CONFIG_PPC64_BOOT_WRAPPER
+ . = ALIGN(256);
.got :
{
__toc_start = .;
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
index 55f106e..039c4b9 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -460,7 +460,7 @@
* Atomically increments @v by 1, so long as @v is non-zero.
* Returns non-zero if @v was non-zero, and zero otherwise.
*/
-static __inline__ long atomic64_inc_not_zero(atomic64_t *v)
+static __inline__ int atomic64_inc_not_zero(atomic64_t *v)
{
long t1, t2;
@@ -479,7 +479,7 @@
: "r" (&v->counter)
: "cc", "xer", "memory");
- return t1;
+ return t1 != 0;
}
#endif /* __powerpc64__ */
diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
index ee46ffe..743ad7a 100644
--- a/arch/powerpc/include/asm/elf.h
+++ b/arch/powerpc/include/asm/elf.h
@@ -23,12 +23,13 @@
#define CORE_DUMP_USE_REGSET
#define ELF_EXEC_PAGESIZE PAGE_SIZE
-/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
- use of this is to invoke "./ld.so someprog" to test out a new version of
- the loader. We need to make sure that it is out of the way of the program
- that it will "exec", and that there is sufficient room for the brk. */
-
-#define ELF_ET_DYN_BASE 0x20000000
+/*
+ * This is the base location for PIE (ET_DYN with INTERP) loads. On
+ * 64-bit, this is raised to 4GB to leave the entire 32-bit address
+ * space open for things that want to use the area for 32-bit pointers.
+ */
+#define ELF_ET_DYN_BASE (is_32bit_task() ? 0x000400000UL : \
+ 0x100000000UL)
#define ELF_CORE_EFLAGS (is_elf2_task() ? 2 : 0)
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 627d129..ca372bb 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -1236,7 +1236,7 @@
" .llong 0\n" \
".previous" \
: "=r" (rval) \
- : "i" (CPU_FTR_CELL_TB_BUG), "i" (SPRN_TBRL)); \
+ : "i" (CPU_FTR_CELL_TB_BUG), "i" (SPRN_TBRL) : "cr0"); \
rval;})
#else
#define mftb() ({unsigned long rval; \
diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
index 86150fb..91e5c17 100644
--- a/arch/powerpc/kernel/align.c
+++ b/arch/powerpc/kernel/align.c
@@ -808,14 +808,25 @@
nb = aligninfo[instr].len;
flags = aligninfo[instr].flags;
- /* ldbrx/stdbrx overlap lfs/stfs in the DSISR unfortunately */
- if (IS_XFORM(instruction) && ((instruction >> 1) & 0x3ff) == 532) {
- nb = 8;
- flags = LD+SW;
- } else if (IS_XFORM(instruction) &&
- ((instruction >> 1) & 0x3ff) == 660) {
- nb = 8;
- flags = ST+SW;
+ /*
+ * Handle some cases which give overlaps in the DSISR values.
+ */
+ if (IS_XFORM(instruction)) {
+ switch (get_xop(instruction)) {
+ case 532: /* ldbrx */
+ nb = 8;
+ flags = LD+SW;
+ break;
+ case 660: /* stdbrx */
+ nb = 8;
+ flags = ST+SW;
+ break;
+ case 20: /* lwarx */
+ case 84: /* ldarx */
+ case 116: /* lharx */
+ case 276: /* lqarx */
+ return 0; /* not emulated ever */
+ }
}
/* Byteswap little endian loads and stores */
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index 98949b0..6696c19 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -304,9 +304,17 @@
*
* For pHyp, we have to enable IO for log retrieval. Otherwise,
* 0xFF's is always returned from PCI config space.
+ *
+ * When the @severity is EEH_LOG_PERM, the PE is going to be
+ * removed. Prior to that, the drivers for devices included in
+ * the PE will be closed. The drivers rely on working IO path
+ * to bring the devices to quiet state. Otherwise, PCI traffic
+ * from those devices after they are removed is likely to cause
+ * another unexpected EEH error.
*/
if (!(pe->type & EEH_PE_PHB)) {
- if (eeh_has_flag(EEH_ENABLE_IO_FOR_LOG))
+ if (eeh_has_flag(EEH_ENABLE_IO_FOR_LOG) ||
+ severity == EEH_LOG_PERM)
eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);
/*
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index c314db8..9837c98 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -655,7 +655,7 @@
*/
#define MAX_WAIT_FOR_RECOVERY 300
-static void eeh_handle_normal_event(struct eeh_pe *pe)
+static bool eeh_handle_normal_event(struct eeh_pe *pe)
{
struct pci_bus *frozen_bus;
int rc = 0;
@@ -665,7 +665,7 @@
if (!frozen_bus) {
pr_err("%s: Cannot find PCI bus for PHB#%d-PE#%x\n",
__func__, pe->phb->global_number, pe->addr);
- return;
+ return false;
}
eeh_pe_update_time_stamp(pe);
@@ -790,7 +790,7 @@
pr_info("EEH: Notify device driver to resume\n");
eeh_pe_dev_traverse(pe, eeh_report_resume, NULL);
- return;
+ return false;
excess_failures:
/*
@@ -831,7 +831,11 @@
pci_lock_rescan_remove();
pcibios_remove_pci_devices(frozen_bus);
pci_unlock_rescan_remove();
+
+ /* The passed PE should no longer be used */
+ return true;
}
+ return false;
}
static void eeh_handle_special_event(void)
@@ -897,7 +901,14 @@
*/
if (rc == EEH_NEXT_ERR_FROZEN_PE ||
rc == EEH_NEXT_ERR_FENCED_PHB) {
- eeh_handle_normal_event(pe);
+ /*
+ * eeh_handle_normal_event() can make the PE stale if it
+ * determines that the PE cannot possibly be recovered.
+ * Don't modify the PE state if that's the case.
+ */
+ if (eeh_handle_normal_event(pe))
+ continue;
+
eeh_pe_state_clear(pe, EEH_PE_RECOVERING);
} else {
pci_lock_rescan_remove();
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index edba294..f6fd033 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -716,7 +716,7 @@
addi r8,r1,INT_FRAME_SIZE /* Get the kprobed function entry */
- lwz r3,GPR1(r1)
+ ld r3,GPR1(r1)
subi r3,r3,INT_FRAME_SIZE /* dst: Allocate a trampoline exception frame */
mr r4,r1 /* src: current exception frame */
mr r1,r3 /* Reroute the trampoline frame to r1 */
@@ -730,8 +730,8 @@
addi r6,r6,8
bdnz 2b
- /* Do real store operation to complete stwu */
- lwz r5,GPR1(r1)
+ /* Do real store operation to complete stdu */
+ ld r5,GPR1(r1)
std r8,0(r5)
/* Clear _TIF_EMULATE_STACK_STORE flag */
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index 488e631..5cc93f0 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -735,8 +735,14 @@
andis. r15,r14,(DBSR_IC|DBSR_BT)@h
beq+ 1f
+#ifdef CONFIG_RELOCATABLE
+ ld r15,PACATOC(r13)
+ ld r14,interrupt_base_book3e@got(r15)
+ ld r15,__end_interrupts@got(r15)
+#else
LOAD_REG_IMMEDIATE(r14,interrupt_base_book3e)
LOAD_REG_IMMEDIATE(r15,__end_interrupts)
+#endif
cmpld cr0,r10,r14
cmpld cr1,r10,r15
blt+ cr0,1f
@@ -799,8 +805,14 @@
andis. r15,r14,(DBSR_IC|DBSR_BT)@h
beq+ 1f
+#ifdef CONFIG_RELOCATABLE
+ ld r15,PACATOC(r13)
+ ld r14,interrupt_base_book3e@got(r15)
+ ld r15,__end_interrupts@got(r15)
+#else
LOAD_REG_IMMEDIATE(r14,interrupt_base_book3e)
LOAD_REG_IMMEDIATE(r15,__end_interrupts)
+#endif
cmpld cr0,r10,r14
cmpld cr1,r10,r15
blt+ cr0,1f
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index 7c053f2..1138fec 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -514,6 +514,15 @@
#endif
#endif
+ /*
+ * jprobes use jprobe_return() which skips the normal return
+ * path of the function, and this messes up the accounting of the
+ * function graph tracer.
+ *
+ * Pause function graph tracing while performing the jprobe function.
+ */
+ pause_graph_tracing();
+
return 1;
}
@@ -536,6 +545,8 @@
* saved regs...
*/
memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
+ /* It's OK to start function graph tracing again */
+ unpause_graph_tracing();
preempt_enable_no_resched();
return 1;
}
diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c
index b2eb468..da3c4c3 100644
--- a/arch/powerpc/kernel/mce.c
+++ b/arch/powerpc/kernel/mce.c
@@ -204,6 +204,8 @@
{
int index;
+ add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
+
/*
* For now just print it to console.
* TODO: log this error event to FSP or nvram.
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 5c03a6a..a208232 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -220,6 +220,15 @@
unsigned long lpcr = mfspr(SPRN_LPCR);
mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3);
}
+
+ /*
+ * Fixup HFSCR:TM based on CPU features. The bit is set by our
+ * early asm init because at that point we haven't updated our
+ * CPU features from firmware and device-tree. Here we have,
+ * so let's do it.
+ */
+ if (cpu_has_feature(CPU_FTR_HVMODE) && !cpu_has_feature(CPU_FTR_TM_COMP))
+ mtspr(SPRN_HFSCR, mfspr(SPRN_HFSCR) & ~HFSCR_TM);
}
/*
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 37de90f..e4dcb0a 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -297,8 +297,6 @@
__this_cpu_inc(irq_stat.mce_exceptions);
- add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
-
if (cur_cpu_spec && cur_cpu_spec->machine_check_early)
handled = cur_cpu_spec->machine_check_early(regs);
return handled;
@@ -704,6 +702,8 @@
__this_cpu_inc(irq_stat.mce_exceptions);
+ add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE);
+
/* See if any machine dependent calls. In theory, we would want
* to call the CPU first, and call the ppc_md. one if the CPU
* one returns a positive number. However there is existing code
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 3c3a367..428563b 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -2687,12 +2687,38 @@
{
int r;
int srcu_idx;
+ unsigned long ebb_regs[3] = {}; /* shut up GCC */
+ unsigned long user_tar = 0;
+ unsigned long proc_fscr = 0;
+ unsigned int user_vrsave;
if (!vcpu->arch.sane) {
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
return -EINVAL;
}
+ /*
+ * Don't allow entry with a suspended transaction, because
+ * the guest entry/exit code will lose it.
+ * If the guest has TM enabled, save away their TM-related SPRs
+ * (they will get restored by the TM unavailable interrupt).
+ */
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ if (cpu_has_feature(CPU_FTR_TM) && current->thread.regs &&
+ (current->thread.regs->msr & MSR_TM)) {
+ if (MSR_TM_ACTIVE(current->thread.regs->msr)) {
+ run->exit_reason = KVM_EXIT_FAIL_ENTRY;
+ run->fail_entry.hardware_entry_failure_reason = 0;
+ return -EINVAL;
+ }
+ /* Enable TM so we can read the TM SPRs */
+ mtmsr(mfmsr() | MSR_TM);
+ current->thread.tm_tfhar = mfspr(SPRN_TFHAR);
+ current->thread.tm_tfiar = mfspr(SPRN_TFIAR);
+ current->thread.tm_texasr = mfspr(SPRN_TEXASR);
+ }
+#endif
+
kvmppc_core_prepare_to_enter(vcpu);
/* No need to go into the guest when all we'll do is come back out */
@@ -2715,6 +2741,17 @@
flush_fp_to_thread(current);
flush_altivec_to_thread(current);
flush_vsx_to_thread(current);
+
+ /* Save userspace EBB and other register values */
+ if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
+ ebb_regs[0] = mfspr(SPRN_EBBHR);
+ ebb_regs[1] = mfspr(SPRN_EBBRR);
+ ebb_regs[2] = mfspr(SPRN_BESCR);
+ user_tar = mfspr(SPRN_TAR);
+ proc_fscr = mfspr(SPRN_FSCR);
+ }
+ user_vrsave = mfspr(SPRN_VRSAVE);
+
vcpu->arch.wqp = &vcpu->arch.vcore->wq;
vcpu->arch.pgdir = current->mm->pgd;
vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
@@ -2736,6 +2773,29 @@
}
} while (is_kvmppc_resume_guest(r));
+ /* Restore userspace EBB and other register values */
+ if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
+ mtspr(SPRN_EBBHR, ebb_regs[0]);
+ mtspr(SPRN_EBBRR, ebb_regs[1]);
+ mtspr(SPRN_BESCR, ebb_regs[2]);
+ mtspr(SPRN_TAR, user_tar);
+ mtspr(SPRN_FSCR, proc_fscr);
+ }
+ mtspr(SPRN_VRSAVE, user_vrsave);
+
+ /*
+ * Since we don't do lazy TM reload, we need to reload
+ * the TM registers here.
+ */
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ if (cpu_has_feature(CPU_FTR_TM) && current->thread.regs &&
+ (current->thread.regs->msr & MSR_TM)) {
+ mtspr(SPRN_TFHAR, current->thread.tm_tfhar);
+ mtspr(SPRN_TFIAR, current->thread.tm_tfiar);
+ mtspr(SPRN_TEXASR, current->thread.tm_texasr);
+ }
+#endif
+
out:
vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
atomic_dec(&vcpu->kvm->arch.vcpus_running);
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 1a743f8..ffab926 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -36,6 +36,13 @@
#define NAPPING_CEDE 1
#define NAPPING_NOVCPU 2
+/* Stack frame offsets for kvmppc_hv_entry */
+#define SFS 112
+#define STACK_SLOT_TRAP (SFS-4)
+#define STACK_SLOT_CIABR (SFS-16)
+#define STACK_SLOT_DAWR (SFS-24)
+#define STACK_SLOT_DAWRX (SFS-32)
+
/*
* Call kvmppc_hv_entry in real mode.
* Must be called with interrupts hard-disabled.
@@ -274,10 +281,10 @@
bl kvmhv_accumulate_time
#endif
13: mr r3, r12
- stw r12, 112-4(r1)
+ stw r12, STACK_SLOT_TRAP(r1)
bl kvmhv_commence_exit
nop
- lwz r12, 112-4(r1)
+ lwz r12, STACK_SLOT_TRAP(r1)
b kvmhv_switch_to_host
/*
@@ -489,7 +496,7 @@
*/
mflr r0
std r0, PPC_LR_STKOFF(r1)
- stdu r1, -112(r1)
+ stdu r1, -SFS(r1)
/* Save R1 in the PACA */
std r1, HSTATE_HOST_R1(r13)
@@ -643,6 +650,16 @@
mtspr SPRN_PURR,r7
mtspr SPRN_SPURR,r8
+ /* Save host values of some registers */
+BEGIN_FTR_SECTION
+ mfspr r5, SPRN_CIABR
+ mfspr r6, SPRN_DAWR
+ mfspr r7, SPRN_DAWRX
+ std r5, STACK_SLOT_CIABR(r1)
+ std r6, STACK_SLOT_DAWR(r1)
+ std r7, STACK_SLOT_DAWRX(r1)
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
+
BEGIN_FTR_SECTION
/* Set partition DABR */
/* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
@@ -1266,8 +1283,7 @@
*/
li r0, 0
mtspr SPRN_IAMR, r0
- mtspr SPRN_CIABR, r0
- mtspr SPRN_DAWRX, r0
+ mtspr SPRN_PSPB, r0
mtspr SPRN_TCSCR, r0
mtspr SPRN_WORT, r0
/* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
@@ -1283,6 +1299,7 @@
std r6,VCPU_UAMOR(r9)
li r6,0
mtspr SPRN_AMR,r6
+ mtspr SPRN_UAMOR, r6
/* Switch DSCR back to host value */
mfspr r8, SPRN_DSCR
@@ -1424,6 +1441,16 @@
slbia
ptesync
+ /* Restore host values of some registers */
+BEGIN_FTR_SECTION
+ ld r5, STACK_SLOT_CIABR(r1)
+ ld r6, STACK_SLOT_DAWR(r1)
+ ld r7, STACK_SLOT_DAWRX(r1)
+ mtspr SPRN_CIABR, r5
+ mtspr SPRN_DAWR, r6
+ mtspr SPRN_DAWRX, r7
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
+
/*
* POWER7/POWER8 guest -> host partition switch code.
* We don't have to lock against tlbies but we do
@@ -1533,8 +1560,8 @@
li r0, KVM_GUEST_MODE_NONE
stb r0, HSTATE_IN_GUEST(r13)
- ld r0, 112+PPC_LR_STKOFF(r1)
- addi r1, r1, 112
+ ld r0, SFS+PPC_LR_STKOFF(r1)
+ addi r1, r1, SFS
mtlr r0
blr
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index 5cc2e7a..b379146 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -302,7 +302,6 @@
advance = 0;
printk(KERN_ERR "Couldn't emulate instruction 0x%08x "
"(op %d xop %d)\n", inst, get_op(inst), get_xop(inst));
- kvmppc_core_queue_program(vcpu, 0);
}
}
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
index 4014881..e37162d 100644
--- a/arch/powerpc/lib/sstep.c
+++ b/arch/powerpc/lib/sstep.c
@@ -687,8 +687,10 @@
case 19:
switch ((instr >> 1) & 0x3ff) {
case 0: /* mcrf */
- rd = (instr >> 21) & 0x1c;
- ra = (instr >> 16) & 0x1c;
+ rd = 7 - ((instr >> 23) & 0x7);
+ ra = 7 - ((instr >> 18) & 0x7);
+ rd *= 4;
+ ra *= 4;
val = (regs->ccr >> ra) & 0xf;
regs->ccr = (regs->ccr & ~(0xfUL << rd)) | (val << rd);
goto instr_done;
@@ -967,6 +969,19 @@
#endif
case 19: /* mfcr */
+ if ((instr >> 20) & 1) {
+ imm = 0xf0000000UL;
+ for (sh = 0; sh < 8; ++sh) {
+ if (instr & (0x80000 >> sh)) {
+ regs->gpr[rd] = regs->ccr & imm;
+ break;
+ }
+ imm >>= 4;
+ }
+
+ goto instr_done;
+ }
+
regs->gpr[rd] = regs->ccr;
regs->gpr[rd] &= 0xffffffffUL;
goto instr_done;
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index c8822af..19d9b2d 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -645,6 +645,10 @@
unsigned long psize = batch->psize;
int ssize = batch->ssize;
int i;
+ unsigned int use_local;
+
+ use_local = local && mmu_has_feature(MMU_FTR_TLBIEL) &&
+ mmu_psize_defs[psize].tlbiel && !cxl_ctx_in_use();
local_irq_save(flags);
@@ -671,8 +675,7 @@
} pte_iterate_hashed_end();
}
- if (mmu_has_feature(MMU_FTR_TLBIEL) &&
- mmu_psize_defs[psize].tlbiel && local) {
+ if (use_local) {
asm volatile("ptesync":::"memory");
for (i = 0; i < number; i++) {
vpn = batch->vpn[i];
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index 4c48b48..0b48ce4 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -179,6 +179,16 @@
b slb_finish_load
8: /* invalid EA */
+ /*
+ * It's possible the bad EA is too large to fit in the SLB cache, which
+ * would mean we'd fail to invalidate it on context switch. So mark the
+ * SLB cache as full so we force a full flush. We also set cr7+eq to
+ * mark the address as a kernel address, so slb_finish_load() skips
+ * trying to insert it into the SLB cache.
+ */
+ li r9,SLB_CACHE_ENTRIES + 1
+ sth r9,PACASLBCACHEPTR(r13)
+ crset 4*cr7+eq
li r10,0 /* BAD_VSID */
li r9,0 /* BAD_VSID */
li r11,SLB_VSID_USER /* flags don't much matter */
diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S
index e45b88a..ae877c7 100644
--- a/arch/powerpc/platforms/powernv/opal-wrappers.S
+++ b/arch/powerpc/platforms/powernv/opal-wrappers.S
@@ -148,7 +148,7 @@
opal_tracepoint_return:
std r3,STK_REG(R31)(r1)
mr r4,r3
- ld r0,STK_REG(R23)(r1)
+ ld r3,STK_REG(R23)(r1)
bl __trace_opal_exit
ld r3,STK_REG(R31)(r1)
addi r1,r1,STACKFRAMESIZE
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
index f244dcb..96536c9 100644
--- a/arch/powerpc/platforms/pseries/dlpar.c
+++ b/arch/powerpc/platforms/pseries/dlpar.c
@@ -280,7 +280,6 @@
if (rc)
return rc;
- of_node_put(dn); /* Must decrement the refcount */
return 0;
}
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index e9ff44c..e8b1027 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -110,6 +110,7 @@
for (i = 0; i < num_lmbs; i++) {
lmbs[i].base_addr = be64_to_cpu(lmbs[i].base_addr);
lmbs[i].drc_index = be32_to_cpu(lmbs[i].drc_index);
+ lmbs[i].aa_index = be32_to_cpu(lmbs[i].aa_index);
lmbs[i].flags = be32_to_cpu(lmbs[i].flags);
}
@@ -553,6 +554,7 @@
for (i = 0; i < num_lmbs; i++) {
lmbs[i].base_addr = cpu_to_be64(lmbs[i].base_addr);
lmbs[i].drc_index = cpu_to_be32(lmbs[i].drc_index);
+ lmbs[i].aa_index = cpu_to_be32(lmbs[i].aa_index);
lmbs[i].flags = cpu_to_be32(lmbs[i].flags);
}
diff --git a/arch/powerpc/platforms/pseries/reconfig.c b/arch/powerpc/platforms/pseries/reconfig.c
index 7c7fcc0..fb695f1 100644
--- a/arch/powerpc/platforms/pseries/reconfig.c
+++ b/arch/powerpc/platforms/pseries/reconfig.c
@@ -82,7 +82,6 @@
of_detach_node(np);
of_node_put(parent);
- of_node_put(np); /* Must decrement the refcount */
return 0;
}
diff --git a/arch/s390/boot/compressed/misc.c b/arch/s390/boot/compressed/misc.c
index 4da604e..ca15613 100644
--- a/arch/s390/boot/compressed/misc.c
+++ b/arch/s390/boot/compressed/misc.c
@@ -141,31 +141,34 @@
unsigned long decompress_kernel(void)
{
- unsigned long output_addr;
- unsigned char *output;
+ void *output, *kernel_end;
- output_addr = ((unsigned long) &_end + HEAP_SIZE + 4095UL) & -4096UL;
- check_ipl_parmblock((void *) 0, output_addr + SZ__bss_start);
- memset(&_bss, 0, &_ebss - &_bss);
- free_mem_ptr = (unsigned long)&_end;
- free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
- output = (unsigned char *) output_addr;
+ output = (void *) ALIGN((unsigned long) &_end + HEAP_SIZE, PAGE_SIZE);
+ kernel_end = output + SZ__bss_start;
+ check_ipl_parmblock((void *) 0, (unsigned long) kernel_end);
#ifdef CONFIG_BLK_DEV_INITRD
/*
* Move the initrd right behind the end of the decompressed
- * kernel image.
+ * kernel image. This also prevents initrd corruption caused by
+ * bss clearing since kernel_end will always be located behind the
+ * current bss section.
*/
- if (INITRD_START && INITRD_SIZE &&
- INITRD_START < (unsigned long) output + SZ__bss_start) {
- check_ipl_parmblock(output + SZ__bss_start,
- INITRD_START + INITRD_SIZE);
- memmove(output + SZ__bss_start,
- (void *) INITRD_START, INITRD_SIZE);
- INITRD_START = (unsigned long) output + SZ__bss_start;
+ if (INITRD_START && INITRD_SIZE && kernel_end > (void *) INITRD_START) {
+ check_ipl_parmblock(kernel_end, INITRD_SIZE);
+ memmove(kernel_end, (void *) INITRD_START, INITRD_SIZE);
+ INITRD_START = (unsigned long) kernel_end;
}
#endif
+ /*
+ * Clear bss section. free_mem_ptr and free_mem_end_ptr need to be
+ * initialized afterwards since they reside in bss.
+ */
+ memset(&_bss, 0, &_ebss - &_bss);
+ free_mem_ptr = (unsigned long) &_end;
+ free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
+
puts("Uncompressing Linux... ");
__decompress(input_data, input_len, NULL, NULL, output, 0, NULL, error);
puts("Ok, booting the kernel.\n");
diff --git a/arch/s390/include/asm/ctl_reg.h b/arch/s390/include/asm/ctl_reg.h
index d7697ab..8e136b88 100644
--- a/arch/s390/include/asm/ctl_reg.h
+++ b/arch/s390/include/asm/ctl_reg.h
@@ -15,7 +15,9 @@
BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long));\
asm volatile( \
" lctlg %1,%2,%0\n" \
- : : "Q" (*(addrtype *)(&array)), "i" (low), "i" (high));\
+ : \
+ : "Q" (*(addrtype *)(&array)), "i" (low), "i" (high) \
+ : "memory"); \
}
#define __ctl_store(array, low, high) { \
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index bab6739..b9eb7b1 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -154,14 +154,13 @@
#define CORE_DUMP_USE_REGSET
#define ELF_EXEC_PAGESIZE 4096
-/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
- use of this is to invoke "./ld.so someprog" to test out a new version of
- the loader. We need to make sure that it is out of the way of the program
- that it will "exec", and that there is sufficient room for the brk. 64-bit
- tasks are aligned to 4GB. */
-#define ELF_ET_DYN_BASE (is_32bit_task() ? \
- (STACK_TOP / 3 * 2) : \
- (STACK_TOP / 3 * 2) & ~((1UL << 32) - 1))
+/*
+ * This is the base location for PIE (ET_DYN with INTERP) loads. On
+ * 64-bit, this is raised to 4GB to leave the entire 32-bit address
+ * space open for things that want to use the area for 32-bit pointers.
+ */
+#define ELF_ET_DYN_BASE (is_compat_task() ? 0x000400000UL : \
+ 0x100000000UL)
/* This yields a mask that user programs can use to figure out what
instruction set this CPU supports. */
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 024f85f..e2c0e4e 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -829,6 +829,8 @@
{
pgste_t pgste;
+ if (pte_present(entry))
+ pte_val(entry) &= ~_PAGE_UNUSED;
if (mm_has_pgste(mm)) {
pgste = pgste_get_lock(ptep);
pgste_val(pgste) &= ~_PGSTE_GPS_ZERO;
diff --git a/arch/s390/include/asm/syscall.h b/arch/s390/include/asm/syscall.h
index 6ba0bf9..6bc941b 100644
--- a/arch/s390/include/asm/syscall.h
+++ b/arch/s390/include/asm/syscall.h
@@ -64,6 +64,12 @@
{
unsigned long mask = -1UL;
+ /*
+ * No arguments for this syscall, there's nothing to do.
+ */
+ if (!n)
+ return;
+
BUG_ON(i + n > 6);
#ifdef CONFIG_COMPAT
if (test_tsk_thread_flag(task, TIF_31BIT))
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index 5c7381c..c8d837f 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -150,7 +150,7 @@
" jg 2b\n" \
".popsection\n" \
EX_TABLE(0b,3b) EX_TABLE(1b,3b) \
- : "=d" (__rc), "=Q" (*(to)) \
+ : "=d" (__rc), "+Q" (*(to)) \
: "d" (size), "Q" (*(from)), \
"d" (__reg0), "K" (-EFAULT) \
: "cc"); \
diff --git a/arch/s390/kernel/crash_dump.c b/arch/s390/kernel/crash_dump.c
index f7c3a61..df46859 100644
--- a/arch/s390/kernel/crash_dump.c
+++ b/arch/s390/kernel/crash_dump.c
@@ -464,6 +464,20 @@
}
/*
+ * Initialize final note (needed for /proc/vmcore code)
+ */
+static void *nt_final(void *ptr)
+{
+ Elf64_Nhdr *note;
+
+ note = (Elf64_Nhdr *) ptr;
+ note->n_namesz = 0;
+ note->n_descsz = 0;
+ note->n_type = 0;
+ return PTR_ADD(ptr, sizeof(Elf64_Nhdr));
+}
+
+/*
* Initialize ELF header (new kernel)
*/
static void *ehdr_init(Elf64_Ehdr *ehdr, int mem_chunk_cnt)
@@ -553,6 +567,7 @@
ptr = fill_cpu_elf_notes(ptr, &sa_ext->sa, sa_ext->vx_regs);
}
ptr = nt_vmcoreinfo(ptr);
+ ptr = nt_final(ptr);
memset(phdr, 0, sizeof(*phdr));
phdr->p_type = PT_NOTE;
phdr->p_offset = notes_offset;
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 424e680..4612ed7 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -229,12 +229,17 @@
lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
.Lsie_done:
# some program checks are suppressing. C code (e.g. do_protection_exception)
-# will rewind the PSW by the ILC, which is 4 bytes in case of SIE. Other
-# instructions between sie64a and .Lsie_done should not cause program
-# interrupts. So lets use a nop (47 00 00 00) as a landing pad.
+# will rewind the PSW by the ILC, which is often 4 bytes in case of SIE. There
+# are some corner cases (e.g. runtime instrumentation) where ILC is unpredictable.
+# Other instructions between sie64a and .Lsie_done should not cause program
+# interrupts. So lets use 3 nops as a landing pad for all possible rewinds.
# See also .Lcleanup_sie
-.Lrewind_pad:
- nop 0
+.Lrewind_pad6:
+ nopr 7
+.Lrewind_pad4:
+ nopr 7
+.Lrewind_pad2:
+ nopr 7
.globl sie_exit
sie_exit:
lg %r14,__SF_EMPTY+8(%r15) # load guest register save area
@@ -247,7 +252,9 @@
stg %r14,__SF_EMPTY+16(%r15) # set exit reason code
j sie_exit
- EX_TABLE(.Lrewind_pad,.Lsie_fault)
+ EX_TABLE(.Lrewind_pad6,.Lsie_fault)
+ EX_TABLE(.Lrewind_pad4,.Lsie_fault)
+ EX_TABLE(.Lrewind_pad2,.Lsie_fault)
EX_TABLE(sie_exit,.Lsie_fault)
#endif
@@ -308,6 +315,7 @@
lg %r14,__LC_VDSO_PER_CPU
lmg %r0,%r10,__PT_R0(%r11)
mvc __LC_RETURN_PSW(16),__PT_PSW(%r11)
+.Lsysc_exit_timer:
stpt __LC_EXIT_TIMER
mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
lmg %r11,%r15,__PT_R11(%r11)
@@ -593,6 +601,7 @@
lg %r14,__LC_VDSO_PER_CPU
lmg %r0,%r10,__PT_R0(%r11)
mvc __LC_RETURN_PSW(16),__PT_PSW(%r11)
+.Lio_exit_timer:
stpt __LC_EXIT_TIMER
mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
lmg %r11,%r15,__PT_R11(%r11)
@@ -1118,15 +1127,23 @@
br %r14
.Lcleanup_sysc_restore:
+ # check if stpt has been executed
clg %r9,BASED(.Lcleanup_sysc_restore_insn)
+ jh 0f
+ mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER
+ cghi %r11,__LC_SAVE_AREA_ASYNC
je 0f
+ mvc __LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
+0: clg %r9,BASED(.Lcleanup_sysc_restore_insn+8)
+ je 1f
lg %r9,24(%r11) # get saved pointer to pt_regs
mvc __LC_RETURN_PSW(16),__PT_PSW(%r9)
mvc 0(64,%r11),__PT_R8(%r9)
lmg %r0,%r7,__PT_R0(%r9)
-0: lmg %r8,%r9,__LC_RETURN_PSW
+1: lmg %r8,%r9,__LC_RETURN_PSW
br %r14
.Lcleanup_sysc_restore_insn:
+ .quad .Lsysc_exit_timer
.quad .Lsysc_done - 4
.Lcleanup_io_tif:
@@ -1134,15 +1151,20 @@
br %r14
.Lcleanup_io_restore:
+ # check if stpt has been executed
clg %r9,BASED(.Lcleanup_io_restore_insn)
- je 0f
+ jh 0f
+ mvc __LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER
+0: clg %r9,BASED(.Lcleanup_io_restore_insn+8)
+ je 1f
lg %r9,24(%r11) # get saved r11 pointer to pt_regs
mvc __LC_RETURN_PSW(16),__PT_PSW(%r9)
mvc 0(64,%r11),__PT_R8(%r9)
lmg %r0,%r7,__PT_R0(%r9)
-0: lmg %r8,%r9,__LC_RETURN_PSW
+1: lmg %r8,%r9,__LC_RETURN_PSW
br %r14
.Lcleanup_io_restore_insn:
+ .quad .Lio_exit_timer
.quad .Lio_done - 4
.Lcleanup_idle:
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index ef7d6c8..f354fd8 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -372,7 +372,7 @@
ro_end = (unsigned long)&_eshared & PAGE_MASK;
for_each_memblock(memory, reg) {
start = reg->base;
- end = reg->base + reg->size - 1;
+ end = reg->base + reg->size;
if (start >= ro_end || end <= ro_start)
vmem_add_mem(start, end - start, 0);
else if (start >= ro_start && end <= ro_end)
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 0e2919d..1395eeb 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -1250,7 +1250,8 @@
insn_count = bpf_jit_insn(jit, fp, i);
if (insn_count < 0)
return -1;
- jit->addrs[i + 1] = jit->prg; /* Next instruction address */
+ /* Next instruction address */
+ jit->addrs[i + insn_count] = jit->prg;
}
bpf_jit_epilogue(jit);
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
index 3a40f718..4004e03 100644
--- a/arch/s390/pci/pci_dma.c
+++ b/arch/s390/pci/pci_dma.c
@@ -455,7 +455,7 @@
zdev->dma_table = dma_alloc_cpu_table();
if (!zdev->dma_table) {
rc = -ENOMEM;
- goto out_clean;
+ goto out;
}
/*
@@ -475,18 +475,22 @@
zdev->iommu_bitmap = vzalloc(zdev->iommu_pages / 8);
if (!zdev->iommu_bitmap) {
rc = -ENOMEM;
- goto out_reg;
+ goto free_dma_table;
}
rc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
(u64) zdev->dma_table);
if (rc)
- goto out_reg;
- return 0;
+ goto free_bitmap;
-out_reg:
+ return 0;
+free_bitmap:
+ vfree(zdev->iommu_bitmap);
+ zdev->iommu_bitmap = NULL;
+free_dma_table:
dma_free_cpu_table(zdev->dma_table);
-out_clean:
+ zdev->dma_table = NULL;
+out:
return rc;
}
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 3736be6..894bcae 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -183,9 +183,9 @@
int "Maximum number of CPUs"
depends on SMP
range 2 32 if SPARC32
- range 2 1024 if SPARC64
+ range 2 4096 if SPARC64
default 32 if SPARC32
- default 64 if SPARC64
+ default 4096 if SPARC64
source kernel/Kconfig.hz
diff --git a/arch/sparc/include/asm/mmu_64.h b/arch/sparc/include/asm/mmu_64.h
index f7de0db..83b36a5 100644
--- a/arch/sparc/include/asm/mmu_64.h
+++ b/arch/sparc/include/asm/mmu_64.h
@@ -52,7 +52,7 @@
#define CTX_NR_MASK TAG_CONTEXT_BITS
#define CTX_HW_MASK (CTX_NR_MASK | CTX_PGSZ_MASK)
-#define CTX_FIRST_VERSION ((_AC(1,UL) << CTX_VERSION_SHIFT) + _AC(1,UL))
+#define CTX_FIRST_VERSION BIT(CTX_VERSION_SHIFT)
#define CTX_VALID(__ctx) \
(!(((__ctx.sparc64_ctx_val) ^ tlb_context_cache) & CTX_VERSION_MASK))
#define CTX_HWBITS(__ctx) ((__ctx.sparc64_ctx_val) & CTX_HW_MASK)
diff --git a/arch/sparc/include/asm/mmu_context_64.h b/arch/sparc/include/asm/mmu_context_64.h
index b84be67..0cdeb2b 100644
--- a/arch/sparc/include/asm/mmu_context_64.h
+++ b/arch/sparc/include/asm/mmu_context_64.h
@@ -17,22 +17,19 @@
extern unsigned long tlb_context_cache;
extern unsigned long mmu_context_bmap[];
+DECLARE_PER_CPU(struct mm_struct *, per_cpu_secondary_mm);
void get_new_mmu_context(struct mm_struct *mm);
-#ifdef CONFIG_SMP
-void smp_new_mmu_context_version(void);
-#else
-#define smp_new_mmu_context_version() do { } while (0)
-#endif
-
int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
void destroy_context(struct mm_struct *mm);
void __tsb_context_switch(unsigned long pgd_pa,
struct tsb_config *tsb_base,
struct tsb_config *tsb_huge,
- unsigned long tsb_descr_pa);
+ unsigned long tsb_descr_pa,
+ unsigned long secondary_ctx);
-static inline void tsb_context_switch(struct mm_struct *mm)
+static inline void tsb_context_switch_ctx(struct mm_struct *mm,
+ unsigned long ctx)
{
__tsb_context_switch(__pa(mm->pgd),
&mm->context.tsb_block[0],
@@ -43,9 +40,12 @@
#else
NULL
#endif
- , __pa(&mm->context.tsb_descr[0]));
+ , __pa(&mm->context.tsb_descr[0]),
+ ctx);
}
+#define tsb_context_switch(X) tsb_context_switch_ctx(X, 0)
+
void tsb_grow(struct mm_struct *mm,
unsigned long tsb_index,
unsigned long mm_rss);
@@ -74,8 +74,9 @@
static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk)
{
unsigned long ctx_valid, flags;
- int cpu;
+ int cpu = smp_processor_id();
+ per_cpu(per_cpu_secondary_mm, cpu) = mm;
if (unlikely(mm == &init_mm))
return;
@@ -114,14 +115,12 @@
* cpu0 to update it's TSB because at that point the cpu_vm_mask
* only had cpu1 set in it.
*/
- load_secondary_context(mm);
- tsb_context_switch(mm);
+ tsb_context_switch_ctx(mm, CTX_HWBITS(mm->context));
/* Any time a processor runs a context on an address space
* for the first time, we must flush that context out of the
* local TLB.
*/
- cpu = smp_processor_id();
if (!ctx_valid || !cpumask_test_cpu(cpu, mm_cpumask(mm))) {
cpumask_set_cpu(cpu, mm_cpumask(mm));
__flush_tlb_mm(CTX_HWBITS(mm->context),
@@ -131,26 +130,7 @@
}
#define deactivate_mm(tsk,mm) do { } while (0)
-
-/* Activate a new MM instance for the current task. */
-static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm)
-{
- unsigned long flags;
- int cpu;
-
- spin_lock_irqsave(&mm->context.lock, flags);
- if (!CTX_VALID(mm->context))
- get_new_mmu_context(mm);
- cpu = smp_processor_id();
- if (!cpumask_test_cpu(cpu, mm_cpumask(mm)))
- cpumask_set_cpu(cpu, mm_cpumask(mm));
-
- load_secondary_context(mm);
- __flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT);
- tsb_context_switch(mm);
- spin_unlock_irqrestore(&mm->context.lock, flags);
-}
-
+#define activate_mm(active_mm, mm) switch_mm(active_mm, mm, NULL)
#endif /* !(__ASSEMBLY__) */
#endif /* !(__SPARC64_MMU_CONTEXT_H) */
diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
index 91b963a..29c3b40 100644
--- a/arch/sparc/include/asm/pgtable_32.h
+++ b/arch/sparc/include/asm/pgtable_32.h
@@ -91,9 +91,9 @@
* ZERO_PAGE is a global shared page that is always zero: used
* for zero-mapped memory areas etc..
*/
-extern unsigned long empty_zero_page;
+extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
-#define ZERO_PAGE(vaddr) (virt_to_page(&empty_zero_page))
+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
/*
* In general all page table modifications should use the V8 atomic
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index 408b715..9d81579 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -668,6 +668,14 @@
return pte_pfn(pte);
}
+#define __HAVE_ARCH_PMD_WRITE
+static inline unsigned long pmd_write(pmd_t pmd)
+{
+ pte_t pte = __pte(pmd_val(pmd));
+
+ return pte_write(pte);
+}
+
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline unsigned long pmd_dirty(pmd_t pmd)
{
@@ -683,13 +691,6 @@
return pte_young(pte);
}
-static inline unsigned long pmd_write(pmd_t pmd)
-{
- pte_t pte = __pte(pmd_val(pmd));
-
- return pte_write(pte);
-}
-
static inline unsigned long pmd_trans_huge(pmd_t pmd)
{
pte_t pte = __pte(pmd_val(pmd));
diff --git a/arch/sparc/include/asm/pil.h b/arch/sparc/include/asm/pil.h
index 26693703..522b43d 100644
--- a/arch/sparc/include/asm/pil.h
+++ b/arch/sparc/include/asm/pil.h
@@ -20,7 +20,6 @@
#define PIL_SMP_CALL_FUNC 1
#define PIL_SMP_RECEIVE_SIGNAL 2
#define PIL_SMP_CAPTURE 3
-#define PIL_SMP_CTX_NEW_VERSION 4
#define PIL_DEVICE_IRQ 5
#define PIL_SMP_CALL_FUNC_SNGL 6
#define PIL_DEFERRED_PCR_WORK 7
diff --git a/arch/sparc/include/asm/setup.h b/arch/sparc/include/asm/setup.h
index 29d64b1..be0cc1b 100644
--- a/arch/sparc/include/asm/setup.h
+++ b/arch/sparc/include/asm/setup.h
@@ -16,7 +16,7 @@
*/
extern unsigned char boot_cpu_id;
-extern unsigned long empty_zero_page;
+extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
extern int serial_console;
static inline int con_is_present(void)
diff --git a/arch/sparc/include/asm/trap_block.h b/arch/sparc/include/asm/trap_block.h
index ec9c04d..ff05992 100644
--- a/arch/sparc/include/asm/trap_block.h
+++ b/arch/sparc/include/asm/trap_block.h
@@ -54,6 +54,7 @@
void init_cur_cpu_trap(struct thread_info *);
void setup_tba(void);
extern int ncpus_probed;
+extern u64 cpu_mondo_counter[NR_CPUS];
unsigned long real_hard_smp_processor_id(void);
diff --git a/arch/sparc/include/asm/vio.h b/arch/sparc/include/asm/vio.h
index 8174f6c..9dca7a8 100644
--- a/arch/sparc/include/asm/vio.h
+++ b/arch/sparc/include/asm/vio.h
@@ -327,6 +327,7 @@
int compat_len;
u64 dev_no;
+ u64 id;
unsigned long channel_id;
diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c
index e22416c..bfbde8c 100644
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
@@ -1034,17 +1034,26 @@
{
#ifdef CONFIG_SMP
unsigned long page;
+ void *mondo, *p;
- BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64));
+ BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > PAGE_SIZE);
+
+ /* Make sure mondo block is 64byte aligned */
+ p = kzalloc(127, GFP_KERNEL);
+ if (!p) {
+ prom_printf("SUN4V: Error, cannot allocate mondo block.\n");
+ prom_halt();
+ }
+ mondo = (void *)(((unsigned long)p + 63) & ~0x3f);
+ tb->cpu_mondo_block_pa = __pa(mondo);
page = get_zeroed_page(GFP_KERNEL);
if (!page) {
- prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n");
+ prom_printf("SUN4V: Error, cannot allocate cpu list page.\n");
prom_halt();
}
- tb->cpu_mondo_block_pa = __pa(page);
- tb->cpu_list_pa = __pa(page + 64);
+ tb->cpu_list_pa = __pa(page);
#endif
}
diff --git a/arch/sparc/kernel/kernel.h b/arch/sparc/kernel/kernel.h
index e7f652b..44f32dd4 100644
--- a/arch/sparc/kernel/kernel.h
+++ b/arch/sparc/kernel/kernel.h
@@ -37,7 +37,6 @@
/* smp_64.c */
void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs);
void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs);
-void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs);
void __irq_entry smp_penguin_jailcell(int irq, struct pt_regs *regs);
void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs);
diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
index 9ddc492..c156617 100644
--- a/arch/sparc/kernel/ptrace_64.c
+++ b/arch/sparc/kernel/ptrace_64.c
@@ -311,7 +311,7 @@
}
if (!ret) {
- unsigned long y;
+ unsigned long y = regs->y;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
&y,
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index 19cd08d..4511caa 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -617,22 +617,48 @@
}
}
-/* Multi-cpu list version. */
+#define CPU_MONDO_COUNTER(cpuid) (cpu_mondo_counter[cpuid])
+#define MONDO_USEC_WAIT_MIN 2
+#define MONDO_USEC_WAIT_MAX 100
+#define MONDO_RETRY_LIMIT 500000
+
+/* Multi-cpu list version.
+ *
+ * Deliver xcalls to 'cnt' number of cpus in 'cpu_list'.
+ * Sometimes not all cpus receive the mondo, requiring us to re-send
+ * the mondo until all cpus have received, or cpus are truly stuck
+ * unable to receive mondo, and we timeout.
+ * Occasionally a target cpu strand is borrowed briefly by hypervisor to
+ * perform guest service, such as PCIe error handling. Consider the
+ * service time, 1 second overall wait is reasonable for 1 cpu.
+ * Here two in-between mondo check wait time are defined: 2 usec for
+ * single cpu quick turn around and up to 100usec for large cpu count.
+ * Deliver mondo to large number of cpus could take longer, we adjusts
+ * the retry count as long as target cpus are making forward progress.
+ */
static void hypervisor_xcall_deliver(struct trap_per_cpu *tb, int cnt)
{
- int retries, this_cpu, prev_sent, i, saw_cpu_error;
+ int this_cpu, tot_cpus, prev_sent, i, rem;
+ int usec_wait, retries, tot_retries;
+ u16 first_cpu = 0xffff;
+ unsigned long xc_rcvd = 0;
unsigned long status;
+ int ecpuerror_id = 0;
+ int enocpu_id = 0;
u16 *cpu_list;
+ u16 cpu;
this_cpu = smp_processor_id();
-
cpu_list = __va(tb->cpu_list_pa);
-
- saw_cpu_error = 0;
- retries = 0;
+ usec_wait = cnt * MONDO_USEC_WAIT_MIN;
+ if (usec_wait > MONDO_USEC_WAIT_MAX)
+ usec_wait = MONDO_USEC_WAIT_MAX;
+ retries = tot_retries = 0;
+ tot_cpus = cnt;
prev_sent = 0;
+
do {
- int forward_progress, n_sent;
+ int n_sent, mondo_delivered, target_cpu_busy;
status = sun4v_cpu_mondo_send(cnt,
tb->cpu_list_pa,
@@ -640,94 +666,113 @@
/* HV_EOK means all cpus received the xcall, we're done. */
if (likely(status == HV_EOK))
- break;
+ goto xcall_done;
+
+ /* If not these non-fatal errors, panic */
+ if (unlikely((status != HV_EWOULDBLOCK) &&
+ (status != HV_ECPUERROR) &&
+ (status != HV_ENOCPU)))
+ goto fatal_errors;
/* First, see if we made any forward progress.
*
+ * Go through the cpu_list, count the target cpus that have
+ * received our mondo (n_sent), and those that did not (rem).
+ * Re-pack cpu_list with the cpus remain to be retried in the
+ * front - this simplifies tracking the truly stalled cpus.
+ *
* The hypervisor indicates successful sends by setting
* cpu list entries to the value 0xffff.
+ *
+ * EWOULDBLOCK means some target cpus did not receive the
+ * mondo and retry usually helps.
+ *
+ * ECPUERROR means at least one target cpu is in error state,
+ * it's usually safe to skip the faulty cpu and retry.
+ *
+ * ENOCPU means one of the target cpu doesn't belong to the
+ * domain, perhaps offlined which is unexpected, but not
+ * fatal and it's okay to skip the offlined cpu.
*/
+ rem = 0;
n_sent = 0;
for (i = 0; i < cnt; i++) {
- if (likely(cpu_list[i] == 0xffff))
+ cpu = cpu_list[i];
+ if (likely(cpu == 0xffff)) {
n_sent++;
+ } else if ((status == HV_ECPUERROR) &&
+ (sun4v_cpu_state(cpu) == HV_CPU_STATE_ERROR)) {
+ ecpuerror_id = cpu + 1;
+ } else if (status == HV_ENOCPU && !cpu_online(cpu)) {
+ enocpu_id = cpu + 1;
+ } else {
+ cpu_list[rem++] = cpu;
+ }
}
- forward_progress = 0;
- if (n_sent > prev_sent)
- forward_progress = 1;
+ /* No cpu remained, we're done. */
+ if (rem == 0)
+ break;
+ /* Otherwise, update the cpu count for retry. */
+ cnt = rem;
+
+ /* Record the overall number of mondos received by the
+ * first of the remaining cpus.
+ */
+ if (first_cpu != cpu_list[0]) {
+ first_cpu = cpu_list[0];
+ xc_rcvd = CPU_MONDO_COUNTER(first_cpu);
+ }
+
+ /* Was any mondo delivered successfully? */
+ mondo_delivered = (n_sent > prev_sent);
prev_sent = n_sent;
- /* If we get a HV_ECPUERROR, then one or more of the cpus
- * in the list are in error state. Use the cpu_state()
- * hypervisor call to find out which cpus are in error state.
+ /* or, was any target cpu busy processing other mondos? */
+ target_cpu_busy = (xc_rcvd < CPU_MONDO_COUNTER(first_cpu));
+ xc_rcvd = CPU_MONDO_COUNTER(first_cpu);
+
+ /* Retry count is for no progress. If we're making progress,
+ * reset the retry count.
*/
- if (unlikely(status == HV_ECPUERROR)) {
- for (i = 0; i < cnt; i++) {
- long err;
- u16 cpu;
-
- cpu = cpu_list[i];
- if (cpu == 0xffff)
- continue;
-
- err = sun4v_cpu_state(cpu);
- if (err == HV_CPU_STATE_ERROR) {
- saw_cpu_error = (cpu + 1);
- cpu_list[i] = 0xffff;
- }
- }
- } else if (unlikely(status != HV_EWOULDBLOCK))
- goto fatal_mondo_error;
-
- /* Don't bother rewriting the CPU list, just leave the
- * 0xffff and non-0xffff entries in there and the
- * hypervisor will do the right thing.
- *
- * Only advance timeout state if we didn't make any
- * forward progress.
- */
- if (unlikely(!forward_progress)) {
- if (unlikely(++retries > 10000))
- goto fatal_mondo_timeout;
-
- /* Delay a little bit to let other cpus catch up
- * on their cpu mondo queue work.
- */
- udelay(2 * cnt);
+ if (likely(mondo_delivered || target_cpu_busy)) {
+ tot_retries += retries;
+ retries = 0;
+ } else if (unlikely(retries > MONDO_RETRY_LIMIT)) {
+ goto fatal_mondo_timeout;
}
+
+ /* Delay a little bit to let other cpus catch up on
+ * their cpu mondo queue work.
+ */
+ if (!mondo_delivered)
+ udelay(usec_wait);
+
+ retries++;
} while (1);
- if (unlikely(saw_cpu_error))
- goto fatal_mondo_cpu_error;
-
+xcall_done:
+ if (unlikely(ecpuerror_id > 0)) {
+ pr_crit("CPU[%d]: SUN4V mondo cpu error, target cpu(%d) was in error state\n",
+ this_cpu, ecpuerror_id - 1);
+ } else if (unlikely(enocpu_id > 0)) {
+ pr_crit("CPU[%d]: SUN4V mondo cpu error, target cpu(%d) does not belong to the domain\n",
+ this_cpu, enocpu_id - 1);
+ }
return;
-fatal_mondo_cpu_error:
- printk(KERN_CRIT "CPU[%d]: SUN4V mondo cpu error, some target cpus "
- "(including %d) were in error state\n",
- this_cpu, saw_cpu_error - 1);
- return;
+fatal_errors:
+ /* fatal errors include bad alignment, etc */
+ pr_crit("CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) mondo_block_pa(%lx)\n",
+ this_cpu, tot_cpus, tb->cpu_list_pa, tb->cpu_mondo_block_pa);
+ panic("Unexpected SUN4V mondo error %lu\n", status);
fatal_mondo_timeout:
- printk(KERN_CRIT "CPU[%d]: SUN4V mondo timeout, no forward "
- " progress after %d retries.\n",
- this_cpu, retries);
- goto dump_cpu_list_and_out;
-
-fatal_mondo_error:
- printk(KERN_CRIT "CPU[%d]: Unexpected SUN4V mondo error %lu\n",
- this_cpu, status);
- printk(KERN_CRIT "CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) "
- "mondo_block_pa(%lx)\n",
- this_cpu, cnt, tb->cpu_list_pa, tb->cpu_mondo_block_pa);
-
-dump_cpu_list_and_out:
- printk(KERN_CRIT "CPU[%d]: CPU list [ ", this_cpu);
- for (i = 0; i < cnt; i++)
- printk("%u ", cpu_list[i]);
- printk("]\n");
+ /* some cpus being non-responsive to the cpu mondo */
+ pr_crit("CPU[%d]: SUN4V mondo timeout, cpu(%d) made no forward progress after %d retries. Total target cpus(%d).\n",
+ this_cpu, first_cpu, (tot_retries + retries), tot_cpus);
+ panic("SUN4V mondo timeout panic\n");
}
static void (*xcall_deliver_impl)(struct trap_per_cpu *, int);
@@ -959,37 +1004,6 @@
preempt_enable();
}
-void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
-{
- struct mm_struct *mm;
- unsigned long flags;
-
- clear_softint(1 << irq);
-
- /* See if we need to allocate a new TLB context because
- * the version of the one we are using is now out of date.
- */
- mm = current->active_mm;
- if (unlikely(!mm || (mm == &init_mm)))
- return;
-
- spin_lock_irqsave(&mm->context.lock, flags);
-
- if (unlikely(!CTX_VALID(mm->context)))
- get_new_mmu_context(mm);
-
- spin_unlock_irqrestore(&mm->context.lock, flags);
-
- load_secondary_context(mm);
- __flush_tlb_mm(CTX_HWBITS(mm->context),
- SECONDARY_CONTEXT);
-}
-
-void smp_new_mmu_context_version(void)
-{
- smp_cross_call(&xcall_new_mmu_context_version, 0, 0, 0);
-}
-
#ifdef CONFIG_KGDB
void kgdb_roundup_cpus(unsigned long flags)
{
diff --git a/arch/sparc/kernel/sun4v_ivec.S b/arch/sparc/kernel/sun4v_ivec.S
index 559bc5e..3463199 100644
--- a/arch/sparc/kernel/sun4v_ivec.S
+++ b/arch/sparc/kernel/sun4v_ivec.S
@@ -26,6 +26,21 @@
ldxa [%g0] ASI_SCRATCHPAD, %g4
sub %g4, TRAP_PER_CPU_FAULT_INFO, %g4
+ /* Get smp_processor_id() into %g3 */
+ sethi %hi(trap_block), %g5
+ or %g5, %lo(trap_block), %g5
+ sub %g4, %g5, %g3
+ srlx %g3, TRAP_BLOCK_SZ_SHIFT, %g3
+
+ /* Increment cpu_mondo_counter[smp_processor_id()] */
+ sethi %hi(cpu_mondo_counter), %g5
+ or %g5, %lo(cpu_mondo_counter), %g5
+ sllx %g3, 3, %g3
+ add %g5, %g3, %g5
+ ldx [%g5], %g3
+ add %g3, 1, %g3
+ stx %g3, [%g5]
+
/* Get CPU mondo queue base phys address into %g7. */
ldx [%g4 + TRAP_PER_CPU_CPU_MONDO_PA], %g7
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
index d21cd62..d883c59 100644
--- a/arch/sparc/kernel/traps_64.c
+++ b/arch/sparc/kernel/traps_64.c
@@ -85,7 +85,7 @@
void bad_trap(struct pt_regs *regs, long lvl)
{
- char buffer[32];
+ char buffer[36];
siginfo_t info;
if (notify_die(DIE_TRAP, "bad trap", regs,
@@ -116,7 +116,7 @@
void bad_trap_tl1(struct pt_regs *regs, long lvl)
{
- char buffer[32];
+ char buffer[36];
if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
0, lvl, SIGTRAP) == NOTIFY_STOP)
@@ -2659,6 +2659,7 @@
}
}
+u64 cpu_mondo_counter[NR_CPUS] = {0};
struct trap_per_cpu trap_block[NR_CPUS];
EXPORT_SYMBOL(trap_block);
diff --git a/arch/sparc/kernel/tsb.S b/arch/sparc/kernel/tsb.S
index d568c82..7d961f6e 100644
--- a/arch/sparc/kernel/tsb.S
+++ b/arch/sparc/kernel/tsb.S
@@ -375,6 +375,7 @@
* %o1: TSB base config pointer
* %o2: TSB huge config pointer, or NULL if none
* %o3: Hypervisor TSB descriptor physical address
+ * %o4: Secondary context to load, if non-zero
*
* We have to run this whole thing with interrupts
* disabled so that the current cpu doesn't change
@@ -387,6 +388,17 @@
rdpr %pstate, %g1
wrpr %g1, PSTATE_IE, %pstate
+ brz,pn %o4, 1f
+ mov SECONDARY_CONTEXT, %o5
+
+661: stxa %o4, [%o5] ASI_DMMU
+ .section .sun4v_1insn_patch, "ax"
+ .word 661b
+ stxa %o4, [%o5] ASI_MMU
+ .previous
+ flush %g6
+
+1:
TRAP_LOAD_TRAP_BLOCK(%g2, %g3)
stx %o0, [%g2 + TRAP_PER_CPU_PGD_PADDR]
@@ -470,13 +482,16 @@
.type copy_tsb,#function
copy_tsb: /* %o0=old_tsb_base, %o1=old_tsb_size
* %o2=new_tsb_base, %o3=new_tsb_size
+ * %o4=page_size_shift
*/
sethi %uhi(TSB_PASS_BITS), %g7
srlx %o3, 4, %o3
- add %o0, %o1, %g1 /* end of old tsb */
+ add %o0, %o1, %o1 /* end of old tsb */
sllx %g7, 32, %g7
sub %o3, 1, %o3 /* %o3 == new tsb hash mask */
+ mov %o4, %g1 /* page_size_shift */
+
661: prefetcha [%o0] ASI_N, #one_read
.section .tsb_phys_patch, "ax"
.word 661b
@@ -501,9 +516,9 @@
/* This can definitely be computed faster... */
srlx %o0, 4, %o5 /* Build index */
and %o5, 511, %o5 /* Mask index */
- sllx %o5, PAGE_SHIFT, %o5 /* Put into vaddr position */
+ sllx %o5, %g1, %o5 /* Put into vaddr position */
or %o4, %o5, %o4 /* Full VADDR. */
- srlx %o4, PAGE_SHIFT, %o4 /* Shift down to create index */
+ srlx %o4, %g1, %o4 /* Shift down to create index */
and %o4, %o3, %o4 /* Mask with new_tsb_nents-1 */
sllx %o4, 4, %o4 /* Shift back up into tsb ent offset */
TSB_STORE(%o2 + %o4, %g2) /* Store TAG */
@@ -511,7 +526,7 @@
TSB_STORE(%o2 + %o4, %g3) /* Store TTE */
80: add %o0, 16, %o0
- cmp %o0, %g1
+ cmp %o0, %o1
bne,pt %xcc, 90b
nop
diff --git a/arch/sparc/kernel/ttable_64.S b/arch/sparc/kernel/ttable_64.S
index c6dfdaa..170ead6 100644
--- a/arch/sparc/kernel/ttable_64.S
+++ b/arch/sparc/kernel/ttable_64.S
@@ -50,7 +50,7 @@
tl0_irq1: TRAP_IRQ(smp_call_function_client, 1)
tl0_irq2: TRAP_IRQ(smp_receive_signal_client, 2)
tl0_irq3: TRAP_IRQ(smp_penguin_jailcell, 3)
-tl0_irq4: TRAP_IRQ(smp_new_mmu_context_version_client, 4)
+tl0_irq4: BTRAP(0x44)
#else
tl0_irq1: BTRAP(0x41)
tl0_irq2: BTRAP(0x42)
diff --git a/arch/sparc/kernel/vio.c b/arch/sparc/kernel/vio.c
index cb5789c..34824ca 100644
--- a/arch/sparc/kernel/vio.c
+++ b/arch/sparc/kernel/vio.c
@@ -284,13 +284,16 @@
if (!id) {
dev_set_name(&vdev->dev, "%s", bus_id_name);
vdev->dev_no = ~(u64)0;
+ vdev->id = ~(u64)0;
} else if (!cfg_handle) {
dev_set_name(&vdev->dev, "%s-%llu", bus_id_name, *id);
vdev->dev_no = *id;
+ vdev->id = ~(u64)0;
} else {
dev_set_name(&vdev->dev, "%s-%llu-%llu", bus_id_name,
*cfg_handle, *id);
vdev->dev_no = *cfg_handle;
+ vdev->id = *id;
}
vdev->dev.parent = parent;
@@ -333,27 +336,84 @@
(void) vio_create_one(hp, node, &root_vdev->dev);
}
+struct vio_md_node_query {
+ const char *type;
+ u64 dev_no;
+ u64 id;
+};
+
static int vio_md_node_match(struct device *dev, void *arg)
{
+ struct vio_md_node_query *query = (struct vio_md_node_query *) arg;
struct vio_dev *vdev = to_vio_dev(dev);
- if (vdev->mp == (u64) arg)
- return 1;
+ if (vdev->dev_no != query->dev_no)
+ return 0;
+ if (vdev->id != query->id)
+ return 0;
+ if (strcmp(vdev->type, query->type))
+ return 0;
- return 0;
+ return 1;
}
static void vio_remove(struct mdesc_handle *hp, u64 node)
{
+ const char *type;
+ const u64 *id, *cfg_handle;
+ u64 a;
+ struct vio_md_node_query query;
struct device *dev;
- dev = device_find_child(&root_vdev->dev, (void *) node,
+ type = mdesc_get_property(hp, node, "device-type", NULL);
+ if (!type) {
+ type = mdesc_get_property(hp, node, "name", NULL);
+ if (!type)
+ type = mdesc_node_name(hp, node);
+ }
+
+ query.type = type;
+
+ id = mdesc_get_property(hp, node, "id", NULL);
+ cfg_handle = NULL;
+ mdesc_for_each_arc(a, hp, node, MDESC_ARC_TYPE_BACK) {
+ u64 target;
+
+ target = mdesc_arc_target(hp, a);
+ cfg_handle = mdesc_get_property(hp, target,
+ "cfg-handle", NULL);
+ if (cfg_handle)
+ break;
+ }
+
+ if (!id) {
+ query.dev_no = ~(u64)0;
+ query.id = ~(u64)0;
+ } else if (!cfg_handle) {
+ query.dev_no = *id;
+ query.id = ~(u64)0;
+ } else {
+ query.dev_no = *cfg_handle;
+ query.id = *id;
+ }
+
+ dev = device_find_child(&root_vdev->dev, &query,
vio_md_node_match);
if (dev) {
printk(KERN_INFO "VIO: Removing device %s\n", dev_name(dev));
device_unregister(dev);
put_device(dev);
+ } else {
+ if (!id)
+ printk(KERN_ERR "VIO: Removed unknown %s node.\n",
+ type);
+ else if (!cfg_handle)
+ printk(KERN_ERR "VIO: Removed unknown %s node %llu.\n",
+ type, *id);
+ else
+ printk(KERN_ERR "VIO: Removed unknown %s node %llu-%llu.\n",
+ type, *cfg_handle, *id);
}
}
diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
index eb82871..3b7092d 100644
--- a/arch/sparc/mm/init_32.c
+++ b/arch/sparc/mm/init_32.c
@@ -301,7 +301,7 @@
/* Saves us work later. */
- memset((void *)&empty_zero_page, 0, PAGE_SIZE);
+ memset((void *)empty_zero_page, 0, PAGE_SIZE);
i = last_valid_pfn >> ((20 - PAGE_SHIFT) + 5);
i += 1;
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 3d3414c1..384aba1 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -656,10 +656,58 @@
/* get_new_mmu_context() uses "cache + 1". */
DEFINE_SPINLOCK(ctx_alloc_lock);
-unsigned long tlb_context_cache = CTX_FIRST_VERSION - 1;
+unsigned long tlb_context_cache = CTX_FIRST_VERSION;
#define MAX_CTX_NR (1UL << CTX_NR_BITS)
#define CTX_BMAP_SLOTS BITS_TO_LONGS(MAX_CTX_NR)
DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR);
+DEFINE_PER_CPU(struct mm_struct *, per_cpu_secondary_mm) = {0};
+
+static void mmu_context_wrap(void)
+{
+ unsigned long old_ver = tlb_context_cache & CTX_VERSION_MASK;
+ unsigned long new_ver, new_ctx, old_ctx;
+ struct mm_struct *mm;
+ int cpu;
+
+ bitmap_zero(mmu_context_bmap, 1 << CTX_NR_BITS);
+
+ /* Reserve kernel context */
+ set_bit(0, mmu_context_bmap);
+
+ new_ver = (tlb_context_cache & CTX_VERSION_MASK) + CTX_FIRST_VERSION;
+ if (unlikely(new_ver == 0))
+ new_ver = CTX_FIRST_VERSION;
+ tlb_context_cache = new_ver;
+
+ /*
+ * Make sure that any new mm that are added into per_cpu_secondary_mm,
+ * are going to go through get_new_mmu_context() path.
+ */
+ mb();
+
+ /*
+ * Updated versions to current on those CPUs that had valid secondary
+ * contexts
+ */
+ for_each_online_cpu(cpu) {
+ /*
+ * If a new mm is stored after we took this mm from the array,
+ * it will go into get_new_mmu_context() path, because we
+ * already bumped the version in tlb_context_cache.
+ */
+ mm = per_cpu(per_cpu_secondary_mm, cpu);
+
+ if (unlikely(!mm || mm == &init_mm))
+ continue;
+
+ old_ctx = mm->context.sparc64_ctx_val;
+ if (likely((old_ctx & CTX_VERSION_MASK) == old_ver)) {
+ new_ctx = (old_ctx & ~CTX_VERSION_MASK) | new_ver;
+ set_bit(new_ctx & CTX_NR_MASK, mmu_context_bmap);
+ mm->context.sparc64_ctx_val = new_ctx;
+ }
+ }
+}
/* Caller does TLB context flushing on local CPU if necessary.
* The caller also ensures that CTX_VALID(mm->context) is false.
@@ -675,48 +723,30 @@
{
unsigned long ctx, new_ctx;
unsigned long orig_pgsz_bits;
- int new_version;
spin_lock(&ctx_alloc_lock);
+retry:
+ /* wrap might have happened, test again if our context became valid */
+ if (unlikely(CTX_VALID(mm->context)))
+ goto out;
orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
- new_version = 0;
if (new_ctx >= (1 << CTX_NR_BITS)) {
new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
if (new_ctx >= ctx) {
- int i;
- new_ctx = (tlb_context_cache & CTX_VERSION_MASK) +
- CTX_FIRST_VERSION;
- if (new_ctx == 1)
- new_ctx = CTX_FIRST_VERSION;
-
- /* Don't call memset, for 16 entries that's just
- * plain silly...
- */
- mmu_context_bmap[0] = 3;
- mmu_context_bmap[1] = 0;
- mmu_context_bmap[2] = 0;
- mmu_context_bmap[3] = 0;
- for (i = 4; i < CTX_BMAP_SLOTS; i += 4) {
- mmu_context_bmap[i + 0] = 0;
- mmu_context_bmap[i + 1] = 0;
- mmu_context_bmap[i + 2] = 0;
- mmu_context_bmap[i + 3] = 0;
- }
- new_version = 1;
- goto out;
+ mmu_context_wrap();
+ goto retry;
}
}
+ if (mm->context.sparc64_ctx_val)
+ cpumask_clear(mm_cpumask(mm));
mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
-out:
tlb_context_cache = new_ctx;
mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
+out:
spin_unlock(&ctx_alloc_lock);
-
- if (unlikely(new_version))
- smp_new_mmu_context_version();
}
static int numa_enabled = 1;
@@ -1493,7 +1523,7 @@
if ((long)addr < 0L) {
unsigned long pa = __pa(addr);
- if ((addr >> max_phys_bits) != 0UL)
+ if ((pa >> max_phys_bits) != 0UL)
return false;
return pfn_valid(pa >> PAGE_SHIFT);
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
index 9cdeca0..2664112 100644
--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -451,7 +451,8 @@
extern void copy_tsb(unsigned long old_tsb_base,
unsigned long old_tsb_size,
unsigned long new_tsb_base,
- unsigned long new_tsb_size);
+ unsigned long new_tsb_size,
+ unsigned long page_size_shift);
unsigned long old_tsb_base = (unsigned long) old_tsb;
unsigned long new_tsb_base = (unsigned long) new_tsb;
@@ -459,7 +460,9 @@
old_tsb_base = __pa(old_tsb_base);
new_tsb_base = __pa(new_tsb_base);
}
- copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size);
+ copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size,
+ tsb_index == MM_TSB_BASE ?
+ PAGE_SHIFT : REAL_HPAGE_SHIFT);
}
mm->context.tsb_block[tsb_index].tsb = new_tsb;
diff --git a/arch/sparc/mm/ultra.S b/arch/sparc/mm/ultra.S
index 5d2fd6c..fcf4d27 100644
--- a/arch/sparc/mm/ultra.S
+++ b/arch/sparc/mm/ultra.S
@@ -971,11 +971,6 @@
wr %g0, (1 << PIL_SMP_CAPTURE), %set_softint
retry
- .globl xcall_new_mmu_context_version
-xcall_new_mmu_context_version:
- wr %g0, (1 << PIL_SMP_CTX_NEW_VERSION), %set_softint
- retry
-
#ifdef CONFIG_KGDB
.globl xcall_kgdb_capture
xcall_kgdb_capture:
diff --git a/arch/sparc/power/hibernate.c b/arch/sparc/power/hibernate.c
index 17bd2e1..df707a8 100644
--- a/arch/sparc/power/hibernate.c
+++ b/arch/sparc/power/hibernate.c
@@ -35,6 +35,5 @@
{
struct mm_struct *mm = current->active_mm;
- load_secondary_context(mm);
- tsb_context_switch(mm);
+ tsb_context_switch_ctx(mm, CTX_HWBITS(mm->context));
}
diff --git a/arch/tile/include/asm/thread_info.h b/arch/tile/include/asm/thread_info.h
index dc1fb28..489b150 100644
--- a/arch/tile/include/asm/thread_info.h
+++ b/arch/tile/include/asm/thread_info.h
@@ -78,7 +78,7 @@
#ifndef __ASSEMBLY__
-void arch_release_thread_info(struct thread_info *info);
+void arch_release_thread_stack(unsigned long *stack);
/* How to get the thread information struct from C. */
register unsigned long stack_pointer __asm__("sp");
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c
index 7d57693..a97ab1a 100644
--- a/arch/tile/kernel/process.c
+++ b/arch/tile/kernel/process.c
@@ -73,8 +73,9 @@
/*
* Release a thread_info structure
*/
-void arch_release_thread_info(struct thread_info *info)
+void arch_release_thread_stack(unsigned long *stack)
{
+ struct thread_info *info = (void *)stack;
struct single_step_state *step_state = info->step_state;
if (step_state) {
diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
index 9011a88..ed1e920 100644
--- a/arch/x86/boot/boot.h
+++ b/arch/x86/boot/boot.h
@@ -16,7 +16,7 @@
#ifndef BOOT_BOOT_H
#define BOOT_BOOT_H
-#define STACK_SIZE 512 /* Minimum number of bytes for stack */
+#define STACK_SIZE 1024 /* Minimum number of bytes for stack */
#ifndef __ASSEMBLY__
diff --git a/arch/x86/boot/string.c b/arch/x86/boot/string.c
index 318b846..06ceddb 100644
--- a/arch/x86/boot/string.c
+++ b/arch/x86/boot/string.c
@@ -14,6 +14,7 @@
#include <linux/types.h>
#include "ctype.h"
+#include "string.h"
int memcmp(const void *s1, const void *s2, size_t len)
{
diff --git a/arch/x86/boot/string.h b/arch/x86/boot/string.h
index 725e820..113588d 100644
--- a/arch/x86/boot/string.h
+++ b/arch/x86/boot/string.h
@@ -18,4 +18,13 @@
#define memset(d,c,l) __builtin_memset(d,c,l)
#define memcmp __builtin_memcmp
+extern int strcmp(const char *str1, const char *str2);
+extern int strncmp(const char *cs, const char *ct, size_t count);
+extern size_t strlen(const char *s);
+extern char *strstr(const char *s1, const char *s2);
+extern size_t strnlen(const char *s, size_t maxlen);
+extern unsigned int atou(const char *s);
+extern unsigned long long simple_strtoull(const char *cp, char **endp,
+ unsigned int base);
+
#endif /* BOOT_STRING_H */
diff --git a/arch/x86/crypto/ghash-clmulni-intel_glue.c b/arch/x86/crypto/ghash-clmulni-intel_glue.c
index 440df0c..a69321a 100644
--- a/arch/x86/crypto/ghash-clmulni-intel_glue.c
+++ b/arch/x86/crypto/ghash-clmulni-intel_glue.c
@@ -219,6 +219,29 @@
}
}
+static int ghash_async_import(struct ahash_request *req, const void *in)
+{
+ struct ahash_request *cryptd_req = ahash_request_ctx(req);
+ struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
+ struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
+
+ ghash_async_init(req);
+ memcpy(dctx, in, sizeof(*dctx));
+ return 0;
+
+}
+
+static int ghash_async_export(struct ahash_request *req, void *out)
+{
+ struct ahash_request *cryptd_req = ahash_request_ctx(req);
+ struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
+ struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
+
+ memcpy(out, dctx, sizeof(*dctx));
+ return 0;
+
+}
+
static int ghash_async_digest(struct ahash_request *req)
{
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
@@ -288,8 +311,11 @@
.final = ghash_async_final,
.setkey = ghash_async_setkey,
.digest = ghash_async_digest,
+ .export = ghash_async_export,
+ .import = ghash_async_import,
.halg = {
.digestsize = GHASH_DIGEST_SIZE,
+ .statesize = sizeof(struct ghash_desc_ctx),
.base = {
.cra_name = "ghash",
.cra_driver_name = "ghash-clmulni",
diff --git a/arch/x86/crypto/sha1_avx2_x86_64_asm.S b/arch/x86/crypto/sha1_avx2_x86_64_asm.S
index 1cd792d..1eab79c 100644
--- a/arch/x86/crypto/sha1_avx2_x86_64_asm.S
+++ b/arch/x86/crypto/sha1_avx2_x86_64_asm.S
@@ -117,11 +117,10 @@
.set T1, REG_T1
.endm
-#define K_BASE %r8
#define HASH_PTR %r9
+#define BLOCKS_CTR %r8
#define BUFFER_PTR %r10
#define BUFFER_PTR2 %r13
-#define BUFFER_END %r11
#define PRECALC_BUF %r14
#define WK_BUF %r15
@@ -205,14 +204,14 @@
* blended AVX2 and ALU instruction scheduling
* 1 vector iteration per 8 rounds
*/
- vmovdqu ((i * 2) + PRECALC_OFFSET)(BUFFER_PTR), W_TMP
+ vmovdqu (i * 2)(BUFFER_PTR), W_TMP
.elseif ((i & 7) == 1)
- vinsertf128 $1, (((i-1) * 2)+PRECALC_OFFSET)(BUFFER_PTR2),\
+ vinsertf128 $1, ((i-1) * 2)(BUFFER_PTR2),\
WY_TMP, WY_TMP
.elseif ((i & 7) == 2)
vpshufb YMM_SHUFB_BSWAP, WY_TMP, WY
.elseif ((i & 7) == 4)
- vpaddd K_XMM(K_BASE), WY, WY_TMP
+ vpaddd K_XMM + K_XMM_AR(%rip), WY, WY_TMP
.elseif ((i & 7) == 7)
vmovdqu WY_TMP, PRECALC_WK(i&~7)
@@ -255,7 +254,7 @@
vpxor WY, WY_TMP, WY_TMP
.elseif ((i & 7) == 7)
vpxor WY_TMP2, WY_TMP, WY
- vpaddd K_XMM(K_BASE), WY, WY_TMP
+ vpaddd K_XMM + K_XMM_AR(%rip), WY, WY_TMP
vmovdqu WY_TMP, PRECALC_WK(i&~7)
PRECALC_ROTATE_WY
@@ -291,7 +290,7 @@
vpsrld $30, WY, WY
vpor WY, WY_TMP, WY
.elseif ((i & 7) == 7)
- vpaddd K_XMM(K_BASE), WY, WY_TMP
+ vpaddd K_XMM + K_XMM_AR(%rip), WY, WY_TMP
vmovdqu WY_TMP, PRECALC_WK(i&~7)
PRECALC_ROTATE_WY
@@ -446,6 +445,16 @@
.endm
+/* Conditionally advance a pointer (uses RTA as a temporary):
+ * if \b >= \c then \a += \d, otherwise \a is left unchanged.
+ */
+.macro ADD_IF_GE a, b, c, d
+ mov \a, RTA
+ add $\d, RTA
+ cmp $\c, \b
+ cmovge RTA, \a
+.endm
+
/*
* macro implements 80 rounds of SHA-1, for multiple blocks with s/w pipelining
*/
@@ -463,13 +472,16 @@
lea (2*4*80+32)(%rsp), WK_BUF
# Precalc WK for first 2 blocks
- PRECALC_OFFSET = 0
+ ADD_IF_GE BUFFER_PTR2, BLOCKS_CTR, 2, 64
.set i, 0
.rept 160
PRECALC i
.set i, i + 1
.endr
- PRECALC_OFFSET = 128
+
+ /* Go to next block if needed */
+ ADD_IF_GE BUFFER_PTR, BLOCKS_CTR, 3, 128
+ ADD_IF_GE BUFFER_PTR2, BLOCKS_CTR, 4, 128
xchg WK_BUF, PRECALC_BUF
.align 32
@@ -479,8 +491,8 @@
* we use K_BASE value as a signal of a last block,
* it is set below by: cmovae BUFFER_PTR, K_BASE
*/
- cmp K_BASE, BUFFER_PTR
- jne _begin
+ test BLOCKS_CTR, BLOCKS_CTR
+ jnz _begin
.align 32
jmp _end
.align 32
@@ -512,10 +524,10 @@
.set j, j+2
.endr
- add $(2*64), BUFFER_PTR /* move to next odd-64-byte block */
- cmp BUFFER_END, BUFFER_PTR /* is current block the last one? */
- cmovae K_BASE, BUFFER_PTR /* signal the last iteration smartly */
-
+ /* Update Counter */
+ sub $1, BLOCKS_CTR
+ /* Move to the next block only if needed */
+ ADD_IF_GE BUFFER_PTR, BLOCKS_CTR, 4, 128
/*
* rounds
* 60,62,64,66,68
@@ -532,8 +544,8 @@
UPDATE_HASH 12(HASH_PTR), D
UPDATE_HASH 16(HASH_PTR), E
- cmp K_BASE, BUFFER_PTR /* is current block the last one? */
- je _loop
+ test BLOCKS_CTR, BLOCKS_CTR
+ jz _loop
mov TB, B
@@ -575,10 +587,10 @@
.set j, j+2
.endr
- add $(2*64), BUFFER_PTR2 /* move to next even-64-byte block */
-
- cmp BUFFER_END, BUFFER_PTR2 /* is current block the last one */
- cmovae K_BASE, BUFFER_PTR /* signal the last iteration smartly */
+ /* update counter */
+ sub $1, BLOCKS_CTR
+ /* Move to the next block only if needed */
+ ADD_IF_GE BUFFER_PTR2, BLOCKS_CTR, 4, 128
jmp _loop3
_loop3:
@@ -641,19 +653,12 @@
avx2_zeroupper
- lea K_XMM_AR(%rip), K_BASE
-
+ /* Setup initial values */
mov CTX, HASH_PTR
mov BUF, BUFFER_PTR
- lea 64(BUF), BUFFER_PTR2
- shl $6, CNT /* mul by 64 */
- add BUF, CNT
- add $64, CNT
- mov CNT, BUFFER_END
-
- cmp BUFFER_END, BUFFER_PTR2
- cmovae K_BASE, BUFFER_PTR2
+ mov BUF, BUFFER_PTR2
+ mov CNT, BLOCKS_CTR
xmm_mov BSWAP_SHUFB_CTL(%rip), YMM_SHUFB_BSWAP
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index c3272837..0d38d9f 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -1201,6 +1201,8 @@
* other IST entries.
*/
+ ASM_CLAC
+
/* Use %rdx as our temp variable throughout */
pushq %rdx
diff --git a/arch/x86/entry/vdso/vdso32-setup.c b/arch/x86/entry/vdso/vdso32-setup.c
index 08a317a..a7508d7 100644
--- a/arch/x86/entry/vdso/vdso32-setup.c
+++ b/arch/x86/entry/vdso/vdso32-setup.c
@@ -31,8 +31,10 @@
{
vdso32_enabled = simple_strtoul(s, NULL, 0);
- if (vdso32_enabled > 1)
+ if (vdso32_enabled > 1) {
pr_warn("vdso32 values other than 0 and 1 are no longer allowed; vdso disabled\n");
+ vdso32_enabled = 0;
+ }
return 1;
}
@@ -63,13 +65,18 @@
/* Register vsyscall32 into the ABI table */
#include <linux/sysctl.h>
+static const int zero;
+static const int one = 1;
+
static struct ctl_table abi_table2[] = {
{
.procname = "vsyscall32",
.data = &vdso32_enabled,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = proc_dointvec
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = (int *)&zero,
+ .extra2 = (int *)&one,
},
{}
};
diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
index 1514753..bcd3d61 100644
--- a/arch/x86/include/asm/elf.h
+++ b/arch/x86/include/asm/elf.h
@@ -245,12 +245,13 @@
#define CORE_DUMP_USE_REGSET
#define ELF_EXEC_PAGESIZE 4096
-/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
- use of this is to invoke "./ld.so someprog" to test out a new version of
- the loader. We need to make sure that it is out of the way of the program
- that it will "exec", and that there is sufficient room for the brk. */
-
-#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
+/*
+ * This is the base location for PIE (ET_DYN with INTERP) loads. On
+ * 64-bit, this is above 4GB to leave the entire 32-bit address
+ * space open for things that want to use the area for 32-bit pointers.
+ */
+#define ELF_ET_DYN_BASE (mmap_is_ia32() ? 0x000400000UL : \
+ (TASK_SIZE / 3 * 2))
/* This yields a mask that user programs can use to figure out what
instruction set this CPU supports. This could be done in user space,
@@ -278,7 +279,7 @@
#define ARCH_DLINFO_IA32 \
do { \
- if (vdso32_enabled) { \
+ if (VDSO_CURRENT_BASE) { \
NEW_AUX_ENT(AT_SYSINFO, VDSO_ENTRY); \
NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_CURRENT_BASE); \
} \
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index de25aad..9016b4b 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -304,13 +304,13 @@
static inline void outs##bwl(int port, const void *addr, unsigned long count) \
{ \
asm volatile("rep; outs" #bwl \
- : "+S"(addr), "+c"(count) : "d"(port)); \
+ : "+S"(addr), "+c"(count) : "d"(port) : "memory"); \
} \
\
static inline void ins##bwl(int port, void *addr, unsigned long count) \
{ \
asm volatile("rep; ins" #bwl \
- : "+D"(addr), "+c"(count) : "d"(port)); \
+ : "+D"(addr), "+c"(count) : "d"(port) : "memory"); \
}
BUILDIO(b, b, char)
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index e9cd7be..19d14ac 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -221,6 +221,9 @@
void (*get_cpuid)(struct x86_emulate_ctxt *ctxt,
u32 *eax, u32 *ebx, u32 *ecx, u32 *edx);
void (*set_nmi_mask)(struct x86_emulate_ctxt *ctxt, bool masked);
+
+ unsigned (*get_hflags)(struct x86_emulate_ctxt *ctxt);
+ void (*set_hflags)(struct x86_emulate_ctxt *ctxt, unsigned hflags);
};
typedef u32 __attribute__((vector_size(16))) sse128_t;
@@ -290,7 +293,6 @@
/* interruptibility state, as a result of execution of STI or MOV SS */
int interruptibility;
- int emul_flags;
bool perm_ok; /* do not check permissions if true */
bool ud; /* inject an #UD if host doesn't support insn */
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 690b402..37db36f 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -405,6 +405,8 @@
#define MSR_IA32_TSC_ADJUST 0x0000003b
#define MSR_IA32_BNDCFGS 0x00000d90
+#define MSR_IA32_BNDCFGS_RSVD 0x00000ffc
+
#define MSR_IA32_XSS 0x00000da0
#define FEATURE_CONTROL_LOCKED (1<<0)
diff --git a/arch/x86/include/asm/pat.h b/arch/x86/include/asm/pat.h
index 0b1ff4c..fffb279 100644
--- a/arch/x86/include/asm/pat.h
+++ b/arch/x86/include/asm/pat.h
@@ -7,6 +7,7 @@
bool pat_enabled(void);
void pat_disable(const char *reason);
extern void pat_init(void);
+extern void init_cache_modes(void);
extern int reserve_memtype(u64 start, u64 end,
enum page_cache_mode req_pcm, enum page_cache_mode *ret_pcm);
diff --git a/arch/x86/include/asm/pmem.h b/arch/x86/include/asm/pmem.h
index d8ce3ec..6503526 100644
--- a/arch/x86/include/asm/pmem.h
+++ b/arch/x86/include/asm/pmem.h
@@ -72,8 +72,8 @@
* @size: number of bytes to write back
*
* Write back a cache range using the CLWB (cache line write back)
- * instruction. This function requires explicit ordering with an
- * arch_wmb_pmem() call. This API is internal to the x86 PMEM implementation.
+ * instruction. Note that @size is internally rounded up to be cache
+ * line size aligned.
*/
static inline void __arch_wb_cache_pmem(void *vaddr, size_t size)
{
@@ -87,15 +87,6 @@
clwb(p);
}
-/*
- * copy_from_iter_nocache() on x86 only uses non-temporal stores for iovec
- * iterators, so for other types (bvec & kvec) we must do a cache write-back.
- */
-static inline bool __iter_needs_pmem_wb(struct iov_iter *i)
-{
- return iter_is_iovec(i) == false;
-}
-
/**
* arch_copy_from_iter_pmem - copy data from an iterator to PMEM
* @addr: PMEM destination address
@@ -114,8 +105,36 @@
/* TODO: skip the write-back by always using non-temporal stores */
len = copy_from_iter_nocache(vaddr, bytes, i);
- if (__iter_needs_pmem_wb(i))
- __arch_wb_cache_pmem(vaddr, bytes);
+ /*
+ * In the iovec case on x86_64 copy_from_iter_nocache() uses
+ * non-temporal stores for the bulk of the transfer, but we need
+ * to manually flush if the transfer is unaligned. A cached
+ * memory copy is used when destination or size is not naturally
+ * aligned. That is:
+ * - Require 8-byte alignment when size is 8 bytes or larger.
+ * - Require 4-byte alignment when size is 4 bytes.
+ *
+ * In the non-iovec case the entire destination needs to be
+ * flushed.
+ */
+ if (iter_is_iovec(i)) {
+ unsigned long flushed, dest = (unsigned long) addr;
+
+ if (bytes < 8) {
+ if (!IS_ALIGNED(dest, 4) || (bytes != 4))
+ __arch_wb_cache_pmem(addr, bytes);
+ } else {
+ if (!IS_ALIGNED(dest, 8)) {
+ dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
+ __arch_wb_cache_pmem(addr, 1);
+ }
+
+ flushed = dest - (unsigned long) addr;
+ if (bytes > flushed && !IS_ALIGNED(bytes - flushed, 8))
+ __arch_wb_cache_pmem(addr + bytes - 1, 1);
+ }
+ } else
+ __arch_wb_cache_pmem(addr, bytes);
return len;
}
diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h
index 4c20dd3..85133b2 100644
--- a/arch/x86/include/asm/xen/hypercall.h
+++ b/arch/x86/include/asm/xen/hypercall.h
@@ -43,6 +43,7 @@
#include <asm/page.h>
#include <asm/pgtable.h>
+#include <asm/smap.h>
#include <xen/interface/xen.h>
#include <xen/interface/sched.h>
@@ -213,10 +214,12 @@
__HYPERCALL_DECLS;
__HYPERCALL_5ARG(a1, a2, a3, a4, a5);
+ stac();
asm volatile("call *%[call]"
: __HYPERCALL_5PARAM
: [call] "a" (&hypercall_page[call])
: __HYPERCALL_CLOBBER5);
+ clac();
return (long)__res;
}
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index e759076..1e5eb9f 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -329,6 +329,14 @@
struct mpc_intsrc mp_irq;
/*
+ * Check bus_irq boundary.
+ */
+ if (bus_irq >= NR_IRQS_LEGACY) {
+ pr_warn("Invalid bus_irq %u for legacy override\n", bus_irq);
+ return;
+ }
+
+ /*
* Convert 'gsi' to 'ioapic.pin'.
*/
ioapic = mp_find_ioapic(gsi);
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 8ca533b..fc91c98 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -1875,6 +1875,7 @@
.irq_ack = irq_chip_ack_parent,
.irq_eoi = ioapic_ack_level,
.irq_set_affinity = ioapic_set_affinity,
+ .irq_retrigger = irq_chip_retrigger_hierarchy,
.flags = IRQCHIP_SKIP_SET_WAKE,
};
@@ -1886,6 +1887,7 @@
.irq_ack = irq_chip_ack_parent,
.irq_eoi = ioapic_ir_ack_level,
.irq_set_affinity = ioapic_set_affinity,
+ .irq_retrigger = irq_chip_retrigger_hierarchy,
.flags = IRQCHIP_SKIP_SET_WAKE,
};
@@ -2113,7 +2115,7 @@
int idx;
idx = find_irq_entry(apic1, pin1, mp_INT);
if (idx != -1 && irq_trigger(idx))
- unmask_ioapic_irq(irq_get_chip_data(0));
+ unmask_ioapic_irq(irq_get_irq_data(0));
}
irq_domain_deactivate_irq(irq_data);
irq_domain_activate_irq(irq_data);
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index e99b150..2116176 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -53,7 +53,7 @@
"load_store",
"insn_fetch",
"combined_unit",
- "",
+ "decode_unit",
"northbridge",
"execution_unit",
};
@@ -682,6 +682,9 @@
const char *name = th_names[bank];
int err = 0;
+ if (!dev)
+ return -ENODEV;
+
if (is_shared_bank(bank)) {
nb = node_to_amd_nb(amd_get_nb_id(cpu));
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
index cfc4a96..83b5f7a 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -30,6 +30,7 @@
#include <asm/apic.h>
#include <asm/timer.h>
#include <asm/reboot.h>
+#include <asm/nmi.h>
struct ms_hyperv_info ms_hyperv;
EXPORT_SYMBOL_GPL(ms_hyperv);
@@ -157,6 +158,26 @@
return 0;
}
+#ifdef CONFIG_X86_LOCAL_APIC
+/*
+ * Prior to WS2016 Debug-VM sends NMIs to all CPUs which makes
+ * it difficult to process CHANNELMSG_UNLOAD in case of crash. Handle
+ * unknown NMI on the first CPU which gets it.
+ */
+static int hv_nmi_unknown(unsigned int val, struct pt_regs *regs)
+{
+ static atomic_t nmi_cpu = ATOMIC_INIT(-1);
+
+ if (!unknown_nmi_panic)
+ return NMI_DONE;
+
+ if (atomic_cmpxchg(&nmi_cpu, -1, raw_smp_processor_id()) != -1)
+ return NMI_HANDLED;
+
+ return NMI_DONE;
+}
+#endif
+
static void __init ms_hyperv_init_platform(void)
{
/*
@@ -182,6 +203,9 @@
printk(KERN_INFO "HyperV: LAPIC Timer Frequency: %#x\n",
lapic_timer_frequency);
}
+
+ register_nmi_handler(NMI_UNKNOWN, hv_nmi_unknown, NMI_FLAG_FIRST,
+ "hv_nmi_unknown");
#endif
if (ms_hyperv.features & HV_X64_MSR_TIME_REF_COUNT_AVAILABLE)
diff --git a/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
index 659f01e..2cdae69 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_lbr.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
@@ -153,7 +153,7 @@
*/
if (cpuc->lbr_sel)
lbr_select = cpuc->lbr_sel->config;
- if (!pmi)
+ if (!pmi && cpuc->lbr_sel)
wrmsrl(MSR_LBR_SELECT, lbr_select);
rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
@@ -410,6 +410,9 @@
cpuc->lbr_entries[i].to = msr_lastbranch.to;
cpuc->lbr_entries[i].mispred = 0;
cpuc->lbr_entries[i].predicted = 0;
+ cpuc->lbr_entries[i].in_tx = 0;
+ cpuc->lbr_entries[i].abort = 0;
+ cpuc->lbr_entries[i].cycles = 0;
cpuc->lbr_entries[i].reserved = 0;
}
cpuc->lbr_stack.nr = i;
@@ -429,8 +432,10 @@
int out = 0;
int num = x86_pmu.lbr_nr;
- if (cpuc->lbr_sel->config & LBR_CALL_STACK)
- num = tos;
+ if (cpuc->lbr_sel) {
+ if (cpuc->lbr_sel->config & LBR_CALL_STACK)
+ num = tos;
+ }
for (i = 0; i < num; i++) {
unsigned long lbr_idx = (tos - i) & mask;
diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c
index be39b5f..1011c05b 100644
--- a/arch/x86/kernel/fpu/init.c
+++ b/arch/x86/kernel/fpu/init.c
@@ -96,6 +96,7 @@
* Boot time FPU feature detection code:
*/
unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu;
+EXPORT_SYMBOL_GPL(mxcsr_feature_mask);
static void __init fpu__init_system_mxcsr(void)
{
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index eb6bd34..1b96bfe 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -977,6 +977,18 @@
unsigned long return_hooker = (unsigned long)
&return_to_handler;
+ /*
+ * When resuming from suspend-to-ram, this function can be indirectly
+ * called from early CPU startup code while the CPU is in real mode,
+ * which would fail miserably. Make sure the stack pointer is a
+ * virtual address.
+ *
+ * This check isn't as accurate as virt_addr_valid(), but it should be
+ * good enough for this purpose, and it's fast.
+ */
+ if (unlikely((long)__builtin_frame_address(0) >= 0))
+ return;
+
if (unlikely(ftrace_graph_is_dead()))
return;
diff --git a/arch/x86/kernel/kprobes/common.h b/arch/x86/kernel/kprobes/common.h
index c6ee63f..d688826 100644
--- a/arch/x86/kernel/kprobes/common.h
+++ b/arch/x86/kernel/kprobes/common.h
@@ -67,7 +67,7 @@
#endif
/* Ensure if the instruction can be boostable */
-extern int can_boost(kprobe_opcode_t *instruction);
+extern int can_boost(kprobe_opcode_t *instruction, void *addr);
/* Recover instruction if given address is probed */
extern unsigned long recover_probed_instruction(kprobe_opcode_t *buf,
unsigned long addr);
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 0eb0006..335141b 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -163,12 +163,12 @@
* Returns non-zero if opcode is boostable.
* RIP relative instructions are adjusted at copying time in 64 bits mode
*/
-int can_boost(kprobe_opcode_t *opcodes)
+int can_boost(kprobe_opcode_t *opcodes, void *addr)
{
kprobe_opcode_t opcode;
kprobe_opcode_t *orig_opcodes = opcodes;
- if (search_exception_tables((unsigned long)opcodes))
+ if (search_exception_tables((unsigned long)addr))
return 0; /* Page fault may occur on this address. */
retry:
@@ -413,7 +413,7 @@
* __copy_instruction can modify the displacement of the instruction,
* but it doesn't affect boostable check.
*/
- if (can_boost(p->ainsn.insn))
+ if (can_boost(p->ainsn.insn, p->addr))
p->ainsn.boostable = 0;
else
p->ainsn.boostable = -1;
diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
index 7b3b9d1..c9d488f 100644
--- a/arch/x86/kernel/kprobes/opt.c
+++ b/arch/x86/kernel/kprobes/opt.c
@@ -177,7 +177,7 @@
while (len < RELATIVEJUMP_SIZE) {
ret = __copy_instruction(dest + len, src + len);
- if (!ret || !can_boost(dest + len))
+ if (!ret || !can_boost(dest + len, src + len))
return -EINVAL;
len += ret;
}
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 47190bd..32187f8 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -151,6 +151,8 @@
if (hlist_unhashed(&n.link))
break;
+ rcu_irq_exit();
+
if (!n.halted) {
local_irq_enable();
schedule();
@@ -159,11 +161,11 @@
/*
* We cannot reschedule. So halt.
*/
- rcu_irq_exit();
native_safe_halt();
- rcu_irq_enter();
local_irq_disable();
}
+
+ rcu_irq_enter();
}
if (!n.halted)
finish_wait(&n.wq, &wait);
diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
index 0497f71..c055e9a 100644
--- a/arch/x86/kernel/pci-calgary_64.c
+++ b/arch/x86/kernel/pci-calgary_64.c
@@ -296,7 +296,7 @@
/* were we called with bad_dma_address? */
badend = DMA_ERROR_CODE + (EMERGENCY_PAGES * PAGE_SIZE);
- if (unlikely((dma_addr >= DMA_ERROR_CODE) && (dma_addr < badend))) {
+ if (unlikely(dma_addr < badend)) {
WARN(1, KERN_ERR "Calgary: driver tried unmapping bad DMA "
"address 0x%Lx\n", dma_addr);
return;
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index d2bbe34..e67b834 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -1048,6 +1048,13 @@
if (mtrr_trim_uncached_memory(max_pfn))
max_pfn = e820_end_of_ram_pfn();
+ /*
+ * This call is required when the CPU does not support PAT. If
+ * mtrr_bp_init() invoked it already via pat_init() the call has no
+ * effect.
+ */
+ init_cache_modes();
+
#ifdef CONFIG_X86_32
/* max_low_pfn get updated here */
find_low_pfn_range();
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 2e1fd58..83d6369 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -46,11 +46,18 @@
return ret;
}
+bool kvm_mpx_supported(void)
+{
+ return ((host_xcr0 & (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR))
+ && kvm_x86_ops->mpx_supported());
+}
+EXPORT_SYMBOL_GPL(kvm_mpx_supported);
+
u64 kvm_supported_xcr0(void)
{
u64 xcr0 = KVM_SUPPORTED_XCR0 & host_xcr0;
- if (!kvm_x86_ops->mpx_supported())
+ if (!kvm_mpx_supported())
xcr0 &= ~(XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR);
return xcr0;
@@ -97,7 +104,7 @@
if (best && (best->eax & (F(XSAVES) | F(XSAVEC))))
best->ebx = xstate_required_size(vcpu->arch.xcr0, true);
- vcpu->arch.eager_fpu = use_eager_fpu() || guest_cpuid_has_mpx(vcpu);
+ vcpu->arch.eager_fpu = use_eager_fpu();
if (vcpu->arch.eager_fpu)
kvm_x86_ops->fpu_activate(vcpu);
@@ -295,7 +302,7 @@
#endif
unsigned f_rdtscp = kvm_x86_ops->rdtscp_supported() ? F(RDTSCP) : 0;
unsigned f_invpcid = kvm_x86_ops->invpcid_supported() ? F(INVPCID) : 0;
- unsigned f_mpx = kvm_x86_ops->mpx_supported() ? F(MPX) : 0;
+ unsigned f_mpx = kvm_mpx_supported() ? F(MPX) : 0;
unsigned f_xsaves = kvm_x86_ops->xsaves_supported() ? F(XSAVES) : 0;
/* cpuid 1.edx */
@@ -737,18 +744,20 @@
static int move_to_next_stateful_cpuid_entry(struct kvm_vcpu *vcpu, int i)
{
struct kvm_cpuid_entry2 *e = &vcpu->arch.cpuid_entries[i];
- int j, nent = vcpu->arch.cpuid_nent;
+ struct kvm_cpuid_entry2 *ej;
+ int j = i;
+ int nent = vcpu->arch.cpuid_nent;
e->flags &= ~KVM_CPUID_FLAG_STATE_READ_NEXT;
/* when no next entry is found, the current entry[i] is reselected */
- for (j = i + 1; ; j = (j + 1) % nent) {
- struct kvm_cpuid_entry2 *ej = &vcpu->arch.cpuid_entries[j];
- if (ej->function == e->function) {
- ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
- return j;
- }
- }
- return 0; /* silence gcc, even though control never reaches here */
+ do {
+ j = (j + 1) % nent;
+ ej = &vcpu->arch.cpuid_entries[j];
+ } while (ej->function != e->function);
+
+ ej->flags |= KVM_CPUID_FLAG_STATE_READ_NEXT;
+
+ return j;
}
/* find an entry with matching function, matching index (if needed), and that
@@ -818,12 +827,6 @@
if (!best)
best = check_cpuid_limit(vcpu, function, index);
- /*
- * Perfmon not yet supported for L2 guest.
- */
- if (is_guest_mode(vcpu) && function == 0xa)
- best = NULL;
-
if (best) {
*eax = best->eax;
*ebx = best->ebx;
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index 3f5c48d..d1534fe 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -4,6 +4,7 @@
#include "x86.h"
int kvm_update_cpuid(struct kvm_vcpu *vcpu);
+bool kvm_mpx_supported(void);
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
u32 function, u32 index);
int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
@@ -134,14 +135,6 @@
return best && (best->ebx & bit(X86_FEATURE_RTM));
}
-static inline bool guest_cpuid_has_mpx(struct kvm_vcpu *vcpu)
-{
- struct kvm_cpuid_entry2 *best;
-
- best = kvm_find_cpuid_entry(vcpu, 7, 0);
- return best && (best->ebx & bit(X86_FEATURE_MPX));
-}
-
static inline bool guest_cpuid_has_pcommit(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *best;
@@ -150,6 +143,14 @@
return best && (best->ebx & bit(X86_FEATURE_PCOMMIT));
}
+static inline bool guest_cpuid_has_mpx(struct kvm_vcpu *vcpu)
+{
+ struct kvm_cpuid_entry2 *best;
+
+ best = kvm_find_cpuid_entry(vcpu, 7, 0);
+ return best && (best->ebx & bit(X86_FEATURE_MPX));
+}
+
static inline bool guest_cpuid_has_rdtscp(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *best;
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 1dcea22..04b2f3c 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -2531,7 +2531,7 @@
u64 smbase;
int ret;
- if ((ctxt->emul_flags & X86EMUL_SMM_MASK) == 0)
+ if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_MASK) == 0)
return emulate_ud(ctxt);
/*
@@ -2580,11 +2580,11 @@
return X86EMUL_UNHANDLEABLE;
}
- if ((ctxt->emul_flags & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
+ if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
ctxt->ops->set_nmi_mask(ctxt, false);
- ctxt->emul_flags &= ~X86EMUL_SMM_INSIDE_NMI_MASK;
- ctxt->emul_flags &= ~X86EMUL_SMM_MASK;
+ ctxt->ops->set_hflags(ctxt, ctxt->ops->get_hflags(ctxt) &
+ ~(X86EMUL_SMM_INSIDE_NMI_MASK | X86EMUL_SMM_MASK));
return X86EMUL_CONTINUE;
}
@@ -5296,6 +5296,7 @@
const struct x86_emulate_ops *ops = ctxt->ops;
int rc = X86EMUL_CONTINUE;
int saved_dst_type = ctxt->dst.type;
+ unsigned emul_flags;
ctxt->mem_read.pos = 0;
@@ -5310,6 +5311,7 @@
goto done;
}
+ emul_flags = ctxt->ops->get_hflags(ctxt);
if (unlikely(ctxt->d &
(No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) {
if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) ||
@@ -5343,7 +5345,7 @@
fetch_possible_mmx_operand(ctxt, &ctxt->dst);
}
- if (unlikely(ctxt->emul_flags & X86EMUL_GUEST_MASK) && ctxt->intercept) {
+ if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && ctxt->intercept) {
rc = emulator_check_intercept(ctxt, ctxt->intercept,
X86_ICPT_PRE_EXCEPT);
if (rc != X86EMUL_CONTINUE)
@@ -5372,7 +5374,7 @@
goto done;
}
- if (unlikely(ctxt->emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
+ if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
rc = emulator_check_intercept(ctxt, ctxt->intercept,
X86_ICPT_POST_EXCEPT);
if (rc != X86EMUL_CONTINUE)
@@ -5426,7 +5428,7 @@
special_insn:
- if (unlikely(ctxt->emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
+ if (unlikely(emul_flags & X86EMUL_GUEST_MASK) && (ctxt->d & Intercept)) {
rc = emulator_check_intercept(ctxt, ctxt->intercept,
X86_ICPT_POST_MEMACCESS);
if (rc != X86EMUL_CONTINUE)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 8eb8a93..1049c3c 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3433,12 +3433,15 @@
return kvm_setup_async_pf(vcpu, gva, kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
}
-static bool can_do_async_pf(struct kvm_vcpu *vcpu)
+bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu)
{
if (unlikely(!lapic_in_kernel(vcpu) ||
kvm_event_needs_reinjection(vcpu)))
return false;
+ if (is_guest_mode(vcpu))
+ return false;
+
return kvm_x86_ops->interrupt_allowed(vcpu);
}
@@ -3454,7 +3457,7 @@
if (!async)
return false; /* *pfn has correct page already */
- if (!prefault && can_do_async_pf(vcpu)) {
+ if (!prefault && kvm_can_do_async_pf(vcpu)) {
trace_kvm_try_async_get_page(gva, gfn);
if (kvm_find_async_pf_gfn(vcpu, gfn)) {
trace_kvm_async_pf_doublefault(gva, gfn);
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 55ffb7b..e60fc80 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -74,6 +74,7 @@
int handle_mmio_page_fault(struct kvm_vcpu *vcpu, u64 addr, bool direct);
void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu);
void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly);
+bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);
static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
{
diff --git a/arch/x86/kvm/pmu_intel.c b/arch/x86/kvm/pmu_intel.c
index ab38af4..23a7c7b 100644
--- a/arch/x86/kvm/pmu_intel.c
+++ b/arch/x86/kvm/pmu_intel.c
@@ -294,7 +294,7 @@
((u64)1 << edx.split.bit_width_fixed) - 1;
}
- pmu->global_ctrl = ((1 << pmu->nr_arch_gp_counters) - 1) |
+ pmu->global_ctrl = ((1ull << pmu->nr_arch_gp_counters) - 1) |
(((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED);
pmu->global_ctrl_mask = ~pmu->global_ctrl;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 3a7ae80d..b123911 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -863,7 +863,6 @@
static u64 construct_eptp(unsigned long root_hpa);
static void kvm_cpu_vmxon(u64 addr);
static void kvm_cpu_vmxoff(void);
-static bool vmx_mpx_supported(void);
static bool vmx_xsaves_supported(void);
static int vmx_cpu_uses_apicv(struct kvm_vcpu *vcpu);
static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr);
@@ -2264,7 +2263,7 @@
if (!(vmcs12->exception_bitmap & (1u << nr)))
return 0;
- nested_vmx_vmexit(vcpu, to_vmx(vcpu)->exit_reason,
+ nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
vmcs_read32(VM_EXIT_INTR_INFO),
vmcs_readl(EXIT_QUALIFICATION));
return 1;
@@ -2541,7 +2540,7 @@
VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER |
VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT;
- if (vmx_mpx_supported())
+ if (kvm_mpx_supported())
vmx->nested.nested_vmx_exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS;
/* We support free control of debug control saving. */
@@ -2562,7 +2561,7 @@
VM_ENTRY_LOAD_IA32_PAT;
vmx->nested.nested_vmx_entry_ctls_high |=
(VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER);
- if (vmx_mpx_supported())
+ if (kvm_mpx_supported())
vmx->nested.nested_vmx_entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS;
/* We support free control of debug control loading. */
@@ -2813,7 +2812,8 @@
msr_info->data = vmcs_readl(GUEST_SYSENTER_ESP);
break;
case MSR_IA32_BNDCFGS:
- if (!vmx_mpx_supported())
+ if (!kvm_mpx_supported() ||
+ (!msr_info->host_initiated && !guest_cpuid_has_mpx(vcpu)))
return 1;
msr_info->data = vmcs_read64(GUEST_BNDCFGS);
break;
@@ -2890,7 +2890,11 @@
vmcs_writel(GUEST_SYSENTER_ESP, data);
break;
case MSR_IA32_BNDCFGS:
- if (!vmx_mpx_supported())
+ if (!kvm_mpx_supported() ||
+ (!msr_info->host_initiated && !guest_cpuid_has_mpx(vcpu)))
+ return 1;
+ if (is_noncanonical_address(data & PAGE_MASK) ||
+ (data & MSR_IA32_BNDCFGS_RSVD))
return 1;
vmcs_write64(GUEST_BNDCFGS, data);
break;
@@ -3363,7 +3367,7 @@
for (i = j = 0; i < max_shadow_read_write_fields; i++) {
switch (shadow_read_write_fields[i]) {
case GUEST_BNDCFGS:
- if (!vmx_mpx_supported())
+ if (!kvm_mpx_supported())
continue;
break;
default:
@@ -6253,7 +6257,6 @@
vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_CS, false);
vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false);
vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false);
- vmx_disable_intercept_for_msr(MSR_IA32_BNDCFGS, true);
memcpy(vmx_msr_bitmap_legacy_x2apic,
vmx_msr_bitmap_legacy, PAGE_SIZE);
@@ -6678,14 +6681,20 @@
}
page = nested_get_page(vcpu, vmptr);
- if (page == NULL ||
- *(u32 *)kmap(page) != VMCS12_REVISION) {
+ if (page == NULL) {
nested_vmx_failInvalid(vcpu);
+ skip_emulated_instruction(vcpu);
+ return 1;
+ }
+ if (*(u32 *)kmap(page) != VMCS12_REVISION) {
kunmap(page);
+ nested_release_page_clean(page);
+ nested_vmx_failInvalid(vcpu);
skip_emulated_instruction(vcpu);
return 1;
}
kunmap(page);
+ nested_release_page_clean(page);
vmx->nested.vmxon_ptr = vmptr;
break;
case EXIT_REASON_VMCLEAR:
@@ -7748,8 +7757,6 @@
case EXIT_REASON_TASK_SWITCH:
return true;
case EXIT_REASON_CPUID:
- if (kvm_register_read(vcpu, VCPU_REGS_RAX) == 0xa)
- return false;
return true;
case EXIT_REASON_HLT:
return nested_cpu_has(vmcs12, CPU_BASED_HLT_EXITING);
@@ -7834,6 +7841,9 @@
return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES);
case EXIT_REASON_PCOMMIT:
return nested_cpu_has2(vmcs12, SECONDARY_EXEC_PCOMMIT);
+ case EXIT_REASON_PML_FULL:
+ /* We don't expose PML support to L1. */
+ return false;
default:
return true;
}
@@ -9753,6 +9763,18 @@
}
+ if (enable_pml) {
+ /*
+ * Conceptually we want to copy the PML address and index from
+ * vmcs01 here, and then back to vmcs01 on nested vmexit. But,
+ * since we always flush the log on each vmexit, this happens
+ * to be equivalent to simply resetting the fields in vmcs02.
+ */
+ ASSERT(vmx->pml_pg);
+ vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
+ vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
+ }
+
if (nested_cpu_has_ept(vmcs12)) {
kvm_mmu_unload(vcpu);
nested_ept_init_mmu_context(vcpu);
@@ -10246,7 +10268,7 @@
vmcs12->guest_sysenter_cs = vmcs_read32(GUEST_SYSENTER_CS);
vmcs12->guest_sysenter_esp = vmcs_readl(GUEST_SYSENTER_ESP);
vmcs12->guest_sysenter_eip = vmcs_readl(GUEST_SYSENTER_EIP);
- if (vmx_mpx_supported())
+ if (kvm_mpx_supported())
vmcs12->guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
if (nested_cpu_has_xsaves(vmcs12))
vmcs12->xss_exit_bitmap = vmcs_read64(XSS_EXIT_BITMAP);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index e75095f..8e526c6 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2960,6 +2960,12 @@
| KVM_VCPUEVENT_VALID_SMM))
return -EINVAL;
+ /* INITs are latched while in SMM */
+ if (events->flags & KVM_VCPUEVENT_VALID_SMM &&
+ (events->smi.smm || events->smi.pending) &&
+ vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED)
+ return -EINVAL;
+
process_nmi(vcpu);
vcpu->arch.exception.pending = events->exception.injected;
vcpu->arch.exception.nr = events->exception.nr;
@@ -3134,11 +3140,14 @@
}
}
+#define XSAVE_MXCSR_OFFSET 24
+
static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
struct kvm_xsave *guest_xsave)
{
u64 xstate_bv =
*(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)];
+ u32 mxcsr = *(u32 *)&guest_xsave->region[XSAVE_MXCSR_OFFSET / sizeof(u32)];
if (cpu_has_xsave) {
/*
@@ -3146,11 +3155,13 @@
* CPUID leaf 0xD, index 0, EDX:EAX. This is for compatibility
* with old userspace.
*/
- if (xstate_bv & ~kvm_supported_xcr0())
+ if (xstate_bv & ~kvm_supported_xcr0() ||
+ mxcsr & ~mxcsr_feature_mask)
return -EINVAL;
load_xsave(vcpu, (u8 *)guest_xsave->region);
} else {
- if (xstate_bv & ~XFEATURE_MASK_FPSSE)
+ if (xstate_bv & ~XFEATURE_MASK_FPSSE ||
+ mxcsr & ~mxcsr_feature_mask)
return -EINVAL;
memcpy(&vcpu->arch.guest_fpu.state.fxsave,
guest_xsave->region, sizeof(struct fxregs_state));
@@ -4597,16 +4608,20 @@
static int kernel_pio(struct kvm_vcpu *vcpu, void *pd)
{
- /* TODO: String I/O for in kernel device */
- int r;
+ int r = 0, i;
- if (vcpu->arch.pio.in)
- r = kvm_io_bus_read(vcpu, KVM_PIO_BUS, vcpu->arch.pio.port,
- vcpu->arch.pio.size, pd);
- else
- r = kvm_io_bus_write(vcpu, KVM_PIO_BUS,
- vcpu->arch.pio.port, vcpu->arch.pio.size,
- pd);
+ for (i = 0; i < vcpu->arch.pio.count; i++) {
+ if (vcpu->arch.pio.in)
+ r = kvm_io_bus_read(vcpu, KVM_PIO_BUS, vcpu->arch.pio.port,
+ vcpu->arch.pio.size, pd);
+ else
+ r = kvm_io_bus_write(vcpu, KVM_PIO_BUS,
+ vcpu->arch.pio.port, vcpu->arch.pio.size,
+ pd);
+ if (r)
+ break;
+ pd += vcpu->arch.pio.size;
+ }
return r;
}
@@ -4644,6 +4659,8 @@
if (vcpu->arch.pio.count)
goto data_avail;
+ memset(vcpu->arch.pio_data, 0, size * count);
+
ret = emulator_pio_in_out(vcpu, size, port, val, count, true);
if (ret) {
data_avail:
@@ -4827,6 +4844,8 @@
if (var.unusable) {
memset(desc, 0, sizeof(*desc));
+ if (base3)
+ *base3 = 0;
return false;
}
@@ -4982,6 +5001,16 @@
kvm_x86_ops->set_nmi_mask(emul_to_vcpu(ctxt), masked);
}
+static unsigned emulator_get_hflags(struct x86_emulate_ctxt *ctxt)
+{
+ return emul_to_vcpu(ctxt)->arch.hflags;
+}
+
+static void emulator_set_hflags(struct x86_emulate_ctxt *ctxt, unsigned emul_flags)
+{
+ kvm_set_hflags(emul_to_vcpu(ctxt), emul_flags);
+}
+
static const struct x86_emulate_ops emulate_ops = {
.read_gpr = emulator_read_gpr,
.write_gpr = emulator_write_gpr,
@@ -5021,6 +5050,8 @@
.intercept = emulator_intercept,
.get_cpuid = emulator_get_cpuid,
.set_nmi_mask = emulator_set_nmi_mask,
+ .get_hflags = emulator_get_hflags,
+ .set_hflags = emulator_set_hflags,
};
static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
@@ -5073,7 +5104,6 @@
BUILD_BUG_ON(HF_GUEST_MASK != X86EMUL_GUEST_MASK);
BUILD_BUG_ON(HF_SMM_MASK != X86EMUL_SMM_MASK);
BUILD_BUG_ON(HF_SMM_INSIDE_NMI_MASK != X86EMUL_SMM_INSIDE_NMI_MASK);
- ctxt->emul_flags = vcpu->arch.hflags;
init_decode_cache(ctxt);
vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
@@ -5469,8 +5499,6 @@
unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);
toggle_interruptibility(vcpu, ctxt->interruptibility);
vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
- if (vcpu->arch.hflags != ctxt->emul_flags)
- kvm_set_hflags(vcpu, ctxt->emul_flags);
kvm_rip_write(vcpu, ctxt->eip);
if (r == EMULATE_DONE)
kvm_vcpu_check_singlestep(vcpu, rflags, &r);
@@ -5957,7 +5985,8 @@
kvm_x86_ops->patch_hypercall(vcpu, instruction);
- return emulator_write_emulated(ctxt, rip, instruction, 3, NULL);
+ return emulator_write_emulated(ctxt, rip, instruction, 3,
+ &ctxt->exception);
}
static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu)
@@ -6993,6 +7022,12 @@
mp_state->mp_state != KVM_MP_STATE_RUNNABLE)
return -EINVAL;
+ /* INITs are latched while in SMM */
+ if ((is_smm(vcpu) || vcpu->arch.smi_pending) &&
+ (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED ||
+ mp_state->mp_state == KVM_MP_STATE_INIT_RECEIVED))
+ return -EINVAL;
+
if (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED) {
vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
set_bit(KVM_APIC_SIPI, &vcpu->arch.apic->pending_events);
@@ -8222,8 +8257,7 @@
if (!(vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED))
return true;
else
- return !kvm_event_needs_reinjection(vcpu) &&
- kvm_x86_ops->interrupt_allowed(vcpu);
+ return kvm_can_do_async_pf(vcpu);
}
void kvm_arch_start_assignment(struct kvm *kvm)
diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
index 27f89c7..423644c 100644
--- a/arch/x86/lib/copy_user_64.S
+++ b/arch/x86/lib/copy_user_64.S
@@ -80,7 +80,7 @@
movl %edx,%ecx
andl $63,%edx
shrl $6,%ecx
- jz 17f
+ jz .L_copy_short_string
1: movq (%rsi),%r8
2: movq 1*8(%rsi),%r9
3: movq 2*8(%rsi),%r10
@@ -101,7 +101,8 @@
leaq 64(%rdi),%rdi
decl %ecx
jnz 1b
-17: movl %edx,%ecx
+.L_copy_short_string:
+ movl %edx,%ecx
andl $7,%edx
shrl $3,%ecx
jz 20f
@@ -215,6 +216,8 @@
*/
ENTRY(copy_user_enhanced_fast_string)
ASM_STAC
+ cmpl $64,%edx
+ jb .L_copy_short_string /* less then 64 bytes, avoid the costly 'rep' */
movl %edx,%ecx
1: rep
movsb
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 493f541..3aebbd6 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -628,21 +628,40 @@
* devmem_is_allowed() checks to see if /dev/mem access to a certain address
* is valid. The argument is a physical page number.
*
- *
- * On x86, access has to be given to the first megabyte of ram because that area
- * contains BIOS code and data regions used by X and dosemu and similar apps.
- * Access has to be given to non-kernel-ram areas as well, these contain the PCI
- * mmio resources as well as potential bios/acpi data regions.
+ * On x86, access has to be given to the first megabyte of RAM because that
+ * area traditionally contains BIOS code and data regions used by X, dosemu,
+ * and similar apps. Since they map the entire memory range, the whole range
+ * must be allowed (for mapping), but any areas that would otherwise be
+ * disallowed are flagged as being "zero filled" instead of rejected.
+ * Access has to be given to non-kernel-ram areas as well, these contain the
+ * PCI mmio resources as well as potential bios/acpi data regions.
*/
int devmem_is_allowed(unsigned long pagenr)
{
- if (pagenr < 256)
- return 1;
- if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
+ if (page_is_ram(pagenr)) {
+ /*
+ * For disallowed memory regions in the low 1MB range,
+ * request that the page be shown as all zeros.
+ */
+ if (pagenr < 256)
+ return 2;
+
return 0;
- if (!page_is_ram(pagenr))
- return 1;
- return 0;
+ }
+
+ /*
+ * This must follow RAM test, since System RAM is considered a
+ * restricted resource under CONFIG_STRICT_IOMEM.
+ */
+ if (iomem_is_exclusive(pagenr << PAGE_SHIFT)) {
+ /* Low 1MB bypasses iomem restrictions. */
+ if (pagenr < 256)
+ return 1;
+
+ return 0;
+ }
+
+ return 1;
}
void free_init_pages(char *what, unsigned long begin, unsigned long end)
diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c
index ef05755..7ed47b1 100644
--- a/arch/x86/mm/mpx.c
+++ b/arch/x86/mm/mpx.c
@@ -293,7 +293,7 @@
* We were not able to extract an address from the instruction,
* probably because there was something invalid in it.
*/
- if (info->si_addr == (void *)-1) {
+ if (info->si_addr == (void __user *)-1) {
err = -EINVAL;
goto err_out;
}
@@ -525,15 +525,7 @@
if (!kernel_managing_mpx_tables(current->mm))
return -EINVAL;
- if (do_mpx_bt_fault()) {
- force_sig(SIGSEGV, current);
- /*
- * The force_sig() is essentially "handling" this
- * exception, so we do not pass up the error
- * from do_mpx_bt_fault().
- */
- }
- return 0;
+ return do_mpx_bt_fault();
}
/*
diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
index 47b6436..3686a1d 100644
--- a/arch/x86/mm/numa_32.c
+++ b/arch/x86/mm/numa_32.c
@@ -100,5 +100,6 @@
printk(KERN_DEBUG "High memory starts at vaddr %08lx\n",
(ulong) pfn_to_kaddr(highstart_pfn));
+ __vmalloc_start_set = true;
setup_bootmem_allocator();
}
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 6ad687d..3f1bb4f 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -36,14 +36,14 @@
#undef pr_fmt
#define pr_fmt(fmt) "" fmt
-static bool boot_cpu_done;
-
-static int __read_mostly __pat_enabled = IS_ENABLED(CONFIG_X86_PAT);
-static void init_cache_modes(void);
+static bool __read_mostly boot_cpu_done;
+static bool __read_mostly pat_disabled = !IS_ENABLED(CONFIG_X86_PAT);
+static bool __read_mostly pat_initialized;
+static bool __read_mostly init_cm_done;
void pat_disable(const char *reason)
{
- if (!__pat_enabled)
+ if (pat_disabled)
return;
if (boot_cpu_done) {
@@ -51,10 +51,8 @@
return;
}
- __pat_enabled = 0;
+ pat_disabled = true;
pr_info("x86/PAT: %s\n", reason);
-
- init_cache_modes();
}
static int __init nopat(char *str)
@@ -66,7 +64,7 @@
bool pat_enabled(void)
{
- return !!__pat_enabled;
+ return pat_initialized;
}
EXPORT_SYMBOL_GPL(pat_enabled);
@@ -204,6 +202,8 @@
update_cache_mode_entry(i, cache);
}
pr_info("x86/PAT: Configuration [0-7]: %s\n", pat_msg);
+
+ init_cm_done = true;
}
#define PAT(x, y) ((u64)PAT_ ## y << ((x)*8))
@@ -224,6 +224,7 @@
}
wrmsrl(MSR_IA32_CR_PAT, pat);
+ pat_initialized = true;
__init_cache_modes(pat);
}
@@ -241,10 +242,9 @@
wrmsrl(MSR_IA32_CR_PAT, pat);
}
-static void init_cache_modes(void)
+void init_cache_modes(void)
{
u64 pat = 0;
- static int init_cm_done;
if (init_cm_done)
return;
@@ -286,8 +286,6 @@
}
__init_cache_modes(pat);
-
- init_cm_done = 1;
}
/**
@@ -305,10 +303,8 @@
u64 pat;
struct cpuinfo_x86 *c = &boot_cpu_data;
- if (!pat_enabled()) {
- init_cache_modes();
+ if (pat_disabled)
return;
- }
if ((c->x86_vendor == X86_VENDOR_INTEL) &&
(((c->x86 == 0x6) && (c->x86_model <= 0xd)) ||
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 5fb6ada..5a760fd 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -134,8 +134,6 @@
{
struct flush_tlb_info info;
- if (end == 0)
- end = start + PAGE_SIZE;
info.flush_mm = mm;
info.flush_start = start;
info.flush_end = end;
@@ -264,7 +262,7 @@
}
if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
- flush_tlb_others(mm_cpumask(mm), mm, start, 0UL);
+ flush_tlb_others(mm_cpumask(mm), mm, start, start + PAGE_SIZE);
preempt_enable();
}
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index c6d6efe..7575f07 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -231,23 +231,14 @@
return 1;
for_each_pci_msi_entry(msidesc, dev) {
- __pci_read_msi_msg(msidesc, &msg);
- pirq = MSI_ADDR_EXT_DEST_ID(msg.address_hi) |
- ((msg.address_lo >> MSI_ADDR_DEST_ID_SHIFT) & 0xff);
- if (msg.data != XEN_PIRQ_MSI_DATA ||
- xen_irq_from_pirq(pirq) < 0) {
- pirq = xen_allocate_pirq_msi(dev, msidesc);
- if (pirq < 0) {
- irq = -ENODEV;
- goto error;
- }
- xen_msi_compose_msg(dev, pirq, &msg);
- __pci_write_msi_msg(msidesc, &msg);
- dev_dbg(&dev->dev, "xen: msi bound to pirq=%d\n", pirq);
- } else {
- dev_dbg(&dev->dev,
- "xen: msi already bound to pirq=%d\n", pirq);
+ pirq = xen_allocate_pirq_msi(dev, msidesc);
+ if (pirq < 0) {
+ irq = -ENODEV;
+ goto error;
}
+ xen_msi_compose_msg(dev, pirq, &msg);
+ __pci_write_msi_msg(msidesc, &msg);
+ dev_dbg(&dev->dev, "xen: msi bound to pirq=%d\n", pirq);
irq = xen_bind_pirq_msi_to_irq(dev, msidesc, pirq,
(type == PCI_CAP_ID_MSI) ? nvec : 1,
(type == PCI_CAP_ID_MSIX) ?
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_wdt.c b/arch/x86/platform/intel-mid/device_libs/platform_wdt.c
index de73413..40c6164 100644
--- a/arch/x86/platform/intel-mid/device_libs/platform_wdt.c
+++ b/arch/x86/platform/intel-mid/device_libs/platform_wdt.c
@@ -17,7 +17,7 @@
#include <asm/intel-mid.h>
#include <asm/io_apic.h>
-#define TANGIER_EXT_TIMER0_MSI 15
+#define TANGIER_EXT_TIMER0_MSI 12
static struct platform_device wdt_dev = {
.name = "intel_mid_wdt",
diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
index 0c2fae8..73eb7fd 100644
--- a/arch/x86/tools/relocs.c
+++ b/arch/x86/tools/relocs.c
@@ -992,11 +992,12 @@
die("Segment relocations found but --realmode not specified\n");
/* Order the relocations for more efficient processing */
- sort_relocs(&relocs16);
sort_relocs(&relocs32);
#if ELF_BITS == 64
sort_relocs(&relocs32neg);
sort_relocs(&relocs64);
+#else
+ sort_relocs(&relocs16);
#endif
/* Print the relocations */
diff --git a/arch/x86/um/ptrace_64.c b/arch/x86/um/ptrace_64.c
index a629694..e14c43a 100644
--- a/arch/x86/um/ptrace_64.c
+++ b/arch/x86/um/ptrace_64.c
@@ -121,7 +121,7 @@
else if ((addr >= offsetof(struct user, u_debugreg[0])) &&
(addr <= offsetof(struct user, u_debugreg[7]))) {
addr -= offsetof(struct user, u_debugreg[0]);
- addr = addr >> 2;
+ addr = addr >> 3;
if ((addr == 4) || (addr == 5))
return -EIO;
child->thread.arch.debugregs[addr] = data;
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 1e56ff5..63146c3 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -2038,7 +2038,8 @@
/*
* Translate a virtual address to a physical one without relying on mapped
- * page tables.
+ * page tables. Don't rely on big pages being aligned in (guest) physical
+ * space!
*/
static phys_addr_t __init xen_early_virt_to_phys(unsigned long vaddr)
{
@@ -2059,7 +2060,7 @@
sizeof(pud)));
if (!pud_present(pud))
return 0;
- pa = pud_pfn(pud) << PAGE_SHIFT;
+ pa = pud_val(pud) & PTE_PFN_MASK;
if (pud_large(pud))
return pa + (vaddr & ~PUD_MASK);
@@ -2067,7 +2068,7 @@
sizeof(pmd)));
if (!pmd_present(pmd))
return 0;
- pa = pmd_pfn(pmd) << PAGE_SHIFT;
+ pa = pmd_val(pmd) & PTE_PFN_MASK;
if (pmd_large(pmd))
return pa + (vaddr & ~PMD_MASK);
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index e345891..df8844a 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -713,10 +713,9 @@
size = PFN_PHYS(xen_start_info->nr_p2m_frames);
}
- if (!xen_is_e820_reserved(start, size)) {
- memblock_reserve(start, size);
+ memblock_reserve(start, size);
+ if (!xen_is_e820_reserved(start, size))
return;
- }
#ifdef CONFIG_X86_32
/*
@@ -727,6 +726,7 @@
BUG();
#else
xen_relocate_p2m();
+ memblock_free(start, size);
#endif
}
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 9e2ba5c..f42e78d 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -27,6 +27,12 @@
static void xen_qlock_kick(int cpu)
{
+ int irq = per_cpu(lock_kicker_irq, cpu);
+
+ /* Don't kick if the target's kicker interrupt is not initialized. */
+ if (irq == -1)
+ return;
+
xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
}
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index f1ba6a0..8846257 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -343,11 +343,11 @@
WARN_ON(!clockevent_state_oneshot(evt));
single.timeout_abs_ns = get_abs_timeout(delta);
- single.flags = VCPU_SSHOTTMR_future;
+ /* Get an event anyway, even if the timeout is already expired */
+ single.flags = 0;
ret = HYPERVISOR_vcpu_op(VCPUOP_set_singleshot_timer, cpu, &single);
-
- BUG_ON(ret != 0 && ret != -ETIME);
+ BUG_ON(ret != 0);
return ret;
}
diff --git a/arch/xtensa/include/asm/irq.h b/arch/xtensa/include/asm/irq.h
index f71f88e..19707db 100644
--- a/arch/xtensa/include/asm/irq.h
+++ b/arch/xtensa/include/asm/irq.h
@@ -29,7 +29,8 @@
# define PLATFORM_NR_IRQS 0
#endif
#define XTENSA_NR_IRQS XCHAL_NUM_INTERRUPTS
-#define NR_IRQS (XTENSA_NR_IRQS + VARIANT_NR_IRQS + PLATFORM_NR_IRQS)
+#define NR_IRQS (XTENSA_NR_IRQS + VARIANT_NR_IRQS + PLATFORM_NR_IRQS + 1)
+#define XTENSA_PIC_LINUX_IRQ(hwirq) ((hwirq) + 1)
#if VARIANT_NR_IRQS == 0
static inline void variant_init_irq(void) { }
diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c
index 4ac3d23..4416944 100644
--- a/arch/xtensa/kernel/irq.c
+++ b/arch/xtensa/kernel/irq.c
@@ -34,11 +34,6 @@
{
int irq = irq_find_mapping(NULL, hwirq);
- if (hwirq >= NR_IRQS) {
- printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
- __func__, hwirq);
- }
-
#ifdef CONFIG_DEBUG_STACKOVERFLOW
/* Debugging check for stack overflow: is there less than 1KB free? */
{
diff --git a/arch/xtensa/platforms/xtfpga/include/platform/hardware.h b/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
index dbeea2b..1fda7e2 100644
--- a/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
+++ b/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
@@ -24,16 +24,18 @@
/* Interrupt configuration. */
-#define PLATFORM_NR_IRQS 10
+#define PLATFORM_NR_IRQS 0
/* Default assignment of LX60 devices to external interrupts. */
#ifdef CONFIG_XTENSA_MX
#define DUART16552_INTNUM XCHAL_EXTINT3_NUM
#define OETH_IRQ XCHAL_EXTINT4_NUM
+#define C67X00_IRQ XCHAL_EXTINT8_NUM
#else
#define DUART16552_INTNUM XCHAL_EXTINT0_NUM
#define OETH_IRQ XCHAL_EXTINT1_NUM
+#define C67X00_IRQ XCHAL_EXTINT5_NUM
#endif
/*
@@ -63,5 +65,5 @@
#define C67X00_PADDR (XCHAL_KIO_PADDR + 0x0D0D0000)
#define C67X00_SIZE 0x10
-#define C67X00_IRQ 5
+
#endif /* __XTENSA_XTAVNET_HARDWARE_H */
diff --git a/arch/xtensa/platforms/xtfpga/setup.c b/arch/xtensa/platforms/xtfpga/setup.c
index e9f65f7..d1e9439 100644
--- a/arch/xtensa/platforms/xtfpga/setup.c
+++ b/arch/xtensa/platforms/xtfpga/setup.c
@@ -209,8 +209,8 @@
.flags = IORESOURCE_MEM,
},
[2] = { /* IRQ number */
- .start = OETH_IRQ,
- .end = OETH_IRQ,
+ .start = XTENSA_PIC_LINUX_IRQ(OETH_IRQ),
+ .end = XTENSA_PIC_LINUX_IRQ(OETH_IRQ),
.flags = IORESOURCE_IRQ,
},
};
@@ -246,8 +246,8 @@
.flags = IORESOURCE_MEM,
},
[1] = { /* IRQ number */
- .start = C67X00_IRQ,
- .end = C67X00_IRQ,
+ .start = XTENSA_PIC_LINUX_IRQ(C67X00_IRQ),
+ .end = XTENSA_PIC_LINUX_IRQ(C67X00_IRQ),
.flags = IORESOURCE_IRQ,
},
};
@@ -280,7 +280,7 @@
static struct plat_serial8250_port serial_platform_data[] = {
[0] = {
.mapbase = DUART16552_PADDR,
- .irq = DUART16552_INTNUM,
+ .irq = XTENSA_PIC_LINUX_IRQ(DUART16552_INTNUM),
.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST |
UPF_IOREMAP,
.iotype = UPIO_MEM32,
diff --git a/block/bio.c b/block/bio.c
index d55eb83..ce974b1 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -375,10 +375,14 @@
bio_list_init(&punt);
bio_list_init(&nopunt);
- while ((bio = bio_list_pop(current->bio_list)))
+ while ((bio = bio_list_pop(&current->bio_list[0])))
bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
+ current->bio_list[0] = nopunt;
- *current->bio_list = nopunt;
+ bio_list_init(&nopunt);
+ while ((bio = bio_list_pop(&current->bio_list[1])))
+ bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
+ current->bio_list[1] = nopunt;
spin_lock(&bs->rescue_lock);
bio_list_merge(&bs->rescue_list, &punt);
@@ -466,7 +470,9 @@
* we retry with the original gfp_flags.
*/
- if (current->bio_list && !bio_list_empty(current->bio_list))
+ if (current->bio_list &&
+ (!bio_list_empty(&current->bio_list[0]) ||
+ !bio_list_empty(&current->bio_list[1])))
gfp_mask &= ~__GFP_DIRECT_RECLAIM;
p = mempool_alloc(bs->bio_pool, gfp_mask);
diff --git a/block/blk-core.c b/block/blk-core.c
index 500447b..153c761 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1269,7 +1269,11 @@
trace_block_sleeprq(q, bio, rw_flags & 1);
spin_unlock_irq(q->queue_lock);
- io_schedule();
+ /*
+ * FIXME: this should be io_schedule(). The timeout is there as a
+ * workaround for some io timeout problems.
+ */
+ io_schedule_timeout(5*HZ);
/*
* After sleeping, we become a "batching" process and will be able
@@ -2038,7 +2042,14 @@
*/
blk_qc_t generic_make_request(struct bio *bio)
{
- struct bio_list bio_list_on_stack;
+ /*
+ * bio_list_on_stack[0] contains bios submitted by the current
+ * make_request_fn.
+ * bio_list_on_stack[1] contains bios that were submitted before
+ * the current make_request_fn, but that haven't been processed
+ * yet.
+ */
+ struct bio_list bio_list_on_stack[2];
blk_qc_t ret = BLK_QC_T_NONE;
if (!generic_make_request_checks(bio))
@@ -2055,7 +2066,7 @@
* should be added at the tail
*/
if (current->bio_list) {
- bio_list_add(current->bio_list, bio);
+ bio_list_add(&current->bio_list[0], bio);
goto out;
}
@@ -2074,24 +2085,39 @@
* bio_list, and call into ->make_request() again.
*/
BUG_ON(bio->bi_next);
- bio_list_init(&bio_list_on_stack);
- current->bio_list = &bio_list_on_stack;
+ bio_list_init(&bio_list_on_stack[0]);
+ current->bio_list = bio_list_on_stack;
do {
struct request_queue *q = bdev_get_queue(bio->bi_bdev);
if (likely(blk_queue_enter(q, __GFP_DIRECT_RECLAIM) == 0)) {
+ struct bio_list lower, same;
+
+ /* Create a fresh bio_list for all subordinate requests */
+ bio_list_on_stack[1] = bio_list_on_stack[0];
+ bio_list_init(&bio_list_on_stack[0]);
ret = q->make_request_fn(q, bio);
blk_queue_exit(q);
-
- bio = bio_list_pop(current->bio_list);
+ /* sort new bios into those for a lower level
+ * and those for the same level
+ */
+ bio_list_init(&lower);
+ bio_list_init(&same);
+ while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL)
+ if (q == bdev_get_queue(bio->bi_bdev))
+ bio_list_add(&same, bio);
+ else
+ bio_list_add(&lower, bio);
+ /* now assemble so we handle the lowest level first */
+ bio_list_merge(&bio_list_on_stack[0], &lower);
+ bio_list_merge(&bio_list_on_stack[0], &same);
+ bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]);
} else {
- struct bio *bio_next = bio_list_pop(current->bio_list);
-
bio_io_error(bio);
- bio = bio_next;
}
+ bio = bio_list_pop(&bio_list_on_stack[0]);
} while (bio);
current->bio_list = NULL; /* deactivate */
diff --git a/block/blk-integrity.c b/block/blk-integrity.c
index d69c5c7..478f572 100644
--- a/block/blk-integrity.c
+++ b/block/blk-integrity.c
@@ -412,12 +412,13 @@
bi->flags = BLK_INTEGRITY_VERIFY | BLK_INTEGRITY_GENERATE |
template->flags;
- bi->interval_exp = ilog2(queue_logical_block_size(disk->queue));
+ bi->interval_exp = template->interval_exp ? :
+ ilog2(queue_logical_block_size(disk->queue));
bi->profile = template->profile ? template->profile : &nop_profile;
bi->tuple_size = template->tuple_size;
bi->tag_size = template->tag_size;
- blk_integrity_revalidate(disk);
+ disk->queue->backing_dev_info.capabilities |= BDI_CAP_STABLE_WRITES;
}
EXPORT_SYMBOL(blk_integrity_register);
@@ -430,26 +431,11 @@
*/
void blk_integrity_unregister(struct gendisk *disk)
{
- blk_integrity_revalidate(disk);
+ disk->queue->backing_dev_info.capabilities &= ~BDI_CAP_STABLE_WRITES;
memset(&disk->queue->integrity, 0, sizeof(struct blk_integrity));
}
EXPORT_SYMBOL(blk_integrity_unregister);
-void blk_integrity_revalidate(struct gendisk *disk)
-{
- struct blk_integrity *bi = &disk->queue->integrity;
-
- if (!(disk->flags & GENHD_FL_UP))
- return;
-
- if (bi->profile)
- disk->queue->backing_dev_info.capabilities |=
- BDI_CAP_STABLE_WRITES;
- else
- disk->queue->backing_dev_info.capabilities &=
- ~BDI_CAP_STABLE_WRITES;
-}
-
void blk_integrity_add(struct gendisk *disk)
{
if (kobject_init_and_add(&disk->integrity_kobj, &integrity_ktype,
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 8bd5483..1452db0 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1470,7 +1470,7 @@
INIT_LIST_HEAD(&tags->page_list);
tags->rqs = kzalloc_node(set->queue_depth * sizeof(struct request *),
- GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
+ GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY,
set->numa_node);
if (!tags->rqs) {
blk_mq_free_tags(tags);
@@ -1496,7 +1496,7 @@
do {
page = alloc_pages_node(set->numa_node,
- GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
+ GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO,
this_order);
if (page)
break;
@@ -1517,7 +1517,7 @@
* Allow kmemleak to scan these pages as they contain pointers
* to additional allocations like via ops->init_request().
*/
- kmemleak_alloc(p, order_to_size(this_order), 1, GFP_KERNEL);
+ kmemleak_alloc(p, order_to_size(this_order), 1, GFP_NOIO);
entries_per_page = order_to_size(this_order) / rq_size;
to_do = min(entries_per_page, set->queue_depth - i);
left -= to_do * rq_size;
diff --git a/block/genhd.c b/block/genhd.c
index 7f1e8f8..de2a616 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -673,7 +673,6 @@
kobject_put(disk->part0.holder_dir);
kobject_put(disk->slave_dir);
- disk->driverfs_dev = NULL;
if (!sysfs_deprecated)
sysfs_remove_link(block_depr, dev_name(disk_to_dev(disk)));
pm_runtime_set_memalloc_noio(disk_to_dev(disk), false);
diff --git a/block/partition-generic.c b/block/partition-generic.c
index 91327db..19cf33b 100644
--- a/block/partition-generic.c
+++ b/block/partition-generic.c
@@ -446,7 +446,6 @@
if (disk->fops->revalidate_disk)
disk->fops->revalidate_disk(disk);
- blk_integrity_revalidate(disk);
check_disk_size_change(disk, bdev);
bdev->bd_invalidated = 0;
if (!get_capacity(disk) || !(state = check_partition(disk, bdev)))
diff --git a/block/partitions/msdos.c b/block/partitions/msdos.c
index 93e7c1b..5610cd5 100644
--- a/block/partitions/msdos.c
+++ b/block/partitions/msdos.c
@@ -300,6 +300,8 @@
continue;
bsd_start = le32_to_cpu(p->p_offset);
bsd_size = le32_to_cpu(p->p_size);
+ if (memcmp(flavour, "bsd\0", 4) == 0)
+ bsd_start += offset;
if (offset == bsd_start && size == bsd_size)
/* full parent partition, we have it already */
continue;
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index 0774799..c6fee74 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -182,6 +182,9 @@
__set_bit(WRITE_16, filter->write_ok);
__set_bit(WRITE_LONG, filter->write_ok);
__set_bit(WRITE_LONG_2, filter->write_ok);
+ __set_bit(WRITE_SAME, filter->write_ok);
+ __set_bit(WRITE_SAME_16, filter->write_ok);
+ __set_bit(WRITE_SAME_32, filter->write_ok);
__set_bit(ERASE, filter->write_ok);
__set_bit(GPCMD_MODE_SELECT_10, filter->write_ok);
__set_bit(MODE_SELECT, filter->write_ok);
diff --git a/build.config b/build.config
index 50a52c4..8554072 100644
--- a/build.config
+++ b/build.config
@@ -7,7 +7,7 @@
EXTRA_CMDS=''
KERNEL_DIR=private/msm-google
POST_DEFCONFIG_CMDS='check_defconfig'
-CLANG_PREBUILT_BIN=prebuilts-master/clang/host/linux-x86/clang-3859424/bin/
+CLANG_PREBUILT_BIN=prebuilts-master/clang/host/linux-x86/clang-4053586/bin/
LINUX_GCC_CROSS_COMPILE_PREBUILTS_BIN=prebuilts/gcc/linux-x86/aarch64/aarch64-linux-android-4.9/bin
LZ4_PREBUILTS_BIN=prebuilts-master/misc/linux-x86/lz4
DTC_PREBUILTS_BIN=prebuilts-master/misc/linux-x86/dtc
diff --git a/build.config.clang b/build.config.clang
deleted file mode 100644
index 50a52c4..0000000
--- a/build.config.clang
+++ /dev/null
@@ -1,21 +0,0 @@
-ARCH=arm64
-BRANCH=android-msm-wahoo-4.4
-CC=clang
-CLANG_TRIPLE=aarch64-linux-gnu-
-CROSS_COMPILE=aarch64-linux-android-
-DEFCONFIG=wahoo_defconfig
-EXTRA_CMDS=''
-KERNEL_DIR=private/msm-google
-POST_DEFCONFIG_CMDS='check_defconfig'
-CLANG_PREBUILT_BIN=prebuilts-master/clang/host/linux-x86/clang-3859424/bin/
-LINUX_GCC_CROSS_COMPILE_PREBUILTS_BIN=prebuilts/gcc/linux-x86/aarch64/aarch64-linux-android-4.9/bin
-LZ4_PREBUILTS_BIN=prebuilts-master/misc/linux-x86/lz4
-DTC_PREBUILTS_BIN=prebuilts-master/misc/linux-x86/dtc
-LIBUFDT_PREBUILTS_BIN=prebuilts-master/misc/linux-x86/libufdt
-FILES="
-arch/arm64/boot/dtbo.img
-arch/arm64/boot/Image.lz4-dtb
-vmlinux
-System.map
-"
-IN_KERNEL_MODULES=1
diff --git a/certs/esl_key.pem b/certs/esl_key.pem
new file mode 100644
index 0000000..ec1318b
--- /dev/null
+++ b/certs/esl_key.pem
@@ -0,0 +1,29 @@
+-----BEGIN CERTIFICATE-----
+MIIFADCCAuigAwIBAgIJAKL6ZJ3LUxTBMA0GCSqGSIb3DQEBBQUAMBAxDjAMBgNV
+BAMMBUVhc2VsMB4XDTE3MDQwNjIyMTExMloXDTE3MDUwNjIyMTExMlowEDEOMAwG
+A1UEAwwFRWFzZWwwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDUJuve
+l3PuMegBFYywMhkMmCuP1iTdp9isBpQ7OuNupmOhJCMDs0wBW7IG9lm7oxFQVLGX
+8GGkVCRQBqxWwPmWNIqbE7a7ZXkIaCcF2qrC8X+vYCe/wEw/WHrLAZxPLbt19yAK
++JNcnezWip06EupYIYkH3pjMQlhR+d+QpGs5zlOoGN2Il/nEsGk+GHO2nKCxBE79
+fCS/6cCy/GqHNo3ZykYJg6FzT9MyZzxDwm+Cg1ZH6Ok/AdbAeEy3v1ZQdgjNNqgz
+zb+IXitbAdZjT3D15+m3cy7s8Y3a+bGChjSjWCIkcHc419r9E6JfzJAFM1PITWVn
+6Z5fgLzO8RuIxkuzNalXJ6khvk72ibfvUffmUWuGs/jZh/YINUQ35ULOGBkrSuvX
+ZipbJryKZ61tm8w/sw0zKIHu/y8T3WNMtVF9raqKEjl5P4Wj0Jh39LZNAt2dyJVV
+g67HB0+Y+zdIn9n9Dy5WymFd8QR6tMF3ADb3gk/ztRbriudnidh9jZcLV8SWmePT
+UYwfCf+tWNE5RaASMrBIZy51vXD/HjiF23krmEM2xXk4nAlLbZ4rX52oQqs1vW1e
+Xf8NAi+g9+YpR7THI4iQoziLMl4m4bcSlOHDNRlO4b8LJje58/IuC7FOdFWc9VlK
+0Z+qlliF0eMMEpV/xJC5W7NEbW9yVnSe1wQQUwIDAQABo10wWzAMBgNVHRMBAf8E
+AjAAMAsGA1UdDwQEAwIHgDAdBgNVHQ4EFgQULZy4+2alImbLOwCz49szX635COQw
+HwYDVR0jBBgwFoAULZy4+2alImbLOwCz49szX635COQwDQYJKoZIhvcNAQEFBQAD
+ggIBAMDw/M/y18ywIupgD7CDOb/VZxzj54qeId5UkujHYNkT6vDliCg7CW+2oZWD
+aI6+BeEXieTrs/SHg7mxo3PhHzdvohGLe/kZnwdN6A7NSHKJjwcjv5jBCx395STo
++fXKV+wErn+blqSSSb/6gKchrxlKHr6PmD5QBBV9enHSIwsCnAG4KE1922X1JCSm
+ZGJUcPOocqr7clWYdWulQH75myIL+6bMQ7KSdn6evjFgLKYetqmroVcbUXEir3MP
+FNQM2AMFNcLIC3eBu5LNmRisjoK26ReA24ue7cYkxjEBBEQY/UOZwKm0aAYhBxgb
+mTsgRva0IvwactvvKMdcFokHKxHURcTqPJYTzxkKr5WQycJ+CQc1CbiwJygeJXHN
+w1xLUyHauSi10Uod17564UX7oYBHcwHLHICrwZWrPIqEG4uWcCHul0dabaZTQ3/2
+DIuW18dm+oToGbAKVXP757SN9GVJgmuhLMH5m/tIzO60qEIuFH75BgIIN8SzpY5x
+/aYCkxijTCPShbo0Y6d11PFj0TpeNncl6Xymedb1p03SiMejlE1gbM0k6Tp9daT8
+o/v+zm+yWWyxvr5hB+/udTmJHM7o+COu5jqDzWD++fNm487LC8BMa2F78XaLtWrT
+V0m8ghFKRjzF1w4LUvYNUFlxvNFUMUPc/06TJp1XnWNJvVPL
+-----END CERTIFICATE-----
diff --git a/crypto/ahash.c b/crypto/ahash.c
index dac1c24..f9caf0f 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -31,6 +31,7 @@
crypto_completion_t complete;
void *data;
u8 *result;
+ u32 flags;
void *ubuf[] CRYPTO_MINALIGN_ATTR;
};
@@ -270,6 +271,8 @@
priv->result = req->result;
priv->complete = req->base.complete;
priv->data = req->base.data;
+ priv->flags = req->base.flags;
+
/*
* WARNING: We do not backup req->priv here! The req->priv
* is for internal use of the Crypto API and the
@@ -284,38 +287,44 @@
return 0;
}
-static void ahash_restore_req(struct ahash_request *req)
+static void ahash_restore_req(struct ahash_request *req, int err)
{
struct ahash_request_priv *priv = req->priv;
+ if (!err)
+ memcpy(priv->result, req->result,
+ crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
+
/* Restore the original crypto request. */
req->result = priv->result;
- req->base.complete = priv->complete;
- req->base.data = priv->data;
+
+ ahash_request_set_callback(req, priv->flags,
+ priv->complete, priv->data);
req->priv = NULL;
/* Free the req->priv.priv from the ADJUSTED request. */
kzfree(priv);
}
-static void ahash_op_unaligned_finish(struct ahash_request *req, int err)
+static void ahash_notify_einprogress(struct ahash_request *req)
{
struct ahash_request_priv *priv = req->priv;
+ struct crypto_async_request oreq;
- if (err == -EINPROGRESS)
- return;
+ oreq.data = priv->data;
- if (!err)
- memcpy(priv->result, req->result,
- crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
-
- ahash_restore_req(req);
+ priv->complete(&oreq, -EINPROGRESS);
}
static void ahash_op_unaligned_done(struct crypto_async_request *req, int err)
{
struct ahash_request *areq = req->data;
+ if (err == -EINPROGRESS) {
+ ahash_notify_einprogress(areq);
+ return;
+ }
+
/*
* Restore the original request, see ahash_op_unaligned() for what
* goes where.
@@ -326,7 +335,7 @@
*/
/* First copy req->result into req->priv.result */
- ahash_op_unaligned_finish(areq, err);
+ ahash_restore_req(areq, err);
/* Complete the ORIGINAL request. */
areq->base.complete(&areq->base, err);
@@ -342,7 +351,12 @@
return err;
err = op(req);
- ahash_op_unaligned_finish(req, err);
+ if (err == -EINPROGRESS ||
+ (err == -EBUSY && (ahash_request_flags(req) &
+ CRYPTO_TFM_REQ_MAY_BACKLOG)))
+ return err;
+
+ ahash_restore_req(req, err);
return err;
}
@@ -377,25 +391,14 @@
}
EXPORT_SYMBOL_GPL(crypto_ahash_digest);
-static void ahash_def_finup_finish2(struct ahash_request *req, int err)
-{
- struct ahash_request_priv *priv = req->priv;
-
- if (err == -EINPROGRESS)
- return;
-
- if (!err)
- memcpy(priv->result, req->result,
- crypto_ahash_digestsize(crypto_ahash_reqtfm(req)));
-
- ahash_restore_req(req);
-}
-
static void ahash_def_finup_done2(struct crypto_async_request *req, int err)
{
struct ahash_request *areq = req->data;
- ahash_def_finup_finish2(areq, err);
+ if (err == -EINPROGRESS)
+ return;
+
+ ahash_restore_req(areq, err);
areq->base.complete(&areq->base, err);
}
@@ -406,11 +409,15 @@
goto out;
req->base.complete = ahash_def_finup_done2;
- req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+
err = crypto_ahash_reqtfm(req)->final(req);
+ if (err == -EINPROGRESS ||
+ (err == -EBUSY && (ahash_request_flags(req) &
+ CRYPTO_TFM_REQ_MAY_BACKLOG)))
+ return err;
out:
- ahash_def_finup_finish2(req, err);
+ ahash_restore_req(req, err);
return err;
}
@@ -418,7 +425,16 @@
{
struct ahash_request *areq = req->data;
+ if (err == -EINPROGRESS) {
+ ahash_notify_einprogress(areq);
+ return;
+ }
+
+ areq->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+
err = ahash_def_finup_finish1(areq, err);
+ if (areq->priv)
+ return;
areq->base.complete(&areq->base, err);
}
@@ -433,6 +449,11 @@
return err;
err = tfm->update(req);
+ if (err == -EINPROGRESS ||
+ (err == -EBUSY && (ahash_request_flags(req) &
+ CRYPTO_TFM_REQ_MAY_BACKLOG)))
+ return err;
+
return ahash_def_finup_finish1(req, err);
}
diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
index 6d4d456..faea9d72 100644
--- a/crypto/algif_aead.c
+++ b/crypto/algif_aead.c
@@ -29,6 +29,11 @@
struct scatterlist sg[ALG_MAX_PAGES];
};
+struct aead_tfm {
+ struct crypto_aead *aead;
+ bool has_key;
+};
+
struct aead_ctx {
struct aead_sg_list tsgl;
/*
@@ -513,24 +518,146 @@
.poll = aead_poll,
};
+static int aead_check_key(struct socket *sock)
+{
+ int err = 0;
+ struct sock *psk;
+ struct alg_sock *pask;
+ struct aead_tfm *tfm;
+ struct sock *sk = sock->sk;
+ struct alg_sock *ask = alg_sk(sk);
+
+ lock_sock(sk);
+ if (ask->refcnt)
+ goto unlock_child;
+
+ psk = ask->parent;
+ pask = alg_sk(ask->parent);
+ tfm = pask->private;
+
+ err = -ENOKEY;
+ lock_sock_nested(psk, SINGLE_DEPTH_NESTING);
+ if (!tfm->has_key)
+ goto unlock;
+
+ if (!pask->refcnt++)
+ sock_hold(psk);
+
+ ask->refcnt = 1;
+ sock_put(psk);
+
+ err = 0;
+
+unlock:
+ release_sock(psk);
+unlock_child:
+ release_sock(sk);
+
+ return err;
+}
+
+static int aead_sendmsg_nokey(struct socket *sock, struct msghdr *msg,
+ size_t size)
+{
+ int err;
+
+ err = aead_check_key(sock);
+ if (err)
+ return err;
+
+ return aead_sendmsg(sock, msg, size);
+}
+
+static ssize_t aead_sendpage_nokey(struct socket *sock, struct page *page,
+ int offset, size_t size, int flags)
+{
+ int err;
+
+ err = aead_check_key(sock);
+ if (err)
+ return err;
+
+ return aead_sendpage(sock, page, offset, size, flags);
+}
+
+static int aead_recvmsg_nokey(struct socket *sock, struct msghdr *msg,
+ size_t ignored, int flags)
+{
+ int err;
+
+ err = aead_check_key(sock);
+ if (err)
+ return err;
+
+ return aead_recvmsg(sock, msg, ignored, flags);
+}
+
+static struct proto_ops algif_aead_ops_nokey = {
+ .family = PF_ALG,
+
+ .connect = sock_no_connect,
+ .socketpair = sock_no_socketpair,
+ .getname = sock_no_getname,
+ .ioctl = sock_no_ioctl,
+ .listen = sock_no_listen,
+ .shutdown = sock_no_shutdown,
+ .getsockopt = sock_no_getsockopt,
+ .mmap = sock_no_mmap,
+ .bind = sock_no_bind,
+ .accept = sock_no_accept,
+ .setsockopt = sock_no_setsockopt,
+
+ .release = af_alg_release,
+ .sendmsg = aead_sendmsg_nokey,
+ .sendpage = aead_sendpage_nokey,
+ .recvmsg = aead_recvmsg_nokey,
+ .poll = aead_poll,
+};
+
static void *aead_bind(const char *name, u32 type, u32 mask)
{
- return crypto_alloc_aead(name, type, mask);
+ struct aead_tfm *tfm;
+ struct crypto_aead *aead;
+
+ tfm = kzalloc(sizeof(*tfm), GFP_KERNEL);
+ if (!tfm)
+ return ERR_PTR(-ENOMEM);
+
+ aead = crypto_alloc_aead(name, type, mask);
+ if (IS_ERR(aead)) {
+ kfree(tfm);
+ return ERR_CAST(aead);
+ }
+
+ tfm->aead = aead;
+
+ return tfm;
}
static void aead_release(void *private)
{
- crypto_free_aead(private);
+ struct aead_tfm *tfm = private;
+
+ crypto_free_aead(tfm->aead);
+ kfree(tfm);
}
static int aead_setauthsize(void *private, unsigned int authsize)
{
- return crypto_aead_setauthsize(private, authsize);
+ struct aead_tfm *tfm = private;
+
+ return crypto_aead_setauthsize(tfm->aead, authsize);
}
static int aead_setkey(void *private, const u8 *key, unsigned int keylen)
{
- return crypto_aead_setkey(private, key, keylen);
+ struct aead_tfm *tfm = private;
+ int err;
+
+ err = crypto_aead_setkey(tfm->aead, key, keylen);
+ tfm->has_key = !err;
+
+ return err;
}
static void aead_sock_destruct(struct sock *sk)
@@ -546,12 +673,14 @@
af_alg_release_parent(sk);
}
-static int aead_accept_parent(void *private, struct sock *sk)
+static int aead_accept_parent_nokey(void *private, struct sock *sk)
{
struct aead_ctx *ctx;
struct alg_sock *ask = alg_sk(sk);
- unsigned int len = sizeof(*ctx) + crypto_aead_reqsize(private);
- unsigned int ivlen = crypto_aead_ivsize(private);
+ struct aead_tfm *tfm = private;
+ struct crypto_aead *aead = tfm->aead;
+ unsigned int len = sizeof(*ctx) + crypto_aead_reqsize(aead);
+ unsigned int ivlen = crypto_aead_ivsize(aead);
ctx = sock_kmalloc(sk, len, GFP_KERNEL);
if (!ctx)
@@ -577,7 +706,7 @@
ask->private = ctx;
- aead_request_set_tfm(&ctx->aead_req, private);
+ aead_request_set_tfm(&ctx->aead_req, aead);
aead_request_set_callback(&ctx->aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
af_alg_complete, &ctx->completion);
@@ -586,13 +715,25 @@
return 0;
}
+static int aead_accept_parent(void *private, struct sock *sk)
+{
+ struct aead_tfm *tfm = private;
+
+ if (!tfm->has_key)
+ return -ENOKEY;
+
+ return aead_accept_parent_nokey(private, sk);
+}
+
static const struct af_alg_type algif_type_aead = {
.bind = aead_bind,
.release = aead_release,
.setkey = aead_setkey,
.setauthsize = aead_setauthsize,
.accept = aead_accept_parent,
+ .accept_nokey = aead_accept_parent_nokey,
.ops = &algif_aead_ops,
+ .ops_nokey = &algif_aead_ops_nokey,
.name = "aead",
.owner = THIS_MODULE
};
diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c
index 68a5cea..8d8b3ee 100644
--- a/crypto/algif_hash.c
+++ b/crypto/algif_hash.c
@@ -184,7 +184,7 @@
struct alg_sock *ask = alg_sk(sk);
struct hash_ctx *ctx = ask->private;
struct ahash_request *req = &ctx->req;
- char state[crypto_ahash_statesize(crypto_ahash_reqtfm(req))];
+ char state[crypto_ahash_statesize(crypto_ahash_reqtfm(req)) ? : 1];
struct sock *sk2;
struct alg_sock *ask2;
struct hash_ctx *ctx2;
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index f5e9f93..b3b0004 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -86,8 +86,13 @@
}
sgl = sreq->tsg;
n = sg_nents(sgl);
- for_each_sg(sgl, sg, n, i)
- put_page(sg_page(sg));
+ for_each_sg(sgl, sg, n, i) {
+ struct page *page = sg_page(sg);
+
+ /* some SGs may not have a page mapped */
+ if (page && atomic_read(&page->_count))
+ put_page(page);
+ }
kfree(sreq->tsg);
}
diff --git a/crypto/authencesn.c b/crypto/authencesn.c
index 0c04688..52154ef 100644
--- a/crypto/authencesn.c
+++ b/crypto/authencesn.c
@@ -245,6 +245,9 @@
u8 *ihash = ohash + crypto_ahash_digestsize(auth);
u32 tmp[2];
+ if (!authsize)
+ goto decrypt;
+
/* Move high-order bits of sequence number back. */
scatterwalk_map_and_copy(tmp, dst, 4, 4, 0);
scatterwalk_map_and_copy(tmp + 1, dst, assoclen + cryptlen, 4, 0);
@@ -253,6 +256,8 @@
if (crypto_memneq(ihash, ohash, authsize))
return -EBADMSG;
+decrypt:
+
sg_init_table(areq_ctx->dst, 2);
dst = scatterwalk_ffwd(areq_ctx->dst, dst, assoclen);
diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index e7aa904..26a504d 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -642,6 +642,7 @@
inst->alg.halg.base.cra_flags = type;
inst->alg.halg.digestsize = salg->digestsize;
+ inst->alg.halg.statesize = salg->statesize;
inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);
inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;
diff --git a/crypto/gcm.c b/crypto/gcm.c
index 1238b3c..0a12c09 100644
--- a/crypto/gcm.c
+++ b/crypto/gcm.c
@@ -152,10 +152,8 @@
err = crypto_ablkcipher_encrypt(&data->req);
if (err == -EINPROGRESS || err == -EBUSY) {
- err = wait_for_completion_interruptible(
- &data->result.completion);
- if (!err)
- err = data->result.err;
+ wait_for_completion(&data->result.completion);
+ err = data->result.err;
}
if (err)
diff --git a/crypto/lz4.c b/crypto/lz4.c
index aefbcea..74a655e 100644
--- a/crypto/lz4.c
+++ b/crypto/lz4.c
@@ -48,32 +48,26 @@
static int lz4_compress_crypto(struct crypto_tfm *tfm, const u8 *src,
unsigned int slen, u8 *dst, unsigned int *dlen)
{
- struct lz4_ctx *ctx = crypto_tfm_ctx(tfm);
- size_t tmp_len = *dlen;
- int err;
+ int out_len = LZ4_compress_default(src, dst,
+ slen, *dlen, ctx);
- err = lz4_compress(src, slen, dst, &tmp_len, ctx->lz4_comp_mem);
-
- if (err < 0)
+ if (!out_len)
return -EINVAL;
- *dlen = tmp_len;
+ *dlen = out_len;
return 0;
}
static int lz4_decompress_crypto(struct crypto_tfm *tfm, const u8 *src,
unsigned int slen, u8 *dst, unsigned int *dlen)
{
- int err;
- size_t tmp_len = *dlen;
- size_t __slen = slen;
+ int out_len = LZ4_decompress_safe(src, dst, slen, *dlen);
- err = lz4_decompress_unknownoutputsize(src, __slen, dst, &tmp_len);
- if (err < 0)
- return -EINVAL;
+ if (out_len < 0)
+ return out_len;
- *dlen = tmp_len;
- return err;
+ *dlen = out_len;
+ return 0;
}
static struct crypto_alg alg_lz4 = {
diff --git a/crypto/lz4hc.c b/crypto/lz4hc.c
index a1d3b5b..c93194e 100644
--- a/crypto/lz4hc.c
+++ b/crypto/lz4hc.c
@@ -48,32 +48,26 @@
static int lz4hc_compress_crypto(struct crypto_tfm *tfm, const u8 *src,
unsigned int slen, u8 *dst, unsigned int *dlen)
{
- struct lz4hc_ctx *ctx = crypto_tfm_ctx(tfm);
- size_t tmp_len = *dlen;
- int err;
+ int out_len = LZ4_compress_HC(src, dst, slen,
+ *dlen, LZ4HC_DEFAULT_CLEVEL, ctx);
- err = lz4hc_compress(src, slen, dst, &tmp_len, ctx->lz4hc_comp_mem);
-
- if (err < 0)
+ if (!out_len)
return -EINVAL;
- *dlen = tmp_len;
+ *dlen = out_len;
return 0;
}
static int lz4hc_decompress_crypto(struct crypto_tfm *tfm, const u8 *src,
unsigned int slen, u8 *dst, unsigned int *dlen)
{
- int err;
- size_t tmp_len = *dlen;
- size_t __slen = slen;
+ int out_len = LZ4_decompress_safe(src, dst, slen, *dlen);
- err = lz4_decompress_unknownoutputsize(src, __slen, dst, &tmp_len);
- if (err < 0)
- return -EINVAL;
+ if (out_len < 0)
+ return out_len;
- *dlen = tmp_len;
- return err;
+ *dlen = out_len;
+ return 0;
}
static struct crypto_alg alg_lz4hc = {
diff --git a/crypto/mcryptd.c b/crypto/mcryptd.c
index a0ceb41..b4f3930 100644
--- a/crypto/mcryptd.c
+++ b/crypto/mcryptd.c
@@ -531,6 +531,7 @@
inst->alg.halg.base.cra_flags = type;
inst->alg.halg.digestsize = salg->digestsize;
+ inst->alg.halg.statesize = salg->statesize;
inst->alg.halg.base.cra_ctxsize = sizeof(struct mcryptd_hash_ctx);
inst->alg.halg.base.cra_init = mcryptd_hash_init_tfm;
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 8374ca8..6d4da8f 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -488,6 +488,8 @@
aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
tcrypt_complete, &result);
+ iv_len = crypto_aead_ivsize(tfm);
+
for (i = 0, j = 0; i < tcount; i++) {
if (template[i].np)
continue;
@@ -508,7 +510,6 @@
memcpy(input, template[i].input, template[i].ilen);
memcpy(assoc, template[i].assoc, template[i].alen);
- iv_len = crypto_aead_ivsize(tfm);
if (template[i].iv)
memcpy(iv, template[i].iv, iv_len);
else
@@ -617,7 +618,7 @@
j++;
if (template[i].iv)
- memcpy(iv, template[i].iv, MAX_IVLEN);
+ memcpy(iv, template[i].iv, iv_len);
else
memset(iv, 0, MAX_IVLEN);
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index ba6530d..127de70 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -33779,69 +33779,123 @@
};
-#define LZ4_COMP_TEST_VECTORS 1
-#define LZ4_DECOMP_TEST_VECTORS 1
-
-static struct comp_testvec lz4_comp_tv_template[] = {
+static const struct comp_testvec lz4_comp_tv_template[] = {
{
- .inlen = 70,
- .outlen = 45,
- .input = "Join us now and share the software "
- "Join us now and share the software ",
- .output = "\xf0\x10\x4a\x6f\x69\x6e\x20\x75"
- "\x73\x20\x6e\x6f\x77\x20\x61\x6e"
- "\x64\x20\x73\x68\x61\x72\x65\x20"
- "\x74\x68\x65\x20\x73\x6f\x66\x74"
- "\x77\x0d\x00\x0f\x23\x00\x0b\x50"
- "\x77\x61\x72\x65\x20",
+ .inlen = 255,
+ .outlen = 218,
+ .input = "LZ4 is lossless compression algorithm, providing"
+ " compression speed at 400 MB/s per core, scalable "
+ "with multi-cores CPU. It features an extremely fast "
+ "decoder, with speed in multiple GB/s per core, "
+ "typically reaching RAM speed limits on multi-core "
+ "systems.",
+ .output = "\xf9\x21\x4c\x5a\x34\x20\x69\x73\x20\x6c\x6f\x73\x73"
+ "\x6c\x65\x73\x73\x20\x63\x6f\x6d\x70\x72\x65\x73\x73"
+ "\x69\x6f\x6e\x20\x61\x6c\x67\x6f\x72\x69\x74\x68\x6d"
+ "\x2c\x20\x70\x72\x6f\x76\x69\x64\x69\x6e\x67\x21\x00"
+ "\xf0\x21\x73\x70\x65\x65\x64\x20\x61\x74\x20\x34\x30"
+ "\x30\x20\x4d\x42\x2f\x73\x20\x70\x65\x72\x20\x63\x6f"
+ "\x72\x65\x2c\x20\x73\x63\x61\x6c\x61\x62\x6c\x65\x20"
+ "\x77\x69\x74\x68\x20\x6d\x75\x6c\x74\x69\x2d\x1a\x00"
+ "\xf0\x00\x73\x20\x43\x50\x55\x2e\x20\x49\x74\x20\x66"
+ "\x65\x61\x74\x75\x11\x00\xf2\x0b\x61\x6e\x20\x65\x78"
+ "\x74\x72\x65\x6d\x65\x6c\x79\x20\x66\x61\x73\x74\x20"
+ "\x64\x65\x63\x6f\x64\x65\x72\x2c\x3d\x00\x02\x67\x00"
+ "\x22\x69\x6e\x46\x00\x5a\x70\x6c\x65\x20\x47\x6c\x00"
+ "\xf0\x00\x74\x79\x70\x69\x63\x61\x6c\x6c\x79\x20\x72"
+ "\x65\x61\x63\x68\xa7\x00\x33\x52\x41\x4d\x38\x00\x83"
+ "\x6c\x69\x6d\x69\x74\x73\x20\x6f\x3f\x00\x01\x85\x00"
+ "\x90\x20\x73\x79\x73\x74\x65\x6d\x73\x2e",
},
};
-static struct comp_testvec lz4_decomp_tv_template[] = {
+static const struct comp_testvec lz4_decomp_tv_template[] = {
{
- .inlen = 45,
- .outlen = 70,
- .input = "\xf0\x10\x4a\x6f\x69\x6e\x20\x75"
- "\x73\x20\x6e\x6f\x77\x20\x61\x6e"
- "\x64\x20\x73\x68\x61\x72\x65\x20"
- "\x74\x68\x65\x20\x73\x6f\x66\x74"
- "\x77\x0d\x00\x0f\x23\x00\x0b\x50"
- "\x77\x61\x72\x65\x20",
- .output = "Join us now and share the software "
- "Join us now and share the software ",
+ .inlen = 218,
+ .outlen = 255,
+ .input = "\xf9\x21\x4c\x5a\x34\x20\x69\x73\x20\x6c\x6f\x73\x73"
+ "\x6c\x65\x73\x73\x20\x63\x6f\x6d\x70\x72\x65\x73\x73"
+ "\x69\x6f\x6e\x20\x61\x6c\x67\x6f\x72\x69\x74\x68\x6d"
+ "\x2c\x20\x70\x72\x6f\x76\x69\x64\x69\x6e\x67\x21\x00"
+ "\xf0\x21\x73\x70\x65\x65\x64\x20\x61\x74\x20\x34\x30"
+ "\x30\x20\x4d\x42\x2f\x73\x20\x70\x65\x72\x20\x63\x6f"
+ "\x72\x65\x2c\x20\x73\x63\x61\x6c\x61\x62\x6c\x65\x20"
+ "\x77\x69\x74\x68\x20\x6d\x75\x6c\x74\x69\x2d\x1a\x00"
+ "\xf0\x00\x73\x20\x43\x50\x55\x2e\x20\x49\x74\x20\x66"
+ "\x65\x61\x74\x75\x11\x00\xf2\x0b\x61\x6e\x20\x65\x78"
+ "\x74\x72\x65\x6d\x65\x6c\x79\x20\x66\x61\x73\x74\x20"
+ "\x64\x65\x63\x6f\x64\x65\x72\x2c\x3d\x00\x02\x67\x00"
+ "\x22\x69\x6e\x46\x00\x5a\x70\x6c\x65\x20\x47\x6c\x00"
+ "\xf0\x00\x74\x79\x70\x69\x63\x61\x6c\x6c\x79\x20\x72"
+ "\x65\x61\x63\x68\xa7\x00\x33\x52\x41\x4d\x38\x00\x83"
+ "\x6c\x69\x6d\x69\x74\x73\x20\x6f\x3f\x00\x01\x85\x00"
+ "\x90\x20\x73\x79\x73\x74\x65\x6d\x73\x2e",
+ .output = "LZ4 is lossless compression algorithm, providing"
+ " compression speed at 400 MB/s per core, scalable "
+ "with multi-cores CPU. It features an extremely fast "
+ "decoder, with speed in multiple GB/s per core, "
+ "typically reaching RAM speed limits on multi-core "
+ "systems.",
},
};
-#define LZ4HC_COMP_TEST_VECTORS 1
-#define LZ4HC_DECOMP_TEST_VECTORS 1
-
-static struct comp_testvec lz4hc_comp_tv_template[] = {
+static const struct comp_testvec lz4hc_comp_tv_template[] = {
{
- .inlen = 70,
- .outlen = 45,
- .input = "Join us now and share the software "
- "Join us now and share the software ",
- .output = "\xf0\x10\x4a\x6f\x69\x6e\x20\x75"
- "\x73\x20\x6e\x6f\x77\x20\x61\x6e"
- "\x64\x20\x73\x68\x61\x72\x65\x20"
- "\x74\x68\x65\x20\x73\x6f\x66\x74"
- "\x77\x0d\x00\x0f\x23\x00\x0b\x50"
- "\x77\x61\x72\x65\x20",
+ .inlen = 255,
+ .outlen = 216,
+ .input = "LZ4 is lossless compression algorithm, providing"
+ " compression speed at 400 MB/s per core, scalable "
+ "with multi-cores CPU. It features an extremely fast "
+ "decoder, with speed in multiple GB/s per core, "
+ "typically reaching RAM speed limits on multi-core "
+ "systems.",
+ .output = "\xf9\x21\x4c\x5a\x34\x20\x69\x73\x20\x6c\x6f\x73\x73"
+ "\x6c\x65\x73\x73\x20\x63\x6f\x6d\x70\x72\x65\x73\x73"
+ "\x69\x6f\x6e\x20\x61\x6c\x67\x6f\x72\x69\x74\x68\x6d"
+ "\x2c\x20\x70\x72\x6f\x76\x69\x64\x69\x6e\x67\x21\x00"
+ "\xf0\x21\x73\x70\x65\x65\x64\x20\x61\x74\x20\x34\x30"
+ "\x30\x20\x4d\x42\x2f\x73\x20\x70\x65\x72\x20\x63\x6f"
+ "\x72\x65\x2c\x20\x73\x63\x61\x6c\x61\x62\x6c\x65\x20"
+ "\x77\x69\x74\x68\x20\x6d\x75\x6c\x74\x69\x2d\x1a\x00"
+ "\xf0\x00\x73\x20\x43\x50\x55\x2e\x20\x49\x74\x20\x66"
+ "\x65\x61\x74\x75\x11\x00\xf2\x0b\x61\x6e\x20\x65\x78"
+ "\x74\x72\x65\x6d\x65\x6c\x79\x20\x66\x61\x73\x74\x20"
+ "\x64\x65\x63\x6f\x64\x65\x72\x2c\x3d\x00\x02\x67\x00"
+ "\x22\x69\x6e\x46\x00\x5a\x70\x6c\x65\x20\x47\x6c\x00"
+ "\xf0\x00\x74\x79\x70\x69\x63\x61\x6c\x6c\x79\x20\x72"
+ "\x65\x61\x63\x68\xa7\x00\x33\x52\x41\x4d\x38\x00\x97"
+ "\x6c\x69\x6d\x69\x74\x73\x20\x6f\x6e\x85\x00\x90\x20"
+ "\x73\x79\x73\x74\x65\x6d\x73\x2e",
},
};
-static struct comp_testvec lz4hc_decomp_tv_template[] = {
+static const struct comp_testvec lz4hc_decomp_tv_template[] = {
{
- .inlen = 45,
- .outlen = 70,
- .input = "\xf0\x10\x4a\x6f\x69\x6e\x20\x75"
- "\x73\x20\x6e\x6f\x77\x20\x61\x6e"
- "\x64\x20\x73\x68\x61\x72\x65\x20"
- "\x74\x68\x65\x20\x73\x6f\x66\x74"
- "\x77\x0d\x00\x0f\x23\x00\x0b\x50"
- "\x77\x61\x72\x65\x20",
- .output = "Join us now and share the software "
- "Join us now and share the software ",
+ .inlen = 216,
+ .outlen = 255,
+ .input = "\xf9\x21\x4c\x5a\x34\x20\x69\x73\x20\x6c\x6f\x73\x73"
+ "\x6c\x65\x73\x73\x20\x63\x6f\x6d\x70\x72\x65\x73\x73"
+ "\x69\x6f\x6e\x20\x61\x6c\x67\x6f\x72\x69\x74\x68\x6d"
+ "\x2c\x20\x70\x72\x6f\x76\x69\x64\x69\x6e\x67\x21\x00"
+ "\xf0\x21\x73\x70\x65\x65\x64\x20\x61\x74\x20\x34\x30"
+ "\x30\x20\x4d\x42\x2f\x73\x20\x70\x65\x72\x20\x63\x6f"
+ "\x72\x65\x2c\x20\x73\x63\x61\x6c\x61\x62\x6c\x65\x20"
+ "\x77\x69\x74\x68\x20\x6d\x75\x6c\x74\x69\x2d\x1a\x00"
+ "\xf0\x00\x73\x20\x43\x50\x55\x2e\x20\x49\x74\x20\x66"
+ "\x65\x61\x74\x75\x11\x00\xf2\x0b\x61\x6e\x20\x65\x78"
+ "\x74\x72\x65\x6d\x65\x6c\x79\x20\x66\x61\x73\x74\x20"
+ "\x64\x65\x63\x6f\x64\x65\x72\x2c\x3d\x00\x02\x67\x00"
+ "\x22\x69\x6e\x46\x00\x5a\x70\x6c\x65\x20\x47\x6c\x00"
+ "\xf0\x00\x74\x79\x70\x69\x63\x61\x6c\x6c\x79\x20\x72"
+ "\x65\x61\x63\x68\xa7\x00\x33\x52\x41\x4d\x38\x00\x97"
+ "\x6c\x69\x6d\x69\x74\x73\x20\x6f\x6e\x85\x00\x90\x20"
+ "\x73\x79\x73\x74\x65\x6d\x73\x2e",
+ .output = "LZ4 is lossless compression algorithm, providing"
+ " compression speed at 400 MB/s per core, scalable "
+ "with multi-cores CPU. It features an extremely fast "
+ "decoder, with speed in multiple GB/s per core, "
+ "typically reaching RAM speed limits on multi-core "
+ "systems.",
},
};
diff --git a/drivers/Makefile b/drivers/Makefile
index c1fc75b..b9e35f9 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -99,6 +99,7 @@
obj-$(CONFIG_USB) += usb/
obj-$(CONFIG_PCI) += usb/
obj-$(CONFIG_USB_GADGET) += usb/
+obj-$(CONFIG_OF) += usb/
obj-$(CONFIG_SERIO) += input/serio/
obj-$(CONFIG_GAMEPORT) += input/gameport/
obj-$(CONFIG_INPUT) += input/
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 675eaf3..b9cebca 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -2,7 +2,6 @@
# Makefile for the Linux ACPI interpreter
#
-ccflags-y := -Os
ccflags-$(CONFIG_ACPI_DEBUG) += -DACPI_DEBUG_OUTPUT
#
diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c
index 296b7a1..5365ff6 100644
--- a/drivers/acpi/acpi_platform.c
+++ b/drivers/acpi/acpi_platform.c
@@ -24,9 +24,11 @@
ACPI_MODULE_NAME("platform");
static const struct acpi_device_id forbidden_id_list[] = {
- {"PNP0000", 0}, /* PIC */
- {"PNP0100", 0}, /* Timer */
- {"PNP0200", 0}, /* AT DMA Controller */
+ {"PNP0000", 0}, /* PIC */
+ {"PNP0100", 0}, /* Timer */
+ {"PNP0200", 0}, /* AT DMA Controller */
+ {"ACPI0009", 0}, /* IOxAPIC */
+ {"ACPI000A", 0}, /* IOAPIC */
{"", 0},
};
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index 5fdac39..549cdbe 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -1211,6 +1211,9 @@
union acpi_object *dod = NULL;
union acpi_object *obj;
+ if (!video->cap._DOD)
+ return AE_NOT_EXIST;
+
status = acpi_evaluate_object(video->device->handle, "_DOD", NULL, &buffer);
if (!ACPI_SUCCESS(status)) {
ACPI_EXCEPTION((AE_INFO, status, "Evaluating _DOD"));
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index eac4f3b..bb81cd0 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -1067,6 +1067,7 @@
if (list_empty(&ghes_sci))
unregister_acpi_hed_notifier(&ghes_notifier_sci);
mutex_unlock(&ghes_list_mutex);
+ synchronize_rcu();
break;
case ACPI_HEST_NOTIFY_NMI:
ghes_nmi_remove(ghes);
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
index 96809cd..2f24b57 100644
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/blacklist.c
@@ -346,6 +346,34 @@
DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9343"),
},
},
+ {
+ .callback = dmi_enable_rev_override,
+ .ident = "DELL Precision 5520",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Precision 5520"),
+ },
+ },
+ {
+ .callback = dmi_enable_rev_override,
+ .ident = "DELL Precision 3520",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3520"),
+ },
+ },
+ /*
+ * Resolves a quirk with the Dell Latitude 3350 that
+ * causes the ethernet adapter to not function.
+ */
+ {
+ .callback = dmi_enable_rev_override,
+ .ident = "DELL Latitude 3350",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Latitude 3350"),
+ },
+ },
#endif
{}
};
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
index 5ea5dc2..73c9c7fa 100644
--- a/drivers/acpi/glue.c
+++ b/drivers/acpi/glue.c
@@ -98,7 +98,15 @@
if (check_children && list_empty(&adev->children))
return -ENODEV;
- return sta_present ? FIND_CHILD_MAX_SCORE : FIND_CHILD_MIN_SCORE;
+ /*
+ * If the device has a _HID (or _CID) returning a valid ACPI/PNP
+ * device ID, it is better to make it look less attractive here, so that
+ * the other device with the same _ADR value (that may not have a valid
+ * device ID) can be matched going forward. [This means a second spec
+ * violation in a row, so whatever we do here is best effort anyway.]
+ */
+ return sta_present && list_empty(&adev->pnp.ids) ?
+ FIND_CHILD_MAX_SCORE : FIND_CHILD_MIN_SCORE;
}
struct acpi_device *acpi_find_child_device(struct acpi_device *parent,
diff --git a/drivers/acpi/ioapic.c b/drivers/acpi/ioapic.c
index ccdc8db..fa2cf2d 100644
--- a/drivers/acpi/ioapic.c
+++ b/drivers/acpi/ioapic.c
@@ -45,6 +45,12 @@
struct resource *res = data;
struct resource_win win;
+ /*
+ * We might assign this to 'res' later, make sure all pointers are
+ * cleared before the resource is added to the global list
+ */
+ memset(&win, 0, sizeof(win));
+
res->flags = 0;
if (acpi_dev_filter_resource_type(acpi_res, IORESOURCE_MEM) == 0)
return AE_OK;
diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c
index 14c2a07..67d7489 100644
--- a/drivers/acpi/nfit.c
+++ b/drivers/acpi/nfit.c
@@ -979,7 +979,11 @@
const struct nfit_set_info_map *map0 = m0;
const struct nfit_set_info_map *map1 = m1;
- return map0->region_offset - map1->region_offset;
+ if (map0->region_offset < map1->region_offset)
+ return -1;
+ else if (map0->region_offset > map1->region_offset)
+ return 1;
+ return 0;
}
/* Retrieve the nth entry referencing this spa */
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index fcd4ce6..1c2b846 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -200,6 +200,7 @@
return -EINVAL;
/* The state of the list is 'on' IFF all resources are 'on'. */
+ cur_state = 0;
list_for_each_entry(entry, list, node) {
struct acpi_power_resource *resource = entry->resource;
acpi_handle handle = resource->device.handle;
diff --git a/drivers/android/Kconfig b/drivers/android/Kconfig
index 4d4cdc1a..01de42c 100644
--- a/drivers/android/Kconfig
+++ b/drivers/android/Kconfig
@@ -44,6 +44,16 @@
Note that enabling this will break newer Android user-space.
+config ANDROID_BINDER_IPC_SELFTEST
+ bool "Android Binder IPC Driver Selftest"
+ depends on ANDROID_BINDER_IPC
+ ---help---
+ This feature allows binder selftest to run.
+
+ Binder selftest checks the allocation and free of binder buffers
+ exhaustively with combinations of various buffer sizes and
+ alignments.
+
endif # if ANDROID
endmenu
diff --git a/drivers/android/Makefile b/drivers/android/Makefile
index 4b7c726..a01254c 100644
--- a/drivers/android/Makefile
+++ b/drivers/android/Makefile
@@ -1,3 +1,4 @@
ccflags-y += -I$(src) # needed for trace events
obj-$(CONFIG_ANDROID_BINDER_IPC) += binder.o binder_alloc.o
+obj-$(CONFIG_ANDROID_BINDER_IPC_SELFTEST) += binder_alloc_selftest.o
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 6d81971..af7b970 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -47,7 +47,7 @@
#include "binder_trace.h"
static HLIST_HEAD(binder_deferred_list);
-static DEFINE_MUTEX(binder_deferred_lock);
+static DEFINE_SPINLOCK(binder_deferred_lock);
static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
@@ -1379,7 +1379,19 @@
target_thread = t->from;
if (target_thread) {
binder_proc_lock(target_thread->proc, __LINE__);
-
+ /*
+ * It's possible that binder_free_thread() just marked
+ * the thread a zombie before we took the lock; in that
+ * case, there's no point in queueing any work to the
+ * thread - proceed as if it's already dead.
+ */
+ if (target_thread->is_zombie) {
+ binder_proc_unlock(target_thread->proc,
+ __LINE__);
+ target_thread = NULL;
+ }
+ }
+ if (target_thread) {
binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
"send failed reply for transaction %d to %d:%d\n",
t->debug_id,
@@ -2745,8 +2757,9 @@
buf_node->async_todo.list.next,
struct binder_work, entry);
binder_dequeue_work(w, __LINE__);
- binder_enqueue_work(w, &thread->todo,
+ binder_enqueue_work(w, &proc->todo,
__LINE__);
+ binder_wakeup_proc(proc);
}
binder_proc_unlock(buffer->target_node->proc,
__LINE__);
@@ -3748,13 +3761,7 @@
wait_for_proc_work = binder_available_for_proc_work(thread);
binder_proc_unlock(thread->proc, __LINE__);
- if (binder_has_work(thread, wait_for_proc_work))
- goto ret_pollin;
- binder_put_thread(thread);
poll_wait(filp, &thread->wait, wait);
- thread = binder_get_thread(proc);
- if (!thread)
- return -ENOENT;
if (binder_has_work(thread, wait_for_proc_work))
goto ret_pollin;
binder_put_thread(thread);
@@ -3931,6 +3938,30 @@
binder_defer_work(proc, BINDER_ZOMBIE_CLEANUP);
}
+static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
+ struct binder_node_debug_info *info) {
+ struct rb_node *n;
+ binder_uintptr_t ptr = info->ptr;
+
+ memset(info, 0, sizeof(*info));
+
+ binder_proc_lock(proc, __LINE__);
+ for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
+ struct binder_node *node = rb_entry(n, struct binder_node,
+ rb_node);
+ if (node->ptr > ptr) {
+ info->ptr = node->ptr;
+ info->cookie = node->cookie;
+ info->has_strong_ref = node->has_strong_ref;
+ info->has_weak_ref = node->has_weak_ref;
+ break;
+ }
+ }
+ binder_proc_unlock(proc, __LINE__);
+
+ return 0;
+}
+
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
int ret;
@@ -3939,6 +3970,8 @@
unsigned int size = _IOC_SIZE(cmd);
void __user *ubuf = (void __user *)arg;
+ binder_selftest_alloc(&proc->alloc);
+
trace_binder_ioctl(cmd, arg);
ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
@@ -4003,6 +4036,24 @@
}
break;
}
+ case BINDER_GET_NODE_DEBUG_INFO: {
+ struct binder_node_debug_info info;
+
+ if (copy_from_user(&info, ubuf, sizeof(info))) {
+ ret = -EFAULT;
+ goto err;
+ }
+
+ ret = binder_ioctl_get_node_debug_info(proc, &info);
+ if (ret < 0)
+ goto err;
+
+ if (copy_to_user(ubuf, &info, sizeof(info))) {
+ ret = -EFAULT;
+ goto err;
+ }
+ break;
+ }
default:
ret = -EINVAL;
goto err;
@@ -4548,7 +4599,7 @@
int defer;
do {
- mutex_lock(&binder_deferred_lock);
+ spin_lock(&binder_deferred_lock);
if (!hlist_empty(&binder_deferred_list)) {
proc = hlist_entry(binder_deferred_list.first,
struct binder_proc, deferred_work_node);
@@ -4559,7 +4610,7 @@
proc = NULL;
defer = 0;
}
- mutex_unlock(&binder_deferred_lock);
+ spin_unlock(&binder_deferred_lock);
if (defer & BINDER_DEFERRED_PUT_FILES) {
binder_proc_lock(proc, __LINE__);
@@ -4588,14 +4639,14 @@
static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
{
- mutex_lock(&binder_deferred_lock);
+ spin_lock(&binder_deferred_lock);
proc->deferred_work |= defer;
if (hlist_unhashed(&proc->deferred_work_node)) {
hlist_add_head(&proc->deferred_work_node,
&binder_deferred_list);
queue_work(binder_deferred_workqueue, &binder_deferred_work);
}
- mutex_unlock(&binder_deferred_lock);
+ spin_unlock(&binder_deferred_lock);
}
static void _print_binder_transaction(struct seq_file *m,
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 00ceade..057716b 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -47,14 +47,23 @@
pr_info(x); \
} while (0)
+static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer)
+{
+ return list_entry(buffer->entry.next, struct binder_buffer, entry);
+}
+
+static struct binder_buffer *binder_buffer_prev(struct binder_buffer *buffer)
+{
+ return list_entry(buffer->entry.prev, struct binder_buffer, entry);
+}
+
static size_t binder_buffer_size(struct binder_alloc *alloc,
struct binder_buffer *buffer)
{
if (list_is_last(&buffer->entry, &alloc->buffers))
- return alloc->buffer +
- alloc->buffer_size - (void *)buffer->data;
- return (size_t)list_entry(buffer->entry.next,
- struct binder_buffer, entry) - (size_t)buffer->data;
+ return (u8 *)alloc->buffer +
+ alloc->buffer_size - (u8 *)buffer->data;
+ return (u8 *)binder_buffer_next(buffer)->data - (u8 *)buffer->data;
}
static void binder_insert_free_buffer(struct binder_alloc *alloc,
@@ -104,9 +113,9 @@
buffer = rb_entry(parent, struct binder_buffer, rb_node);
BUG_ON(buffer->free);
- if (new_buffer < buffer)
+ if (new_buffer->data < buffer->data)
p = &parent->rb_left;
- else if (new_buffer > buffer)
+ else if (new_buffer->data > buffer->data)
p = &parent->rb_right;
else
BUG();
@@ -120,20 +129,19 @@
{
struct rb_node *n;
struct binder_buffer *buffer;
- struct binder_buffer *kern_ptr;
+ void *kern_ptr;
mutex_lock(&alloc->mutex);
- kern_ptr = (struct binder_buffer *)(user_ptr - alloc->user_buffer_offset
- - offsetof(struct binder_buffer, data));
+ kern_ptr = (void *)(user_ptr - alloc->user_buffer_offset);
n = alloc->allocated_buffers.rb_node;
while (n) {
buffer = rb_entry(n, struct binder_buffer, rb_node);
BUG_ON(buffer->free);
- if (kern_ptr < buffer)
+ if (kern_ptr < buffer->data)
n = n->rb_left;
- else if (kern_ptr > buffer)
+ else if (kern_ptr > buffer->data)
n = n->rb_right;
else {
/*
@@ -155,9 +163,9 @@
return NULL;
}
-static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
- void *start, void *end,
- struct vm_area_struct *vma)
+static int __binder_update_page_range(struct binder_alloc *alloc, int allocate,
+ void *start, void *end,
+ struct vm_area_struct *vma)
{
void *page_addr;
unsigned long user_page_addr;
@@ -257,6 +265,20 @@
return vma ? -ENOMEM : -ESRCH;
}
+static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
+ void *start, void *end,
+ struct vm_area_struct *vma)
+{
+ /*
+ * For regular updates, move up start if needed since MIN_ALLOC pages
+ * are always mapped
+ */
+ if (start - alloc->buffer < BINDER_MIN_ALLOC)
+ start = alloc->buffer + BINDER_MIN_ALLOC;
+
+ return __binder_update_page_range(alloc, allocate, start, end, vma);
+}
+
struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
size_t data_size,
size_t offsets_size,
@@ -308,6 +330,9 @@
goto error_unlock;
}
+ /* Pad 0-size buffers so they get assigned unique addresses */
+ size = max(size, sizeof(void *));
+
n = alloc->free_buffers.rb_node;
while (n) {
buffer = rb_entry(n, struct binder_buffer, rb_node);
@@ -369,14 +394,9 @@
has_page_addr =
(void *)(((uintptr_t)buffer->data + buffer_size) & PAGE_MASK);
- if (n == NULL) {
- if (size + sizeof(struct binder_buffer) + 4 >= buffer_size)
- buffer_size = size; /* no room for other buffers */
- else
- buffer_size = size + sizeof(struct binder_buffer);
- }
+ WARN_ON(n && buffer_size != size);
end_page_addr =
- (void *)PAGE_ALIGN((uintptr_t)buffer->data + buffer_size);
+ (void *)PAGE_ALIGN((uintptr_t)buffer->data + size);
if (end_page_addr > has_page_addr)
end_page_addr = has_page_addr;
ret = binder_update_page_range(alloc, 1,
@@ -385,18 +405,26 @@
eret = ERR_PTR(ret);
goto error_unlock;
}
+ if (buffer_size != size) {
+ struct binder_buffer *new_buffer;
+
+ new_buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+ if (!new_buffer) {
+ pr_err("%s: %d failed to alloc new buffer struct\n",
+ __func__, alloc->pid);
+ eret = ERR_PTR(-ENOMEM);
+ goto err_alloc_buf_struct_failed;
+ }
+ new_buffer->data = (u8 *)buffer->data + size;
+ list_add(&new_buffer->entry, &buffer->entry);
+ new_buffer->free = 1;
+ binder_insert_free_buffer(alloc, new_buffer);
+ }
rb_erase(best_fit, &alloc->free_buffers);
buffer->free = 0;
buffer->free_in_progress = 0;
binder_insert_allocated_buffer(alloc, buffer);
- if (buffer_size != size) {
- struct binder_buffer *new_buffer = (void *)buffer->data + size;
-
- list_add(&new_buffer->entry, &buffer->entry);
- new_buffer->free = 1;
- binder_insert_free_buffer(alloc, new_buffer);
- }
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
"%d: binder_alloc_buf size %zd got %pK\n",
alloc->pid, size, buffer);
@@ -414,6 +442,10 @@
return buffer;
+err_alloc_buf_struct_failed:
+ binder_update_page_range(alloc, 0,
+ (void *)PAGE_ALIGN((uintptr_t)buffer->data),
+ end_page_addr, NULL);
error_unlock:
mutex_unlock(&alloc->mutex);
return eret;
@@ -421,57 +453,59 @@
static void *buffer_start_page(struct binder_buffer *buffer)
{
- return (void *)((uintptr_t)buffer & PAGE_MASK);
+ return (void *)((uintptr_t)buffer->data & PAGE_MASK);
}
-static void *buffer_end_page(struct binder_buffer *buffer)
+static void *prev_buffer_end_page(struct binder_buffer *buffer)
{
- return (void *)(((uintptr_t)(buffer + 1) - 1) & PAGE_MASK);
+ return (void *)(((uintptr_t)(buffer->data) - 1) & PAGE_MASK);
}
static void binder_delete_free_buffer(struct binder_alloc *alloc,
struct binder_buffer *buffer)
{
struct binder_buffer *prev, *next = NULL;
- int free_page_end = 1;
- int free_page_start = 1;
-
+ bool to_free = true;
BUG_ON(alloc->buffers.next == &buffer->entry);
- prev = list_entry(buffer->entry.prev, struct binder_buffer, entry);
+ prev = binder_buffer_prev(buffer);
BUG_ON(!prev->free);
- if (buffer_end_page(prev) == buffer_start_page(buffer)) {
- free_page_start = 0;
- if (buffer_end_page(prev) == buffer_end_page(buffer))
- free_page_end = 0;
+ if (prev_buffer_end_page(prev) == buffer_start_page(buffer)) {
+ to_free = false;
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: merge free, buffer %pK share page with %pK\n",
- alloc->pid, buffer, prev);
+ "%d: merge free, buffer %pK share page with %pK\n",
+ alloc->pid, buffer->data, prev->data);
}
if (!list_is_last(&buffer->entry, &alloc->buffers)) {
- next = list_entry(buffer->entry.next,
- struct binder_buffer, entry);
- if (buffer_start_page(next) == buffer_end_page(buffer)) {
- free_page_end = 0;
- if (buffer_start_page(next) ==
- buffer_start_page(buffer))
- free_page_start = 0;
+ next = binder_buffer_next(buffer);
+ if (buffer_start_page(next) == buffer_start_page(buffer)) {
+ to_free = false;
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: merge free, buffer %pK share page with %pK\n",
- alloc->pid, buffer, prev);
+ "%d: merge free, buffer %pK share page with %pK\n",
+ alloc->pid,
+ buffer->data,
+ next->data);
}
}
- list_del(&buffer->entry);
- if (free_page_start || free_page_end) {
+
+ if (PAGE_ALIGNED(buffer->data)) {
binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: merge free, buffer %pK do not share page%s%s with %pK or %pK\n",
- alloc->pid, buffer, free_page_start ? "" : " end",
- free_page_end ? "" : " start", prev, next);
- binder_update_page_range(alloc, 0, free_page_start ?
- buffer_start_page(buffer) : buffer_end_page(buffer),
- (free_page_end ? buffer_end_page(buffer) :
- buffer_start_page(buffer)) + PAGE_SIZE, NULL);
+ "%d: merge free, buffer start %pK is page aligned\n",
+ alloc->pid, buffer->data);
+ to_free = false;
}
+
+ if (to_free) {
+ binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
+ "%d: merge free, buffer %pK do not share page with %pK or %pK\n",
+ alloc->pid, buffer->data,
+ prev->data, next->data);
+ binder_update_page_range(alloc, 0, buffer_start_page(buffer),
+ buffer_start_page(buffer) + PAGE_SIZE,
+ NULL);
+ }
+ list_del(&buffer->entry);
+ kfree(buffer);
}
static void binder_free_buf_locked(struct binder_alloc *alloc,
@@ -492,8 +526,8 @@
BUG_ON(buffer->free);
BUG_ON(size > buffer_size);
BUG_ON(buffer->transaction != NULL);
- BUG_ON((void *)buffer < alloc->buffer);
- BUG_ON((void *)buffer > alloc->buffer + alloc->buffer_size);
+ BUG_ON(buffer->data < alloc->buffer);
+ BUG_ON(buffer->data > alloc->buffer + alloc->buffer_size);
if (buffer->async_transaction) {
alloc->free_async_space += size + sizeof(struct binder_buffer);
@@ -511,8 +545,7 @@
rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
buffer->free = 1;
if (!list_is_last(&buffer->entry, &alloc->buffers)) {
- struct binder_buffer *next = list_entry(buffer->entry.next,
- struct binder_buffer, entry);
+ struct binder_buffer *next = binder_buffer_next(buffer);
if (next->free) {
rb_erase(&next->rb_node, &alloc->free_buffers);
@@ -520,8 +553,7 @@
}
}
if (alloc->buffers.next != &buffer->entry) {
- struct binder_buffer *prev = list_entry(buffer->entry.prev,
- struct binder_buffer, entry);
+ struct binder_buffer *prev = binder_buffer_prev(buffer);
if (prev->free) {
binder_delete_free_buffer(alloc, buffer);
@@ -587,14 +619,20 @@
}
alloc->buffer_size = vma->vm_end - vma->vm_start;
- if (binder_update_page_range(alloc, 1, alloc->buffer,
- alloc->buffer + PAGE_SIZE, vma)) {
+ buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+ if (!buffer) {
+ ret = -ENOMEM;
+ failure_string = "alloc buffer struct";
+ goto err_alloc_buf_struct_failed;
+ }
+
+ if (__binder_update_page_range(alloc, 1, alloc->buffer,
+ alloc->buffer + BINDER_MIN_ALLOC, vma)) {
ret = -ENOMEM;
failure_string = "alloc small buf";
goto err_alloc_small_buf_failed;
}
- buffer = alloc->buffer;
- INIT_LIST_HEAD(&alloc->buffers);
+ buffer->data = alloc->buffer;
list_add(&buffer->entry, &alloc->buffers);
buffer->free = 1;
binder_insert_free_buffer(alloc, buffer);
@@ -607,6 +645,8 @@
return 0;
err_alloc_small_buf_failed:
+ kfree(buffer);
+err_alloc_buf_struct_failed:
kfree(alloc->pages);
alloc->pages = NULL;
err_alloc_pages_failed:
@@ -626,14 +666,13 @@
{
struct rb_node *n;
int buffers, page_count;
+ struct binder_buffer *buffer;
BUG_ON(alloc->vma);
buffers = 0;
mutex_lock(&alloc->mutex);
while ((n = rb_first(&alloc->allocated_buffers))) {
- struct binder_buffer *buffer;
-
buffer = rb_entry(n, struct binder_buffer, rb_node);
/* Transactiopn should already have been freed */
@@ -643,6 +682,16 @@
buffers++;
}
+ while (!list_empty(&alloc->buffers)) {
+ buffer = list_first_entry(&alloc->buffers,
+ struct binder_buffer, entry);
+ WARN_ON(!buffer->free);
+
+ list_del(&buffer->entry);
+ WARN_ON_ONCE(!list_empty(&alloc->buffers));
+ kfree(buffer);
+ }
+
page_count = 0;
if (alloc->pages) {
int i;
@@ -718,5 +767,6 @@
alloc->tsk = current->group_leader;
alloc->pid = current->group_leader->pid;
mutex_init(&alloc->mutex);
+ INIT_LIST_HEAD(&alloc->buffers);
}
diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h
index 96f7c7f2..cce150e 100644
--- a/drivers/android/binder_alloc.h
+++ b/drivers/android/binder_alloc.h
@@ -27,6 +27,8 @@
#include <linux/vmalloc.h>
#include <linux/slab.h>
+#define BINDER_MIN_ALLOC (1 * PAGE_SIZE)
+
struct binder_transaction;
struct binder_buffer {
@@ -45,7 +47,7 @@
size_t data_size;
size_t offsets_size;
size_t extra_buffers_size;
- uint8_t data[0];
+ void *data;
};
struct binder_alloc {
@@ -65,6 +67,11 @@
int pid;
};
+#ifdef CONFIG_ANDROID_BINDER_IPC_SELFTEST
+void binder_selftest_alloc(struct binder_alloc *alloc);
+#else
+static inline void binder_selftest_alloc(struct binder_alloc *alloc) {}
+#endif
extern struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
size_t data_size,
size_t offsets_size,
diff --git a/drivers/android/binder_alloc_selftest.c b/drivers/android/binder_alloc_selftest.c
new file mode 100644
index 0000000..daff2e6
--- /dev/null
+++ b/drivers/android/binder_alloc_selftest.c
@@ -0,0 +1,269 @@
+/* binder_alloc_selftest.c
+ *
+ * Android IPC Subsystem
+ *
+ * Copyright (C) 2017 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/mm_types.h>
+#include <linux/err.h>
+#include "binder_alloc.h"
+
+#define BUFFER_NUM 5
+#define BUFFER_MIN_SIZE (PAGE_SIZE / 8)
+
+static bool binder_selftest_run = true;
+static int binder_selftest_failures;
+static DEFINE_MUTEX(binder_selftest_lock);
+
+/**
+ * enum buf_end_align_type - Page alignment of a buffer
+ * end with regard to the end of the previous buffer.
+ *
+ * In the pictures below, buf2 refers to the buffer we
+ * are aligning. buf1 refers to previous buffer by addr.
+ * Symbol [ means the start of a buffer, ] means the end
+ * of a buffer, and | means page boundaries.
+ */
+enum buf_end_align_type {
+ /**
+ * @SAME_PAGE_UNALIGNED: The end of this buffer is on
+ * the same page as the end of the previous buffer and
+ * is not page aligned. Examples:
+ * buf1 ][ buf2 ][ ...
+ * buf1 ]|[ buf2 ][ ...
+ */
+ SAME_PAGE_UNALIGNED = 0,
+ /**
+ * @SAME_PAGE_ALIGNED: The end of this buffer is on
+ * the same page as the end of the previous buffer and
+ * is page aligned. Examples:
+ * buf1 ][ buf2 ]| ...
+ * buf1 ]|[ buf2 ]| ...
+ */
+ SAME_PAGE_ALIGNED,
+ /**
+ * @NEXT_PAGE_UNALIGNED: The end of this buffer is on
+ * the page next to the end of the previous buffer and
+ * is not page aligned. Examples:
+ * buf1 ][ buf2 | buf2 ][ ...
+ * buf1 ]|[ buf2 | buf2 ][ ...
+ */
+ NEXT_PAGE_UNALIGNED,
+ /**
+ * @NEXT_PAGE_ALIGNED: The end of this buffer is on
+ * the page next to the end of the previous buffer and
+ * is page aligned. Examples:
+ * buf1 ][ buf2 | buf2 ]| ...
+ * buf1 ]|[ buf2 | buf2 ]| ...
+ */
+ NEXT_PAGE_ALIGNED,
+ /**
+ * @NEXT_NEXT_UNALIGNED: The end of this buffer is on
+ * the page that follows the page after the end of the
+ * previous buffer and is not page aligned. Examples:
+ * buf1 ][ buf2 | buf2 | buf2 ][ ...
+ * buf1 ]|[ buf2 | buf2 | buf2 ][ ...
+ */
+ NEXT_NEXT_UNALIGNED,
+ LOOP_END,
+};
+
+static void pr_err_size_seq(size_t *sizes, int *seq)
+{
+ int i;
+
+ pr_err("alloc sizes: ");
+ for (i = 0; i < BUFFER_NUM; i++)
+ pr_cont("[%zu]", sizes[i]);
+ pr_cont("\n");
+ pr_err("free seq: ");
+ for (i = 0; i < BUFFER_NUM; i++)
+ pr_cont("[%d]", seq[i]);
+ pr_cont("\n");
+}
+
+static bool check_buffer_pages_allocated(struct binder_alloc *alloc,
+ struct binder_buffer *buffer,
+ size_t size)
+{
+ void *page_addr, *end;
+ int page_index;
+
+ end = (void *)PAGE_ALIGN((uintptr_t)buffer->data + size);
+ page_addr = buffer->data;
+ for (; page_addr < end; page_addr += PAGE_SIZE) {
+ page_index = (page_addr - alloc->buffer) / PAGE_SIZE;
+ if (!alloc->pages[page_index]) {
+ pr_err("incorrect alloc state at page index %d\n",
+ page_index);
+ return false;
+ }
+ }
+ return true;
+}
+
+static void binder_selftest_alloc_buf(struct binder_alloc *alloc,
+ struct binder_buffer *buffers[],
+ size_t *sizes, int *seq)
+{
+ int i;
+
+ for (i = 0; i < BUFFER_NUM; i++) {
+ buffers[i] = binder_alloc_new_buf(alloc, sizes[i], 0, 0, 0);
+ if (IS_ERR(buffers[i]) ||
+ !check_buffer_pages_allocated(alloc, buffers[i],
+ sizes[i])) {
+ pr_err_size_seq(sizes, seq);
+ binder_selftest_failures++;
+ }
+ }
+}
+
+static void binder_selftest_free_buf(struct binder_alloc *alloc,
+ struct binder_buffer *buffers[],
+ size_t *sizes, int *seq)
+{
+ int i;
+
+ for (i = 0; i < BUFFER_NUM; i++)
+ binder_alloc_free_buf(alloc, buffers[seq[i]]);
+
+ for (i = 0; i < (alloc->buffer_size / PAGE_SIZE); i++) {
+ if ((!alloc->pages[i]) == (i * PAGE_SIZE < BINDER_MIN_ALLOC)) {
+ pr_err("incorrect free state at page index %d\n", i);
+ binder_selftest_failures++;
+ }
+ }
+}
+
+static void binder_selftest_alloc_free(struct binder_alloc *alloc,
+ size_t *sizes, int *seq)
+{
+ struct binder_buffer *buffers[BUFFER_NUM];
+
+ binder_selftest_alloc_buf(alloc, buffers, sizes, seq);
+ binder_selftest_free_buf(alloc, buffers, sizes, seq);
+}
+
+static bool is_dup(int *seq, int index, int val)
+{
+ int i;
+
+ for (i = 0; i < index; i++) {
+ if (seq[i] == val)
+ return true;
+ }
+ return false;
+}
+
+/* Generate BUFFER_NUM factorial free orders. */
+static void binder_selftest_free_seq(struct binder_alloc *alloc,
+ size_t *sizes, int *seq, int index)
+{
+ int i;
+
+ if (index == BUFFER_NUM) {
+ binder_selftest_alloc_free(alloc, sizes, seq);
+ return;
+ }
+ for (i = 0; i < BUFFER_NUM; i++) {
+ if (is_dup(seq, index, i))
+ continue;
+ seq[index] = i;
+ binder_selftest_free_seq(alloc, sizes, seq, index + 1);
+ }
+}
+
+static void binder_selftest_alloc_size(struct binder_alloc *alloc,
+ size_t *end_offset)
+{
+ int i;
+ int seq[BUFFER_NUM] = {0};
+ size_t front_sizes[BUFFER_NUM];
+ size_t back_sizes[BUFFER_NUM];
+ size_t last_offset, offset = 0;
+
+ for (i = 0; i < BUFFER_NUM; i++) {
+ last_offset = offset;
+ offset = end_offset[i];
+ front_sizes[i] = offset - last_offset;
+ back_sizes[BUFFER_NUM - i - 1] = front_sizes[i];
+ }
+ /*
+ * Buffers share the first or last few pages.
+ * Only BUFFER_NUM - 1 buffer sizes are adjustable since
+ * we need one giant buffer before getting to the last page.
+ */
+ if (BINDER_MIN_ALLOC)
+ front_sizes[0] += BINDER_MIN_ALLOC - PAGE_SIZE;
+ back_sizes[0] += alloc->buffer_size - end_offset[BUFFER_NUM - 1];
+ binder_selftest_free_seq(alloc, front_sizes, seq, 0);
+ binder_selftest_free_seq(alloc, back_sizes, seq, 0);
+}
+
+static void binder_selftest_alloc_offset(struct binder_alloc *alloc,
+ size_t *end_offset, int index)
+{
+ int align;
+ size_t end, prev;
+
+ if (index == BUFFER_NUM) {
+ binder_selftest_alloc_size(alloc, end_offset);
+ return;
+ }
+ prev = index == 0 ? 0 : end_offset[index - 1];
+ end = prev;
+
+ BUILD_BUG_ON(BUFFER_MIN_SIZE * BUFFER_NUM >= PAGE_SIZE);
+
+ for (align = SAME_PAGE_UNALIGNED; align < LOOP_END; align++) {
+ if (align % 2)
+ end = ALIGN(end, PAGE_SIZE);
+ else
+ end += BUFFER_MIN_SIZE;
+ end_offset[index] = end;
+ binder_selftest_alloc_offset(alloc, end_offset, index + 1);
+ }
+}
+
+/**
+ * binder_selftest_alloc() - Test alloc and free of buffer pages.
+ * @alloc: Pointer to alloc struct.
+ *
+ * Allocate BUFFER_NUM buffers to cover all page alignment cases,
+ * then free them in all orders possible. Check that pages are
+ * allocated after buffer alloc and freed after freeing buffer.
+ */
+void binder_selftest_alloc(struct binder_alloc *alloc)
+{
+ size_t end_offset[BUFFER_NUM];
+
+ if (!binder_selftest_run)
+ return;
+ mutex_lock(&binder_selftest_lock);
+ if (!binder_selftest_run || !alloc->vma)
+ goto done;
+ pr_info("STARTED\n");
+ binder_selftest_alloc_offset(alloc, end_offset, 0);
+ binder_selftest_run = false;
+ if (binder_selftest_failures > 0)
+ pr_info("%d tests FAILED\n", binder_selftest_failures);
+ else
+ pr_info("PASSED\n");
+
+done:
+ mutex_unlock(&binder_selftest_lock);
+}
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index e417e1a..5b2aee8 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -2832,10 +2832,12 @@
static struct ata_device *ata_find_dev(struct ata_port *ap, int devno)
{
if (!sata_pmp_attached(ap)) {
- if (likely(devno < ata_link_max_devices(&ap->link)))
+ if (likely(devno >= 0 &&
+ devno < ata_link_max_devices(&ap->link)))
return &ap->link.device[devno];
} else {
- if (likely(devno < ap->nr_pmp_links))
+ if (likely(devno >= 0 &&
+ devno < ap->nr_pmp_links))
return &ap->pmp_link[devno].device[0];
}
diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c
index 8d4d959..8706533 100644
--- a/drivers/ata/pata_amd.c
+++ b/drivers/ata/pata_amd.c
@@ -616,6 +616,7 @@
{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP73_IDE), 8 },
{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP77_IDE), 8 },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CS5536_IDE), 9 },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CS5536_DEV_IDE), 9 },
{ },
};
diff --git a/drivers/ata/pata_cs5536.c b/drivers/ata/pata_cs5536.c
index 6c15a55..dc12552 100644
--- a/drivers/ata/pata_cs5536.c
+++ b/drivers/ata/pata_cs5536.c
@@ -289,6 +289,7 @@
static const struct pci_device_id cs5536[] = {
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CS5536_IDE), },
+ { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CS5536_DEV_IDE), },
{ },
};
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index 5005924..0346e46 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -737,7 +737,7 @@
out_unregister:
kobject_put(&priv->kobj);
- kfree(drv->p);
+ /* drv->p is freed in driver_release() */
drv->p = NULL;
out_put_bus:
bus_put(bus);
diff --git a/drivers/base/core.c b/drivers/base/core.c
index bbe8e2e..f3d395b 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -2099,7 +2099,11 @@
pm_runtime_get_noresume(dev);
pm_runtime_barrier(dev);
- if (dev->bus && dev->bus->shutdown) {
+ if (dev->class && dev->class->shutdown) {
+ if (initcall_debug)
+ dev_info(dev, "shutdown\n");
+ dev->class->shutdown(dev);
+ } else if (dev->bus && dev->bus->shutdown) {
if (initcall_debug)
dev_info(dev, "shutdown\n");
dev->bus->shutdown(dev);
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index a48824d..78b0ece 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -1188,7 +1188,6 @@
}
dev->power.subsys_data->domain_data = &gpd_data->base;
- dev->pm_domain = &genpd->domain;
spin_unlock_irq(&dev->power.lock);
@@ -1207,7 +1206,6 @@
{
spin_lock_irq(&dev->power.lock);
- dev->pm_domain = NULL;
dev->power.subsys_data->domain_data = NULL;
spin_unlock_irq(&dev->power.lock);
@@ -1248,6 +1246,8 @@
if (ret)
goto out;
+ dev->pm_domain = &genpd->domain;
+
genpd->device_count++;
genpd->max_off_time_changed = true;
@@ -1299,6 +1299,8 @@
if (genpd->detach_dev)
genpd->detach_dev(genpd, dev);
+ dev->pm_domain = NULL;
+
list_del_init(&pdd->list_node);
mutex_unlock(&genpd->lock);
@@ -1373,7 +1375,7 @@
int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
struct generic_pm_domain *subdomain)
{
- struct gpd_link *link;
+ struct gpd_link *l, *link;
int ret = -EINVAL;
if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain))
@@ -1388,7 +1390,7 @@
goto out;
}
- list_for_each_entry(link, &genpd->master_links, master_node) {
+ list_for_each_entry_safe(link, l, &genpd->master_links, master_node) {
if (link->slave != subdomain)
continue;
@@ -1642,10 +1644,10 @@
*/
void of_genpd_del_provider(struct device_node *np)
{
- struct of_genpd_provider *cp;
+ struct of_genpd_provider *cp, *tmp;
mutex_lock(&of_genpd_mutex);
- list_for_each_entry(cp, &of_genpd_providers, link) {
+ list_for_each_entry_safe(cp, tmp, &of_genpd_providers, link) {
if (cp->node == np) {
list_del(&cp->link);
of_node_put(cp->node);
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 3252429..3a20dc5 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -889,13 +889,13 @@
unsigned long flags;
int retval;
- might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
-
if (rpmflags & RPM_GET_PUT) {
if (!atomic_dec_and_test(&dev->power.usage_count))
return 0;
}
+ might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
+
spin_lock_irqsave(&dev->power.lock, flags);
retval = rpm_idle(dev, rpmflags);
spin_unlock_irqrestore(&dev->power.lock, flags);
@@ -921,13 +921,13 @@
unsigned long flags;
int retval;
- might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
-
if (rpmflags & RPM_GET_PUT) {
if (!atomic_dec_and_test(&dev->power.usage_count))
return 0;
}
+ might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
+
spin_lock_irqsave(&dev->power.lock, flags);
retval = rpm_suspend(dev, rpmflags);
spin_unlock_irqrestore(&dev->power.lock, flags);
@@ -952,7 +952,8 @@
unsigned long flags;
int retval;
- might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe);
+ might_sleep_if(!(rpmflags & RPM_ASYNC) && !dev->power.irq_safe &&
+ dev->power.runtime_status != RPM_ACTIVE);
if (rpmflags & RPM_GET_PUT)
atomic_inc(&dev->power.usage_count);
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index a7b4679..39efa7e 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -268,6 +268,8 @@
value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
else if (!strcmp(buf, "any") || !strcmp(buf, "any\n"))
value = PM_QOS_LATENCY_ANY;
+ else
+ return -EINVAL;
}
ret = dev_pm_qos_update_user_latency_tolerance(dev, value);
return ret < 0 ? ret : n;
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index c00d33d..a1d6982 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -62,6 +62,8 @@
static DECLARE_WAIT_QUEUE_HEAD(wakeup_count_wait_queue);
+DEFINE_STATIC_SRCU(wakeup_srcu);
+
static struct wakeup_source deleted_ws = {
.name = "deleted",
.lock = __SPIN_LOCK_UNLOCKED(deleted_ws.lock),
@@ -200,7 +202,7 @@
spin_lock_irqsave(&events_lock, flags);
list_del_rcu(&ws->entry);
spin_unlock_irqrestore(&events_lock, flags);
- synchronize_rcu();
+ synchronize_srcu(&wakeup_srcu);
}
EXPORT_SYMBOL_GPL(wakeup_source_remove);
@@ -332,13 +334,14 @@
void device_wakeup_arm_wake_irqs(void)
{
struct wakeup_source *ws;
+ int srcuidx;
- rcu_read_lock();
+ srcuidx = srcu_read_lock(&wakeup_srcu);
list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
if (ws->wakeirq)
dev_pm_arm_wake_irq(ws->wakeirq);
}
- rcu_read_unlock();
+ srcu_read_unlock(&wakeup_srcu, srcuidx);
}
/**
@@ -349,13 +352,14 @@
void device_wakeup_disarm_wake_irqs(void)
{
struct wakeup_source *ws;
+ int srcuidx;
- rcu_read_lock();
+ srcuidx = srcu_read_lock(&wakeup_srcu);
list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
if (ws->wakeirq)
dev_pm_disarm_wake_irq(ws->wakeirq);
}
- rcu_read_unlock();
+ srcu_read_unlock(&wakeup_srcu, srcuidx);
}
/**
@@ -840,10 +844,10 @@
void pm_print_active_wakeup_sources(void)
{
struct wakeup_source *ws;
- int active = 0;
+ int srcuidx, active = 0;
struct wakeup_source *last_activity_ws = NULL;
- rcu_read_lock();
+ srcuidx = srcu_read_lock(&wakeup_srcu);
list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
if (ws->active) {
pr_info("active wakeup source: %s\n", ws->name);
@@ -859,7 +863,7 @@
if (!active && last_activity_ws)
pr_info("last active wakeup source: %s\n",
last_activity_ws->name);
- rcu_read_unlock();
+ srcu_read_unlock(&wakeup_srcu, srcuidx);
}
EXPORT_SYMBOL_GPL(pm_print_active_wakeup_sources);
@@ -990,8 +994,9 @@
{
struct wakeup_source *ws;
ktime_t now = ktime_get();
+ int srcuidx;
- rcu_read_lock();
+ srcuidx = srcu_read_lock(&wakeup_srcu);
list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
spin_lock_irq(&ws->lock);
if (ws->autosleep_enabled != set) {
@@ -1005,7 +1010,7 @@
}
spin_unlock_irq(&ws->lock);
}
- rcu_read_unlock();
+ srcu_read_unlock(&wakeup_srcu, srcuidx);
}
#endif /* CONFIG_PM_AUTOSLEEP */
@@ -1066,15 +1071,16 @@
static int wakeup_sources_stats_show(struct seq_file *m, void *unused)
{
struct wakeup_source *ws;
+ int srcuidx;
seq_puts(m, "name\t\tactive_count\tevent_count\twakeup_count\t"
"expire_count\tactive_since\ttotal_time\tmax_time\t"
"last_change\tprevent_suspend_time\n");
- rcu_read_lock();
+ srcuidx = srcu_read_lock(&wakeup_srcu);
list_for_each_entry_rcu(ws, &wakeup_sources, entry)
print_wakeup_source_stats(m, ws);
- rcu_read_unlock();
+ srcu_read_unlock(&wakeup_srcu, srcuidx);
print_wakeup_source_stats(m, &deleted_ws);
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index 9462d27..8bdc34d 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -479,8 +479,14 @@
* this masks out the remaining bits.
* Returns the number of bits cleared.
*/
+#ifndef BITS_PER_PAGE
#define BITS_PER_PAGE (1UL << (PAGE_SHIFT + 3))
#define BITS_PER_PAGE_MASK (BITS_PER_PAGE - 1)
+#else
+# if BITS_PER_PAGE != (1UL << (PAGE_SHIFT + 3))
+# error "ambiguous BITS_PER_PAGE"
+# endif
+#endif
#define BITS_PER_LONG_MASK (BITS_PER_LONG - 1)
static int bm_clear_surplus(struct drbd_bitmap *b)
{
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 6ca35495..1e5cd39 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -641,11 +641,12 @@
if (err)
goto out_put_disk;
- q = vblk->disk->queue = blk_mq_init_queue(&vblk->tag_set);
+ q = blk_mq_init_queue(&vblk->tag_set);
if (IS_ERR(q)) {
err = -ENOMEM;
goto out_free_tags;
}
+ vblk->disk->queue = q;
q->queuedata = vblk;
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 41fb1a9..33e23a7 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -595,8 +595,6 @@
unsigned long timeout;
int ret;
- xen_blkif_get(blkif);
-
while (!kthread_should_stop()) {
if (try_to_freeze())
continue;
@@ -650,7 +648,6 @@
print_stats(blkif);
blkif->xenblkd = NULL;
- xen_blkif_put(blkif);
return 0;
}
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index f53cff4..9233082 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -221,7 +221,6 @@
if (blkif->xenblkd) {
kthread_stop(blkif->xenblkd);
wake_up(&blkif->shutdown_wq);
- blkif->xenblkd = NULL;
}
/* The above kthread_stop() guarantees that at this point we
@@ -266,9 +265,10 @@
static void xen_blkif_free(struct xen_blkif *blkif)
{
-
- xen_blkif_disconnect(blkif);
+ WARN_ON(xen_blkif_disconnect(blkif));
xen_vbd_free(&blkif->vbd);
+ kfree(blkif->be->mode);
+ kfree(blkif->be);
/* Make sure everything is drained before shutting down */
BUG_ON(blkif->persistent_gnt_c != 0);
@@ -445,8 +445,6 @@
xen_blkif_put(be->blkif);
}
- kfree(be->mode);
- kfree(be);
return 0;
}
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 76ac317..c5a2057 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -581,13 +581,13 @@
if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
- clear_page(mem);
+ memset(mem, 0, PAGE_SIZE);
return 0;
}
cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
if (size == PAGE_SIZE)
- copy_page(mem, cmem);
+ memcpy(mem, cmem, PAGE_SIZE);
else
ret = zcomp_decompress(zram->comp, cmem, size, mem);
zs_unmap_object(meta->mem_pool, handle);
@@ -750,7 +750,7 @@
if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
src = kmap_atomic(page);
- copy_page(cmem, src);
+ memcpy(cmem, src, PAGE_SIZE);
kunmap_atomic(src);
} else {
memcpy(cmem, src, clen);
diff --git a/drivers/bluetooth/btfm_slim_codec.c b/drivers/bluetooth/btfm_slim_codec.c
index 738f696..057b9a8 100644
--- a/drivers/bluetooth/btfm_slim_codec.c
+++ b/drivers/bluetooth/btfm_slim_codec.c
@@ -26,6 +26,9 @@
#include <sound/tlv.h>
#include "btfm_slim.h"
+static int bt_soc_enable_status;
+
+
static int btfm_slim_codec_write(struct snd_soc_codec *codec, unsigned int reg,
unsigned int value)
{
@@ -38,8 +41,31 @@
return 0;
}
+static int bt_soc_status_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ ucontrol->value.integer.value[0] = bt_soc_enable_status;
+ return 1;
+}
+
+static int bt_soc_status_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ return 1;
+}
+
+static const struct snd_kcontrol_new status_controls[] = {
+ SOC_SINGLE_EXT("BT SOC status", 0, 0, 1, 0,
+ bt_soc_status_get,
+ bt_soc_status_put)
+
+};
+
+
static int btfm_slim_codec_probe(struct snd_soc_codec *codec)
{
+ snd_soc_add_codec_controls(codec, status_controls,
+ ARRAY_SIZE(status_controls));
return 0;
}
@@ -130,6 +156,7 @@
struct btfmslim *btfmslim = dai->dev->platform_data;
struct btfmslim_ch *ch;
uint8_t rxport, grp = false, nchan = 1;
+ bt_soc_enable_status = 0;
BTFMSLIM_DBG("dai->name: %s, dai->id: %d, dai->rate: %d", dai->name,
dai->id, dai->rate);
@@ -171,6 +198,10 @@
}
ret = btfm_slim_enable_ch(btfmslim, ch, rxport, dai->rate, grp, nchan);
+
+ /* save the enable channel status */
+ if (ret == 0)
+ bt_soc_enable_status = 1;
return ret;
}
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index cd6b141..7bb8055 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -333,6 +333,7 @@
{ USB_DEVICE(0x13d3, 0x3410), .driver_info = BTUSB_REALTEK },
{ USB_DEVICE(0x13d3, 0x3416), .driver_info = BTUSB_REALTEK },
{ USB_DEVICE(0x13d3, 0x3459), .driver_info = BTUSB_REALTEK },
+ { USB_DEVICE(0x13d3, 0x3494), .driver_info = BTUSB_REALTEK },
/* Additional Realtek 8821AE Bluetooth devices */
{ USB_DEVICE(0x0b05, 0x17dc), .driver_info = BTUSB_REALTEK },
diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c
index cb852cc..f9b569e 100644
--- a/drivers/bluetooth/hci_bcm.c
+++ b/drivers/bluetooth/hci_bcm.c
@@ -287,6 +287,9 @@
hu->priv = bcm;
+ if (!hu->tty->dev)
+ goto out;
+
mutex_lock(&bcm_device_lock);
list_for_each(p, &bcm_device_list) {
struct bcm_device *dev = list_entry(p, struct bcm_device, list);
@@ -307,7 +310,7 @@
}
mutex_unlock(&bcm_device_lock);
-
+out:
return 0;
}
diff --git a/drivers/bluetooth/hci_intel.c b/drivers/bluetooth/hci_intel.c
index b906550..0c63fce 100644
--- a/drivers/bluetooth/hci_intel.c
+++ b/drivers/bluetooth/hci_intel.c
@@ -307,6 +307,9 @@
struct list_head *p;
int err = -ENODEV;
+ if (!hu->tty->dev)
+ return err;
+
mutex_lock(&intel_device_list_lock);
list_for_each(p, &intel_device_list) {
@@ -379,6 +382,9 @@
struct intel_data *intel = container_of(work, struct intel_data,
busy_work);
+ if (!intel->hu->tty->dev)
+ return;
+
/* Link is busy, delay the suspend */
mutex_lock(&intel_device_list_lock);
list_for_each(p, &intel_device_list) {
@@ -913,6 +919,8 @@
list_for_each(p, &intel_device_list) {
struct intel_device *dev = list_entry(p, struct intel_device,
list);
+ if (!hu->tty->dev)
+ break;
if (hu->tty->dev->parent == dev->pdev->dev.parent) {
if (device_may_wakeup(&dev->pdev->dev))
idev = dev;
@@ -1094,6 +1102,9 @@
BT_DBG("hu %p skb %p", hu, skb);
+ if (!hu->tty->dev)
+ goto out_enqueue;
+
/* Be sure our controller is resumed and potential LPM transaction
* completed before enqueuing any packet.
*/
@@ -1110,7 +1121,7 @@
}
}
mutex_unlock(&intel_device_list_lock);
-
+out_enqueue:
skb_queue_tail(&intel->txq, skb);
return 0;
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index f65cae9..2082097 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -586,12 +586,11 @@
config DEVPORT
bool "/dev/port character device"
- depends on !M68K
depends on ISA || PCI
default y
help
- Say Y here if you want to support the /dev/port device. The
- /dev/port device is similar to /dev/mem, but for I/O ports.
+ Say Y here if you want to support the /dev/port device. The /dev/port
+ device is similar to /dev/mem, but for I/O ports.
source "drivers/s390/char/Kconfig"
diff --git a/drivers/char/diag/diag_masks.c b/drivers/char/diag/diag_masks.c
index f6b7861..9ecd62c 100644
--- a/drivers/char/diag/diag_masks.c
+++ b/drivers/char/diag/diag_masks.c
@@ -290,6 +290,7 @@
struct diag_mask_info *mask_info = NULL;
struct diag_msg_mask_t *mask = NULL;
struct diag_ctrl_msg_mask header;
+ uint8_t msg_mask_tbl_count_local;
if (peripheral >= NUM_PERIPHERALS)
return;
@@ -316,6 +317,8 @@
return;
}
buf = mask_info->update_buf;
+ msg_mask_tbl_count_local = driver->msg_mask_tbl_count;
+ mutex_unlock(&driver->msg_mask_lock);
mutex_lock(&mask_info->lock);
switch (mask_info->status) {
case DIAG_CTRL_MASK_ALL_DISABLED:
@@ -332,9 +335,11 @@
goto err;
}
- for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) {
- if (((first < mask->ssid_first) ||
- (last > mask->ssid_last_tools)) && first != ALL_SSID) {
+ for (i = 0; i < msg_mask_tbl_count_local; i++, mask++) {
+ mutex_lock(&driver->msg_mask_lock);
+ if (((mask->ssid_first > first) ||
+ (mask->ssid_last_tools < last)) && first != ALL_SSID) {
+ mutex_unlock(&driver->msg_mask_lock);
continue;
}
@@ -375,19 +380,19 @@
if (mask_size > 0)
memcpy(buf + header_len, mask->ptr, mask_size);
mutex_unlock(&mask->lock);
+ mutex_unlock(&driver->msg_mask_lock);
err = diagfwd_write(peripheral, TYPE_CNTL, buf,
header_len + mask_size);
if (err && err != -ENODEV)
- pr_err_ratelimited("diag: Unable to send msg masks to peripheral %d\n",
- peripheral);
+ pr_err_ratelimited("diag: Unable to send msg masks to peripheral %d, error = %d\n",
+ peripheral, err);
if (first != ALL_SSID)
break;
}
err:
mutex_unlock(&mask_info->lock);
- mutex_unlock(&driver->msg_mask_lock);
}
static void diag_send_time_sync_update(uint8_t peripheral)
@@ -665,8 +670,8 @@
}
req = (struct diag_msg_build_mask_t *)src_buf;
- mutex_lock(&driver->msg_mask_lock);
mutex_lock(&mask_info->lock);
+ mutex_lock(&driver->msg_mask_lock);
mask = (struct diag_msg_mask_t *)mask_info->ptr;
for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) {
if (i < (driver->msg_mask_tbl_count - 1)) {
@@ -706,8 +711,8 @@
pr_err_ratelimited("diag: In %s, unable to allocate memory for msg mask ptr, mask_size: %d\n",
__func__, mask_size);
mutex_unlock(&mask->lock);
- mutex_unlock(&mask_info->lock);
mutex_unlock(&driver->msg_mask_lock);
+ mutex_unlock(&mask_info->lock);
return -ENOMEM;
}
mask->ptr = temp;
@@ -726,8 +731,8 @@
mask_info->status = DIAG_CTRL_MASK_VALID;
break;
}
- mutex_unlock(&mask_info->lock);
mutex_unlock(&driver->msg_mask_lock);
+ mutex_unlock(&mask_info->lock);
if (diag_check_update(APPS_DATA))
diag_update_userspace_clients(MSG_MASKS_TYPE);
@@ -780,9 +785,11 @@
}
req = (struct diag_msg_config_rsp_t *)src_buf;
- mutex_lock(&driver->msg_mask_lock);
- mask = (struct diag_msg_mask_t *)mask_info->ptr;
+
mutex_lock(&mask_info->lock);
+ mutex_lock(&driver->msg_mask_lock);
+
+ mask = (struct diag_msg_mask_t *)mask_info->ptr;
mask_info->status = (req->rt_mask) ? DIAG_CTRL_MASK_ALL_ENABLED :
DIAG_CTRL_MASK_ALL_DISABLED;
for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) {
@@ -791,8 +798,8 @@
mask->range * sizeof(uint32_t));
mutex_unlock(&mask->lock);
}
- mutex_unlock(&mask_info->lock);
mutex_unlock(&driver->msg_mask_lock);
+ mutex_unlock(&mask_info->lock);
if (diag_check_update(APPS_DATA))
diag_update_userspace_clients(MSG_MASKS_TYPE);
@@ -1296,8 +1303,8 @@
struct diag_msg_mask_t *mask = (struct diag_msg_mask_t *)msg_mask.ptr;
struct diag_ssid_range_t range;
- mutex_lock(&driver->msg_mask_lock);
mutex_lock(&msg_mask.lock);
+ mutex_lock(&driver->msg_mask_lock);
driver->msg_mask_tbl_count = MSG_MASK_TBL_CNT;
for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) {
range.ssid_first = msg_mask_tbl[i].ssid_first;
@@ -1306,8 +1313,8 @@
if (err)
break;
}
- mutex_unlock(&msg_mask.lock);
mutex_unlock(&driver->msg_mask_lock);
+ mutex_unlock(&msg_mask.lock);
return err;
}
@@ -1320,8 +1327,8 @@
struct diag_msg_mask_t *build_mask = NULL;
struct diag_ssid_range_t range;
- mutex_lock(&driver->msg_mask_lock);
mutex_lock(&msg_bt_mask.lock);
+ mutex_lock(&driver->msg_mask_lock);
driver->bt_msg_mask_tbl_count = MSG_MASK_TBL_CNT;
build_mask = (struct diag_msg_mask_t *)msg_bt_mask.ptr;
for (i = 0; i < driver->bt_msg_mask_tbl_count; i++, build_mask++) {
@@ -1434,9 +1441,8 @@
}
memcpy(build_mask->ptr, tbl, tbl_size);
}
- mutex_unlock(&msg_bt_mask.lock);
mutex_unlock(&driver->msg_mask_lock);
-
+ mutex_unlock(&msg_bt_mask.lock);
return err;
}
@@ -1606,8 +1612,8 @@
err = __diag_mask_init(dest, MSG_MASK_SIZE, APPS_BUF_SIZE);
if (err)
return err;
- mutex_lock(&driver->msg_mask_lock);
mutex_lock(&dest->lock);
+ mutex_lock(&driver->msg_mask_lock);
src_mask = (struct diag_msg_mask_t *)src->ptr;
dest_mask = (struct diag_msg_mask_t *)dest->ptr;
@@ -1624,9 +1630,8 @@
src_mask++;
dest_mask++;
}
- mutex_unlock(&dest->lock);
mutex_unlock(&driver->msg_mask_lock);
-
+ mutex_unlock(&dest->lock);
return err;
}
@@ -1637,15 +1642,15 @@
if (!mask_info)
return;
- mutex_lock(&driver->msg_mask_lock);
mutex_lock(&mask_info->lock);
+ mutex_lock(&driver->msg_mask_lock);
mask = (struct diag_msg_mask_t *)mask_info->ptr;
for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) {
kfree(mask->ptr);
mask->ptr = NULL;
}
- mutex_unlock(&mask_info->lock);
mutex_unlock(&driver->msg_mask_lock);
+ mutex_unlock(&mask_info->lock);
__diag_mask_exit(mask_info);
}
@@ -1814,8 +1819,9 @@
return -EIO;
}
mutex_unlock(&driver->diag_maskclear_mutex);
- mutex_lock(&driver->msg_mask_lock);
mutex_lock(&mask_info->lock);
+ mutex_lock(&driver->msg_mask_lock);
+
mask = (struct diag_msg_mask_t *)(mask_info->ptr);
for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) {
ptr = mask_info->update_buf;
@@ -1852,8 +1858,8 @@
}
total_len += len;
}
- mutex_unlock(&mask_info->lock);
mutex_unlock(&driver->msg_mask_lock);
+ mutex_unlock(&mask_info->lock);
return err ? err : total_len;
}
diff --git a/drivers/char/diag/diagfwd_glink.c b/drivers/char/diag/diagfwd_glink.c
index 03d496c..f1f8f0b 100644
--- a/drivers/char/diag/diagfwd_glink.c
+++ b/drivers/char/diag/diagfwd_glink.c
@@ -361,13 +361,44 @@
diagfwd_channel_read(glink_info->fwd_ctxt);
}
+struct diag_glink_read_work {
+ struct diag_glink_info *glink_info;
+ const void *ptr_read_done;
+ const void *ptr_rx_done;
+ size_t ptr_read_size;
+ struct work_struct work;
+};
+
+static void diag_glink_notify_rx_work_fn(struct work_struct *work)
+{
+ struct diag_glink_read_work *read_work = container_of(work,
+ struct diag_glink_read_work, work);
+ struct diag_glink_info *glink_info = read_work->glink_info;
+
+ if (!glink_info || !glink_info->hdl) {
+ kfree(read_work);
+ return;
+ }
+
+ diagfwd_channel_read_done(glink_info->fwd_ctxt,
+ (unsigned char *)(read_work->ptr_read_done),
+ read_work->ptr_read_size);
+
+ glink_rx_done(glink_info->hdl, read_work->ptr_rx_done, false);
+
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: Rx done for packet %pK of len: %d periph: %d ch: %d\n",
+ read_work->ptr_rx_done, (int)read_work->ptr_read_size,
+ glink_info->peripheral, glink_info->type);
+ kfree(read_work);
+}
static void diag_glink_notify_rx(void *hdl, const void *priv,
const void *pkt_priv, const void *ptr,
size_t size)
{
struct diag_glink_info *glink_info = (struct diag_glink_info *)priv;
- int err = 0;
+ struct diag_glink_read_work *read_work;
if (!glink_info || !glink_info->hdl || !ptr || !pkt_priv || !hdl)
return;
@@ -379,12 +410,25 @@
"diag: received a packet %pK of len:%d from periph:%d ch:%d\n",
ptr, (int)size, glink_info->peripheral, glink_info->type);
+ read_work = kmalloc(sizeof(*read_work), GFP_ATOMIC);
+ if (!read_work) {
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
+ "diag: Could not allocate read_work\n");
+ glink_rx_done(glink_info->hdl, ptr, true);
+ return;
+ }
+
memcpy((void *)pkt_priv, ptr, size);
- err = diagfwd_channel_read_done(glink_info->fwd_ctxt,
- (unsigned char *)pkt_priv, size);
- glink_rx_done(glink_info->hdl, ptr, false);
+
+ read_work->glink_info = glink_info;
+ read_work->ptr_read_done = pkt_priv;
+ read_work->ptr_rx_done = ptr;
+ read_work->ptr_read_size = size;
+ INIT_WORK(&read_work->work, diag_glink_notify_rx_work_fn);
+ queue_work(glink_info->wq, &read_work->work);
+
DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
- "diag: Rx done for packet %pK of len:%d periph:%d ch:%d\n",
+ "diag: Rx queued for packet %pK of len: %d periph: %d ch: %d\n",
ptr, (int)size, glink_info->peripheral, glink_info->type);
}
@@ -473,6 +517,8 @@
atomic_set(&glink_info->opened, 1);
diagfwd_channel_open(glink_info->fwd_ctxt);
diagfwd_late_open(glink_info->fwd_ctxt);
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "glink channel open: p: %d t: %d\n",
+ glink_info->peripheral, glink_info->type);
}
static void diag_glink_remote_disconnect_work_fn(struct work_struct *work)
@@ -494,9 +540,9 @@
late_init_work);
if (!glink_info || !glink_info->hdl)
return;
- DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "p: %d t: %d\n",
- glink_info->peripheral, glink_info->type);
diagfwd_channel_open(glink_info->fwd_ctxt);
+ DIAG_LOG(DIAG_DEBUG_PERIPHERALS, "glink late init p: %d t: %d\n",
+ glink_info->peripheral, glink_info->type);
}
static void diag_glink_transport_notify_state(void *handle, const void *priv,
diff --git a/drivers/char/diag/diagfwd_peripheral.c b/drivers/char/diag/diagfwd_peripheral.c
index 4b5db01..55bb667 100644
--- a/drivers/char/diag/diagfwd_peripheral.c
+++ b/drivers/char/diag/diagfwd_peripheral.c
@@ -1072,7 +1072,14 @@
mutex_lock(&driver->diagfwd_channel_mutex[fwd_info->peripheral]);
fwd_info->ch_open = 1;
diagfwd_buffers_init(fwd_info);
- diagfwd_write_buffers_init(fwd_info);
+
+ /*
+ * Initialize buffers for glink supported
+ * peripherals only.
+ */
+ if (fwd_info->transport == TRANSPORT_GLINK)
+ diagfwd_write_buffers_init(fwd_info);
+
if (fwd_info && fwd_info->c_ops && fwd_info->c_ops->open)
fwd_info->c_ops->open(fwd_info);
for (i = 0; i < NUM_WRITE_BUFFERS; i++) {
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index a084a47..25372dc 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -3877,6 +3877,9 @@
* because the lower layer is allowed to hold locks while calling
* message delivery.
*/
+
+ rcu_read_lock();
+
if (!run_to_completion)
spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
if (intf->curr_msg == NULL && !intf->in_shutdown) {
@@ -3899,6 +3902,8 @@
if (newmsg)
intf->handlers->sender(intf->send_info, newmsg);
+ rcu_read_unlock();
+
handle_new_recv_msgs(intf);
}
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index 90e6246..f53e8ba 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -758,6 +758,11 @@
result, len, data[2]);
} else if (data[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2
|| data[1] != IPMI_GET_MSG_FLAGS_CMD) {
+ /*
+ * Don't abort here, maybe it was a queued
+ * response to a previous command.
+ */
+ ipmi_ssif_unlock_cond(ssif_info, flags);
pr_warn(PFX "Invalid response getting flags: %x %x\n",
data[0], data[1]);
} else {
@@ -888,6 +893,7 @@
* for details on the intricacies of this.
*/
int left;
+ unsigned char *data_to_send;
ssif_inc_stat(ssif_info, sent_messages_parts);
@@ -896,6 +902,7 @@
left = 32;
/* Length byte. */
ssif_info->multi_data[ssif_info->multi_pos] = left;
+ data_to_send = ssif_info->multi_data + ssif_info->multi_pos;
ssif_info->multi_pos += left;
if (left < 32)
/*
@@ -909,7 +916,7 @@
rv = ssif_i2c_send(ssif_info, msg_written_handler,
I2C_SMBUS_WRITE,
SSIF_IPMI_MULTI_PART_REQUEST_MIDDLE,
- ssif_info->multi_data + ssif_info->multi_pos,
+ data_to_send,
I2C_SMBUS_BLOCK_DATA);
if (rv < 0) {
/* request failed, just return the error. */
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index 096f0cef..40d400f 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -1162,10 +1162,11 @@
ipmi_watchdog_state = WDOG_TIMEOUT_NONE;
ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
} else if (ipmi_watchdog_state != WDOG_TIMEOUT_NONE) {
- /* Set a long timer to let the reboot happens, but
- reboot if it hangs, but only if the watchdog
+ /* Set a long timer to let the reboot happen or
+ reset if it hangs, but only if the watchdog
timer was already running. */
- timeout = 120;
+ if (timeout < 120)
+ timeout = 120;
pretimeout = 0;
ipmi_watchdog_state = WDOG_TIMEOUT_RESET;
ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
diff --git a/drivers/char/lp.c b/drivers/char/lp.c
index c4094c4..34ef474 100644
--- a/drivers/char/lp.c
+++ b/drivers/char/lp.c
@@ -859,7 +859,11 @@
} else if (!strcmp(str, "auto")) {
parport_nr[0] = LP_PARPORT_AUTO;
} else if (!strcmp(str, "none")) {
- parport_nr[parport_ptr++] = LP_PARPORT_NONE;
+ if (parport_ptr < LP_NO)
+ parport_nr[parport_ptr++] = LP_PARPORT_NONE;
+ else
+ printk(KERN_INFO "lp: too many ports, %s ignored.\n",
+ str);
} else if (!strcmp(str, "reset")) {
reset = 1;
}
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 6b1721f..2898d19 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -59,6 +59,10 @@
#endif
#ifdef CONFIG_STRICT_DEVMEM
+static inline int page_is_allowed(unsigned long pfn)
+{
+ return devmem_is_allowed(pfn);
+}
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
u64 from = ((u64)pfn) << PAGE_SHIFT;
@@ -78,6 +82,10 @@
return 1;
}
#else
+static inline int page_is_allowed(unsigned long pfn)
+{
+ return 1;
+}
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
return 1;
@@ -125,23 +133,31 @@
while (count > 0) {
unsigned long remaining;
+ int allowed;
sz = size_inside_page(p, count);
- if (!range_is_allowed(p >> PAGE_SHIFT, count))
+ allowed = page_is_allowed(p >> PAGE_SHIFT);
+ if (!allowed)
return -EPERM;
+ if (allowed == 2) {
+ /* Show zeros for restricted memory. */
+ remaining = clear_user(buf, sz);
+ } else {
+ /*
+ * On ia64 if a page has been mapped somewhere as
+ * uncached, then it must also be accessed uncached
+ * by the kernel or data corruption may occur.
+ */
+ ptr = xlate_dev_mem_ptr(p);
+ if (!ptr)
+ return -EFAULT;
- /*
- * On ia64 if a page has been mapped somewhere as uncached, then
- * it must also be accessed uncached by the kernel or data
- * corruption may occur.
- */
- ptr = xlate_dev_mem_ptr(p);
- if (!ptr)
- return -EFAULT;
+ remaining = copy_to_user(buf, ptr, sz);
- remaining = copy_to_user(buf, ptr, sz);
- unxlate_dev_mem_ptr(p, ptr);
+ unxlate_dev_mem_ptr(p, ptr);
+ }
+
if (remaining)
return -EFAULT;
@@ -184,30 +200,36 @@
#endif
while (count > 0) {
+ int allowed;
+
sz = size_inside_page(p, count);
- if (!range_is_allowed(p >> PAGE_SHIFT, sz))
+ allowed = page_is_allowed(p >> PAGE_SHIFT);
+ if (!allowed)
return -EPERM;
- /*
- * On ia64 if a page has been mapped somewhere as uncached, then
- * it must also be accessed uncached by the kernel or data
- * corruption may occur.
- */
- ptr = xlate_dev_mem_ptr(p);
- if (!ptr) {
- if (written)
- break;
- return -EFAULT;
- }
+ /* Skip actual writing when a page is marked as restricted. */
+ if (allowed == 1) {
+ /*
+ * On ia64 if a page has been mapped somewhere as
+ * uncached, then it must also be accessed uncached
+ * by the kernel or data corruption may occur.
+ */
+ ptr = xlate_dev_mem_ptr(p);
+ if (!ptr) {
+ if (written)
+ break;
+ return -EFAULT;
+ }
- copied = copy_from_user(ptr, buf, sz);
- unxlate_dev_mem_ptr(p, ptr);
- if (copied) {
- written += sz - copied;
- if (written)
- break;
- return -EFAULT;
+ copied = copy_from_user(ptr, buf, sz);
+ unxlate_dev_mem_ptr(p, ptr);
+ if (copied) {
+ written += sz - copied;
+ if (written)
+ break;
+ return -EFAULT;
+ }
}
buf += sz;
@@ -321,6 +343,11 @@
static int mmap_mem(struct file *file, struct vm_area_struct *vma)
{
size_t size = vma->vm_end - vma->vm_start;
+ phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
+
+ /* It's illegal to wrap around the end of the physical address space. */
+ if (offset + (phys_addr_t)size - 1 < offset)
+ return -EINVAL;
if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
return -EINVAL;
diff --git a/drivers/char/pcmcia/cm4040_cs.c b/drivers/char/pcmcia/cm4040_cs.c
index fc061f7..a7de8ae 100644
--- a/drivers/char/pcmcia/cm4040_cs.c
+++ b/drivers/char/pcmcia/cm4040_cs.c
@@ -374,7 +374,7 @@
rc = write_sync_reg(SCR_HOST_TO_READER_START, dev);
if (rc <= 0) {
- DEBUGP(5, dev, "write_sync_reg c=%.2Zx\n", rc);
+ DEBUGP(5, dev, "write_sync_reg c=%.2zx\n", rc);
DEBUGP(2, dev, "<- cm4040_write (failed)\n");
if (rc == -ERESTARTSYS)
return rc;
@@ -387,7 +387,7 @@
for (i = 0; i < bytes_to_write; i++) {
rc = wait_for_bulk_out_ready(dev);
if (rc <= 0) {
- DEBUGP(5, dev, "wait_for_bulk_out_ready rc=%.2Zx\n",
+ DEBUGP(5, dev, "wait_for_bulk_out_ready rc=%.2zx\n",
rc);
DEBUGP(2, dev, "<- cm4040_write (failed)\n");
if (rc == -ERESTARTSYS)
@@ -403,7 +403,7 @@
rc = write_sync_reg(SCR_HOST_TO_READER_DONE, dev);
if (rc <= 0) {
- DEBUGP(5, dev, "write_sync_reg c=%.2Zx\n", rc);
+ DEBUGP(5, dev, "write_sync_reg c=%.2zx\n", rc);
DEBUGP(2, dev, "<- cm4040_write (failed)\n");
if (rc == -ERESTARTSYS)
return rc;
diff --git a/drivers/char/random.c b/drivers/char/random.c
index d93dfeb..1822472 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1798,13 +1798,15 @@
return 0;
}
+static DEFINE_PER_CPU(__u32 [MD5_DIGEST_WORDS], get_random_int_hash)
+ __aligned(sizeof(unsigned long));
+
/*
* Get a random word for internal kernel use only. Similar to urandom but
* with the goal of minimal entropy pool depletion. As a result, the random
* value is not cryptographically secure but for several uses the cost of
* depleting entropy is too high
*/
-static DEFINE_PER_CPU(__u32 [MD5_DIGEST_WORDS], get_random_int_hash);
unsigned int get_random_int(void)
{
__u32 *hash;
diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c
index 2521425..a0d9ac6 100644
--- a/drivers/char/tpm/tpm-chip.c
+++ b/drivers/char/tpm/tpm-chip.c
@@ -29,33 +29,92 @@
#include "tpm.h"
#include "tpm_eventlog.h"
-static DECLARE_BITMAP(dev_mask, TPM_NUM_DEVICES);
-static LIST_HEAD(tpm_chip_list);
-static DEFINE_SPINLOCK(driver_lock);
+DEFINE_IDR(dev_nums_idr);
+static DEFINE_MUTEX(idr_lock);
struct class *tpm_class;
dev_t tpm_devt;
-/*
- * tpm_chip_find_get - return tpm_chip for a given chip number
- * @chip_num the device number for the chip
+/**
+ * tpm_try_get_ops() - Get a ref to the tpm_chip
+ * @chip: Chip to ref
+ *
+ * The caller must already have some kind of locking to ensure that chip is
+ * valid. This function will lock the chip so that the ops member can be
+ * accessed safely. The locking prevents tpm_chip_unregister from
+ * completing, so it should not be held for long periods.
+ *
+ * Returns -ERRNO if the chip could not be got.
*/
+int tpm_try_get_ops(struct tpm_chip *chip)
+{
+ int rc = -EIO;
+
+ get_device(&chip->dev);
+
+ down_read(&chip->ops_sem);
+ if (!chip->ops)
+ goto out_lock;
+
+ if (!try_module_get(chip->dev.parent->driver->owner))
+ goto out_lock;
+
+ return 0;
+out_lock:
+ up_read(&chip->ops_sem);
+ put_device(&chip->dev);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(tpm_try_get_ops);
+
+/**
+ * tpm_put_ops() - Release a ref to the tpm_chip
+ * @chip: Chip to put
+ *
+ * This is the opposite pair to tpm_try_get_ops(). After this returns chip may
+ * be kfree'd.
+ */
+void tpm_put_ops(struct tpm_chip *chip)
+{
+ module_put(chip->dev.parent->driver->owner);
+ up_read(&chip->ops_sem);
+ put_device(&chip->dev);
+}
+EXPORT_SYMBOL_GPL(tpm_put_ops);
+
+/**
+ * tpm_chip_find_get() - return tpm_chip for a given chip number
+ * @chip_num: id to find
+ *
+ * The return'd chip has been tpm_try_get_ops'd and must be released via
+ * tpm_put_ops
+ */
struct tpm_chip *tpm_chip_find_get(int chip_num)
{
- struct tpm_chip *pos, *chip = NULL;
+ struct tpm_chip *chip, *res = NULL;
+ int chip_prev;
- rcu_read_lock();
- list_for_each_entry_rcu(pos, &tpm_chip_list, list) {
- if (chip_num != TPM_ANY_NUM && chip_num != pos->dev_num)
- continue;
+ mutex_lock(&idr_lock);
- if (try_module_get(pos->pdev->driver->owner)) {
- chip = pos;
- break;
- }
+ if (chip_num == TPM_ANY_NUM) {
+ chip_num = 0;
+ do {
+ chip_prev = chip_num;
+ chip = idr_get_next(&dev_nums_idr, &chip_num);
+ if (chip && !tpm_try_get_ops(chip)) {
+ res = chip;
+ break;
+ }
+ } while (chip_prev != chip_num);
+ } else {
+ chip = idr_find_slowpath(&dev_nums_idr, chip_num);
+ if (chip && !tpm_try_get_ops(chip))
+ res = chip;
}
- rcu_read_unlock();
- return chip;
+
+ mutex_unlock(&idr_lock);
+
+ return res;
}
/**
@@ -68,12 +127,48 @@
{
struct tpm_chip *chip = container_of(dev, struct tpm_chip, dev);
- spin_lock(&driver_lock);
- clear_bit(chip->dev_num, dev_mask);
- spin_unlock(&driver_lock);
+ mutex_lock(&idr_lock);
+ idr_remove(&dev_nums_idr, chip->dev_num);
+ mutex_unlock(&idr_lock);
+
kfree(chip);
}
+
+/**
+ * tpm_class_shutdown() - prepare the TPM device for loss of power.
+ * @dev: device to which the chip is associated.
+ *
+ * Issues a TPM2_Shutdown command prior to loss of power, as required by the
+ * TPM 2.0 spec.
+ * Then, calls bus- and device- specific shutdown code.
+ *
+ * XXX: This codepath relies on the fact that sysfs is not enabled for
+ * TPM2: sysfs uses an implicit lock on chip->ops, so this could race if TPM2
+ * has sysfs support enabled before TPM sysfs's implicit locking is fixed.
+ */
+static int tpm_class_shutdown(struct device *dev)
+{
+ struct tpm_chip *chip = container_of(dev, struct tpm_chip, dev);
+
+ if (chip->flags & TPM_CHIP_FLAG_TPM2) {
+ down_write(&chip->ops_sem);
+ tpm2_shutdown(chip, TPM2_SU_CLEAR);
+ chip->ops = NULL;
+ up_write(&chip->ops_sem);
+ }
+ /* Allow bus- and device-specific code to run. Note: since chip->ops
+ * is NULL, more-specific shutdown code will not be able to issue TPM
+ * commands.
+ */
+ if (dev->bus && dev->bus->shutdown)
+ dev->bus->shutdown(dev);
+ else if (dev->driver && dev->driver->shutdown)
+ dev->driver->shutdown(dev);
+ return 0;
+}
+
+
/**
* tpmm_chip_alloc() - allocate a new struct tpm_chip instance
* @dev: device to which the chip is associated
@@ -88,37 +183,35 @@
const struct tpm_class_ops *ops)
{
struct tpm_chip *chip;
+ int rc;
chip = kzalloc(sizeof(*chip), GFP_KERNEL);
if (chip == NULL)
return ERR_PTR(-ENOMEM);
mutex_init(&chip->tpm_mutex);
- INIT_LIST_HEAD(&chip->list);
+ init_rwsem(&chip->ops_sem);
chip->ops = ops;
- spin_lock(&driver_lock);
- chip->dev_num = find_first_zero_bit(dev_mask, TPM_NUM_DEVICES);
- spin_unlock(&driver_lock);
-
- if (chip->dev_num >= TPM_NUM_DEVICES) {
+ mutex_lock(&idr_lock);
+ rc = idr_alloc(&dev_nums_idr, NULL, 0, TPM_NUM_DEVICES, GFP_KERNEL);
+ mutex_unlock(&idr_lock);
+ if (rc < 0) {
dev_err(dev, "No available tpm device numbers\n");
kfree(chip);
- return ERR_PTR(-ENOMEM);
+ return ERR_PTR(rc);
}
-
- set_bit(chip->dev_num, dev_mask);
+ chip->dev_num = rc;
scnprintf(chip->devname, sizeof(chip->devname), "tpm%d", chip->dev_num);
- chip->pdev = dev;
-
dev_set_drvdata(dev, chip);
chip->dev.class = tpm_class;
+ chip->dev.class->shutdown = tpm_class_shutdown;
chip->dev.release = tpm_dev_release;
- chip->dev.parent = chip->pdev;
+ chip->dev.parent = dev;
#ifdef CONFIG_ACPI
chip->dev.groups = chip->groups;
#endif
@@ -133,7 +226,7 @@
device_initialize(&chip->dev);
cdev_init(&chip->cdev, &tpm_fops);
- chip->cdev.owner = chip->pdev->driver->owner;
+ chip->cdev.owner = dev->driver->owner;
chip->cdev.kobj.parent = &chip->dev.kobj;
devm_add_action(dev, (void (*)(void *)) put_device, &chip->dev);
@@ -167,6 +260,11 @@
return rc;
}
+ /* Make the chip available. */
+ mutex_lock(&idr_lock);
+ idr_replace(&dev_nums_idr, chip, chip->dev_num);
+ mutex_unlock(&idr_lock);
+
return rc;
}
@@ -174,6 +272,16 @@
{
cdev_del(&chip->cdev);
device_del(&chip->dev);
+
+ /* Make the chip unavailable. */
+ mutex_lock(&idr_lock);
+ idr_replace(&dev_nums_idr, NULL, chip->dev_num);
+ mutex_unlock(&idr_lock);
+
+ /* Make the driver uncallable. */
+ down_write(&chip->ops_sem);
+ chip->ops = NULL;
+ up_write(&chip->ops_sem);
}
static int tpm1_chip_register(struct tpm_chip *chip)
@@ -228,17 +336,11 @@
if (rc)
goto out_err;
- /* Make the chip available. */
- spin_lock(&driver_lock);
- list_add_tail_rcu(&chip->list, &tpm_chip_list);
- spin_unlock(&driver_lock);
-
chip->flags |= TPM_CHIP_FLAG_REGISTERED;
if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) {
- rc = __compat_only_sysfs_link_entry_to_kobj(&chip->pdev->kobj,
- &chip->dev.kobj,
- "ppi");
+ rc = __compat_only_sysfs_link_entry_to_kobj(
+ &chip->dev.parent->kobj, &chip->dev.kobj, "ppi");
if (rc && rc != -ENOENT) {
tpm_chip_unregister(chip);
return rc;
@@ -259,6 +361,9 @@
* Takes the chip first away from the list of available TPM chips and then
* cleans up all the resources reserved by tpm_chip_register().
*
+ * Once this function returns the driver call backs in 'op's will not be
+ * running and will no longer start.
+ *
* NOTE: This function should be only called before deinitializing chip
* resources.
*/
@@ -267,13 +372,8 @@
if (!(chip->flags & TPM_CHIP_FLAG_REGISTERED))
return;
- spin_lock(&driver_lock);
- list_del_rcu(&chip->list);
- spin_unlock(&driver_lock);
- synchronize_rcu();
-
if (!(chip->flags & TPM_CHIP_FLAG_TPM2))
- sysfs_remove_link(&chip->pdev->kobj, "ppi");
+ sysfs_remove_link(&chip->dev.parent->kobj, "ppi");
tpm1_chip_unregister(chip);
tpm_del_char_device(chip);
diff --git a/drivers/char/tpm/tpm-dev.c b/drivers/char/tpm/tpm-dev.c
index 4f3137d..912ad30 100644
--- a/drivers/char/tpm/tpm-dev.c
+++ b/drivers/char/tpm/tpm-dev.c
@@ -61,7 +61,7 @@
* by the check of is_open variable, which is protected
* by driver_lock. */
if (test_and_set_bit(0, &chip->is_open)) {
- dev_dbg(chip->pdev, "Another process owns this TPM\n");
+ dev_dbg(&chip->dev, "Another process owns this TPM\n");
return -EBUSY;
}
@@ -79,7 +79,6 @@
INIT_WORK(&priv->work, timeout_work);
file->private_data = priv;
- get_device(chip->pdev);
return 0;
}
@@ -137,9 +136,18 @@
return -EFAULT;
}
- /* atomic tpm command send and result receive */
+ /* atomic tpm command send and result receive. We only hold the ops
+ * lock during this period so that the tpm can be unregistered even if
+ * the char dev is held open.
+ */
+ if (tpm_try_get_ops(priv->chip)) {
+ mutex_unlock(&priv->buffer_mutex);
+ return -EPIPE;
+ }
out_size = tpm_transmit(priv->chip, priv->data_buffer,
sizeof(priv->data_buffer), 0);
+
+ tpm_put_ops(priv->chip);
if (out_size < 0) {
mutex_unlock(&priv->buffer_mutex);
return out_size;
@@ -166,7 +174,6 @@
file->private_data = NULL;
atomic_set(&priv->data_pending, 0);
clear_bit(0, &priv->chip->is_open);
- put_device(priv->chip->pdev);
kfree(priv);
return 0;
}
diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
index 17abe52..aaa5fa9 100644
--- a/drivers/char/tpm/tpm-interface.c
+++ b/drivers/char/tpm/tpm-interface.c
@@ -343,7 +343,7 @@
if (count == 0)
return -ENODATA;
if (count > bufsiz) {
- dev_err(chip->pdev,
+ dev_err(&chip->dev,
"invalid count value %x %zx\n", count, bufsiz);
return -E2BIG;
}
@@ -353,7 +353,7 @@
rc = chip->ops->send(chip, (u8 *) buf, count);
if (rc < 0) {
- dev_err(chip->pdev,
+ dev_err(&chip->dev,
"tpm_transmit: tpm_send: error %zd\n", rc);
goto out;
}
@@ -372,7 +372,7 @@
goto out_recv;
if (chip->ops->req_canceled(chip, status)) {
- dev_err(chip->pdev, "Operation Canceled\n");
+ dev_err(&chip->dev, "Operation Canceled\n");
rc = -ECANCELED;
goto out;
}
@@ -382,14 +382,14 @@
} while (time_before(jiffies, stop));
chip->ops->cancel(chip);
- dev_err(chip->pdev, "Operation Timed out\n");
+ dev_err(&chip->dev, "Operation Timed out\n");
rc = -ETIME;
goto out;
out_recv:
rc = chip->ops->recv(chip, (u8 *) buf, bufsiz);
if (rc < 0)
- dev_err(chip->pdev,
+ dev_err(&chip->dev,
"tpm_transmit: tpm_recv: error %zd\n", rc);
out:
if (!(flags & TPM_TRANSMIT_UNLOCKED))
@@ -416,7 +416,7 @@
err = be32_to_cpu(header->return_code);
if (err != 0 && desc)
- dev_err(chip->pdev, "A TPM error (%d) occurred %s\n", err,
+ dev_err(&chip->dev, "A TPM error (%d) occurred %s\n", err,
desc);
return err;
@@ -514,7 +514,7 @@
if (rc == TPM_ERR_INVALID_POSTINIT) {
/* The TPM is not started, we are the first to talk to it.
Execute a startup command. */
- dev_info(chip->pdev, "Issuing TPM_STARTUP");
+ dev_info(&chip->dev, "Issuing TPM_STARTUP");
if (tpm_startup(chip, TPM_ST_CLEAR))
return rc;
@@ -526,7 +526,7 @@
0, NULL);
}
if (rc) {
- dev_err(chip->pdev,
+ dev_err(&chip->dev,
"A TPM error (%zd) occurred attempting to determine the timeouts\n",
rc);
goto duration;
@@ -565,7 +565,7 @@
/* Report adjusted timeouts */
if (chip->vendor.timeout_adjusted) {
- dev_info(chip->pdev,
+ dev_info(&chip->dev,
HW_ERR "Adjusting reported timeouts: A %lu->%luus B %lu->%luus C %lu->%luus D %lu->%luus\n",
old_timeout[0], new_timeout[0],
old_timeout[1], new_timeout[1],
@@ -612,7 +612,7 @@
chip->vendor.duration[TPM_MEDIUM] *= 1000;
chip->vendor.duration[TPM_LONG] *= 1000;
chip->vendor.duration_adjusted = true;
- dev_info(chip->pdev, "Adjusting TPM timeout parameters.");
+ dev_info(&chip->dev, "Adjusting TPM timeout parameters.");
}
return 0;
}
@@ -687,7 +687,7 @@
rc = (chip->flags & TPM_CHIP_FLAG_TPM2) != 0;
- tpm_chip_put(chip);
+ tpm_put_ops(chip);
return rc;
}
@@ -716,7 +716,7 @@
rc = tpm2_pcr_read(chip, pcr_idx, res_buf);
else
rc = tpm_pcr_read_dev(chip, pcr_idx, res_buf);
- tpm_chip_put(chip);
+ tpm_put_ops(chip);
return rc;
}
EXPORT_SYMBOL_GPL(tpm_pcr_read);
@@ -751,7 +751,7 @@
if (chip->flags & TPM_CHIP_FLAG_TPM2) {
rc = tpm2_pcr_extend(chip, pcr_idx, hash);
- tpm_chip_put(chip);
+ tpm_put_ops(chip);
return rc;
}
@@ -761,7 +761,7 @@
rc = tpm_transmit_cmd(chip, &cmd, EXTEND_PCR_RESULT_SIZE, 0,
"attempting extend a PCR value");
- tpm_chip_put(chip);
+ tpm_put_ops(chip);
return rc;
}
EXPORT_SYMBOL_GPL(tpm_pcr_extend);
@@ -802,7 +802,9 @@
* around 300ms while the self test is ongoing, keep trying
* until the self test duration expires. */
if (rc == -ETIME) {
- dev_info(chip->pdev, HW_ERR "TPM command timed out during continue self test");
+ dev_info(
+ &chip->dev, HW_ERR
+ "TPM command timed out during continue self test");
msleep(delay_msec);
continue;
}
@@ -812,7 +814,7 @@
rc = be32_to_cpu(cmd.header.out.return_code);
if (rc == TPM_ERR_DISABLED || rc == TPM_ERR_DEACTIVATED) {
- dev_info(chip->pdev,
+ dev_info(&chip->dev,
"TPM is disabled/deactivated (0x%X)\n", rc);
/* TPM is disabled and/or deactivated; driver can
* proceed and TPM does handle commands for
@@ -840,7 +842,7 @@
rc = tpm_transmit_cmd(chip, cmd, buflen, 0, "attempting tpm_cmd");
- tpm_chip_put(chip);
+ tpm_put_ops(chip);
return rc;
}
EXPORT_SYMBOL_GPL(tpm_send);
@@ -966,10 +968,10 @@
}
if (rc)
- dev_err(chip->pdev,
+ dev_err(&chip->dev,
"Error (%d) sending savestate before suspend\n", rc);
else if (try > 0)
- dev_warn(chip->pdev, "TPM savestate took %dms\n",
+ dev_warn(&chip->dev, "TPM savestate took %dms\n",
try * TPM_TIMEOUT_RETRY);
return rc;
@@ -1023,7 +1025,7 @@
if (chip->flags & TPM_CHIP_FLAG_TPM2) {
err = tpm2_get_random(chip, out, max);
- tpm_chip_put(chip);
+ tpm_put_ops(chip);
return err;
}
@@ -1045,7 +1047,7 @@
num_bytes -= recd;
} while (retries-- && total < max);
- tpm_chip_put(chip);
+ tpm_put_ops(chip);
return total ? total : -EIO;
}
EXPORT_SYMBOL_GPL(tpm_get_random);
@@ -1071,7 +1073,7 @@
rc = tpm2_seal_trusted(chip, payload, options);
- tpm_chip_put(chip);
+ tpm_put_ops(chip);
return rc;
}
EXPORT_SYMBOL_GPL(tpm_seal_trusted);
@@ -1097,7 +1099,8 @@
rc = tpm2_unseal_trusted(chip, payload, options);
- tpm_chip_put(chip);
+ tpm_put_ops(chip);
+
return rc;
}
EXPORT_SYMBOL_GPL(tpm_unseal_trusted);
@@ -1124,6 +1127,7 @@
static void __exit tpm_exit(void)
{
+ idr_destroy(&dev_nums_idr);
class_destroy(tpm_class);
unregister_chrdev_region(tpm_devt, TPM_NUM_DEVICES);
}
diff --git a/drivers/char/tpm/tpm-sysfs.c b/drivers/char/tpm/tpm-sysfs.c
index f880856..06ac6e9 100644
--- a/drivers/char/tpm/tpm-sysfs.c
+++ b/drivers/char/tpm/tpm-sysfs.c
@@ -38,6 +38,8 @@
struct tpm_chip *chip = dev_get_drvdata(dev);
+ memset(&tpm_cmd, 0, sizeof(tpm_cmd));
+
tpm_cmd.header.in = tpm_readpubek_header;
err = tpm_transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE, 0,
"attempting to read the PUBEK");
@@ -284,16 +286,28 @@
int tpm_sysfs_add_device(struct tpm_chip *chip)
{
int err;
- err = sysfs_create_group(&chip->pdev->kobj,
+
+ /* XXX: If you wish to remove this restriction, you must first update
+ * tpm_sysfs to explicitly lock chip->ops.
+ */
+ if (chip->flags & TPM_CHIP_FLAG_TPM2)
+ return 0;
+
+ err = sysfs_create_group(&chip->dev.parent->kobj,
&tpm_dev_group);
if (err)
- dev_err(chip->pdev,
+ dev_err(&chip->dev,
"failed to create sysfs attributes, %d\n", err);
return err;
}
void tpm_sysfs_del_device(struct tpm_chip *chip)
{
- sysfs_remove_group(&chip->pdev->kobj, &tpm_dev_group);
+ /* The sysfs routines rely on an implicit tpm_try_get_ops, this
+ * function is called before ops is null'd and the sysfs core
+ * synchronizes this removal so that no callbacks are running or can
+ * run again
+ */
+ sysfs_remove_group(&chip->dev.parent->kobj, &tpm_dev_group);
}
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index 2216861..772d99b 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -34,7 +34,7 @@
enum tpm_const {
TPM_MINOR = 224, /* officially assigned */
TPM_BUFSIZE = 4096,
- TPM_NUM_DEVICES = 256,
+ TPM_NUM_DEVICES = 65536,
TPM_RETRY = 50, /* 5 seconds */
};
@@ -171,11 +171,16 @@
};
struct tpm_chip {
- struct device *pdev; /* Device stuff */
struct device dev;
struct cdev cdev;
+ /* A driver callback under ops cannot be run unless ops_sem is held
+ * (sometimes implicitly, eg for the sysfs code). ops becomes null
+ * when the driver is unregistered, see tpm_try_get_ops.
+ */
+ struct rw_semaphore ops_sem;
const struct tpm_class_ops *ops;
+
unsigned int flags;
int dev_num; /* /dev/tpm# */
@@ -195,17 +200,10 @@
acpi_handle acpi_dev_handle;
char ppi_version[TPM_PPI_VERSION_LEN + 1];
#endif /* CONFIG_ACPI */
-
- struct list_head list;
};
#define to_tpm_chip(d) container_of(d, struct tpm_chip, dev)
-static inline void tpm_chip_put(struct tpm_chip *chip)
-{
- module_put(chip->pdev->driver->owner);
-}
-
static inline int tpm_read_index(int base, int index)
{
outb(index, base);
@@ -497,6 +495,7 @@
extern struct class *tpm_class;
extern dev_t tpm_devt;
extern const struct file_operations tpm_fops;
+extern struct idr dev_nums_idr;
enum tpm_transmit_flags {
TPM_TRANSMIT_UNLOCKED = BIT(0),
@@ -517,6 +516,9 @@
wait_queue_head_t *, bool);
struct tpm_chip *tpm_chip_find_get(int chip_num);
+__must_check int tpm_try_get_ops(struct tpm_chip *chip);
+void tpm_put_ops(struct tpm_chip *chip);
+
extern struct tpm_chip *tpmm_chip_alloc(struct device *dev,
const struct tpm_class_ops *ops);
extern int tpm_chip_register(struct tpm_chip *chip);
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index cb7e4f6..286bd09 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -570,7 +570,7 @@
rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_FLUSH_CONTEXT);
if (rc) {
- dev_warn(chip->pdev, "0x%08x was not flushed, out of memory\n",
+ dev_warn(&chip->dev, "0x%08x was not flushed, out of memory\n",
handle);
return;
}
@@ -580,7 +580,7 @@
rc = tpm_transmit_cmd(chip, buf.data, PAGE_SIZE, flags,
"flushing context");
if (rc)
- dev_warn(chip->pdev, "0x%08x was not flushed, rc=%d\n", handle,
+ dev_warn(&chip->dev, "0x%08x was not flushed, rc=%d\n", handle,
rc);
tpm_buf_destroy(&buf);
@@ -753,7 +753,7 @@
* except print the error code on a system failure.
*/
if (rc < 0)
- dev_warn(chip->pdev, "transmit returned %d while stopping the TPM",
+ dev_warn(&chip->dev, "transmit returned %d while stopping the TPM",
rc);
}
EXPORT_SYMBOL_GPL(tpm2_shutdown);
@@ -820,7 +820,7 @@
* immediately. This is a workaround for that.
*/
if (rc == TPM2_RC_TESTING) {
- dev_warn(chip->pdev, "Got RC_TESTING, ignoring\n");
+ dev_warn(&chip->dev, "Got RC_TESTING, ignoring\n");
rc = 0;
}
diff --git a/drivers/char/tpm/tpm_atmel.c b/drivers/char/tpm/tpm_atmel.c
index dfadad0..a48a878 100644
--- a/drivers/char/tpm/tpm_atmel.c
+++ b/drivers/char/tpm/tpm_atmel.c
@@ -49,7 +49,7 @@
for (i = 0; i < 6; i++) {
status = ioread8(chip->vendor.iobase + 1);
if ((status & ATML_STATUS_DATA_AVAIL) == 0) {
- dev_err(chip->pdev, "error reading header\n");
+ dev_err(&chip->dev, "error reading header\n");
return -EIO;
}
*buf++ = ioread8(chip->vendor.iobase);
@@ -60,12 +60,12 @@
size = be32_to_cpu(*native_size);
if (count < size) {
- dev_err(chip->pdev,
+ dev_err(&chip->dev,
"Recv size(%d) less than available space\n", size);
for (; i < size; i++) { /* clear the waiting data anyway */
status = ioread8(chip->vendor.iobase + 1);
if ((status & ATML_STATUS_DATA_AVAIL) == 0) {
- dev_err(chip->pdev, "error reading data\n");
+ dev_err(&chip->dev, "error reading data\n");
return -EIO;
}
}
@@ -76,7 +76,7 @@
for (; i < size; i++) {
status = ioread8(chip->vendor.iobase + 1);
if ((status & ATML_STATUS_DATA_AVAIL) == 0) {
- dev_err(chip->pdev, "error reading data\n");
+ dev_err(&chip->dev, "error reading data\n");
return -EIO;
}
*buf++ = ioread8(chip->vendor.iobase);
@@ -86,7 +86,7 @@
status = ioread8(chip->vendor.iobase + 1);
if (status & ATML_STATUS_DATA_AVAIL) {
- dev_err(chip->pdev, "data available is stuck\n");
+ dev_err(&chip->dev, "data available is stuck\n");
return -EIO;
}
@@ -97,9 +97,9 @@
{
int i;
- dev_dbg(chip->pdev, "tpm_atml_send:\n");
+ dev_dbg(&chip->dev, "tpm_atml_send:\n");
for (i = 0; i < count; i++) {
- dev_dbg(chip->pdev, "%d 0x%x(%d)\n", i, buf[i], buf[i]);
+ dev_dbg(&chip->dev, "%d 0x%x(%d)\n", i, buf[i], buf[i]);
iowrite8(buf[i], chip->vendor.iobase);
}
diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
index 2b21398..35308df 100644
--- a/drivers/char/tpm/tpm_crb.c
+++ b/drivers/char/tpm/tpm_crb.c
@@ -118,8 +118,7 @@
memcpy_fromio(buf, priv->rsp, 6);
expected = be32_to_cpup((__be32 *) &buf[2]);
-
- if (expected > count)
+ if (expected > count || expected < 6)
return -EIO;
memcpy_fromio(&buf[6], &priv->rsp[6], expected - 6);
diff --git a/drivers/char/tpm/tpm_i2c_atmel.c b/drivers/char/tpm/tpm_i2c_atmel.c
index 8dfb88b..dd8f0eb 100644
--- a/drivers/char/tpm/tpm_i2c_atmel.c
+++ b/drivers/char/tpm/tpm_i2c_atmel.c
@@ -52,7 +52,7 @@
static int i2c_atmel_send(struct tpm_chip *chip, u8 *buf, size_t len)
{
struct priv_data *priv = chip->vendor.priv;
- struct i2c_client *client = to_i2c_client(chip->pdev);
+ struct i2c_client *client = to_i2c_client(chip->dev.parent);
s32 status;
priv->len = 0;
@@ -62,7 +62,7 @@
status = i2c_master_send(client, buf, len);
- dev_dbg(chip->pdev,
+ dev_dbg(&chip->dev,
"%s(buf=%*ph len=%0zx) -> sts=%d\n", __func__,
(int)min_t(size_t, 64, len), buf, len, status);
return status;
@@ -71,7 +71,7 @@
static int i2c_atmel_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
struct priv_data *priv = chip->vendor.priv;
- struct i2c_client *client = to_i2c_client(chip->pdev);
+ struct i2c_client *client = to_i2c_client(chip->dev.parent);
struct tpm_output_header *hdr =
(struct tpm_output_header *)priv->buffer;
u32 expected_len;
@@ -88,7 +88,7 @@
return -ENOMEM;
if (priv->len >= expected_len) {
- dev_dbg(chip->pdev,
+ dev_dbg(&chip->dev,
"%s early(buf=%*ph count=%0zx) -> ret=%d\n", __func__,
(int)min_t(size_t, 64, expected_len), buf, count,
expected_len);
@@ -97,7 +97,7 @@
}
rc = i2c_master_recv(client, buf, expected_len);
- dev_dbg(chip->pdev,
+ dev_dbg(&chip->dev,
"%s reread(buf=%*ph count=%0zx) -> ret=%d\n", __func__,
(int)min_t(size_t, 64, expected_len), buf, count,
expected_len);
@@ -106,13 +106,13 @@
static void i2c_atmel_cancel(struct tpm_chip *chip)
{
- dev_err(chip->pdev, "TPM operation cancellation was requested, but is not supported");
+ dev_err(&chip->dev, "TPM operation cancellation was requested, but is not supported");
}
static u8 i2c_atmel_read_status(struct tpm_chip *chip)
{
struct priv_data *priv = chip->vendor.priv;
- struct i2c_client *client = to_i2c_client(chip->pdev);
+ struct i2c_client *client = to_i2c_client(chip->dev.parent);
int rc;
/* The TPM fails the I2C read until it is ready, so we do the entire
@@ -125,7 +125,7 @@
/* Once the TPM has completed the command the command remains readable
* until another command is issued. */
rc = i2c_master_recv(client, priv->buffer, sizeof(priv->buffer));
- dev_dbg(chip->pdev,
+ dev_dbg(&chip->dev,
"%s: sts=%d", __func__, rc);
if (rc <= 0)
return 0;
diff --git a/drivers/char/tpm/tpm_i2c_infineon.c b/drivers/char/tpm/tpm_i2c_infineon.c
index 63d5d22e..f2aa99e 100644
--- a/drivers/char/tpm/tpm_i2c_infineon.c
+++ b/drivers/char/tpm/tpm_i2c_infineon.c
@@ -446,7 +446,7 @@
/* read first 10 bytes, including tag, paramsize, and result */
size = recv_data(chip, buf, TPM_HEADER_SIZE);
if (size < TPM_HEADER_SIZE) {
- dev_err(chip->pdev, "Unable to read header\n");
+ dev_err(&chip->dev, "Unable to read header\n");
goto out;
}
@@ -459,14 +459,14 @@
size += recv_data(chip, &buf[TPM_HEADER_SIZE],
expected - TPM_HEADER_SIZE);
if (size < expected) {
- dev_err(chip->pdev, "Unable to read remainder of result\n");
+ dev_err(&chip->dev, "Unable to read remainder of result\n");
size = -ETIME;
goto out;
}
wait_for_stat(chip, TPM_STS_VALID, chip->vendor.timeout_c, &status);
if (status & TPM_STS_DATA_AVAIL) { /* retry? */
- dev_err(chip->pdev, "Error left over data\n");
+ dev_err(&chip->dev, "Error left over data\n");
size = -EIO;
goto out;
}
diff --git a/drivers/char/tpm/tpm_i2c_nuvoton.c b/drivers/char/tpm/tpm_i2c_nuvoton.c
index 847f159..a1e1474 100644
--- a/drivers/char/tpm/tpm_i2c_nuvoton.c
+++ b/drivers/char/tpm/tpm_i2c_nuvoton.c
@@ -96,13 +96,13 @@
/* read TPM_STS register */
static u8 i2c_nuvoton_read_status(struct tpm_chip *chip)
{
- struct i2c_client *client = to_i2c_client(chip->pdev);
+ struct i2c_client *client = to_i2c_client(chip->dev.parent);
s32 status;
u8 data;
status = i2c_nuvoton_read_buf(client, TPM_STS, 1, &data);
if (status <= 0) {
- dev_err(chip->pdev, "%s() error return %d\n", __func__,
+ dev_err(&chip->dev, "%s() error return %d\n", __func__,
status);
data = TPM_STS_ERR_VAL;
}
@@ -127,13 +127,13 @@
/* write commandReady to TPM_STS register */
static void i2c_nuvoton_ready(struct tpm_chip *chip)
{
- struct i2c_client *client = to_i2c_client(chip->pdev);
+ struct i2c_client *client = to_i2c_client(chip->dev.parent);
s32 status;
/* this causes the current command to be aborted */
status = i2c_nuvoton_write_status(client, TPM_STS_COMMAND_READY);
if (status < 0)
- dev_err(chip->pdev,
+ dev_err(&chip->dev,
"%s() fail to write TPM_STS.commandReady\n", __func__);
}
@@ -212,7 +212,7 @@
return 0;
} while (time_before(jiffies, stop));
}
- dev_err(chip->pdev, "%s(%02x, %02x) -> timeout\n", __func__, mask,
+ dev_err(&chip->dev, "%s(%02x, %02x) -> timeout\n", __func__, mask,
value);
return -ETIMEDOUT;
}
@@ -240,7 +240,7 @@
&chip->vendor.read_queue) == 0) {
burst_count = i2c_nuvoton_get_burstcount(client, chip);
if (burst_count < 0) {
- dev_err(chip->pdev,
+ dev_err(&chip->dev,
"%s() fail to read burstCount=%d\n", __func__,
burst_count);
return -EIO;
@@ -249,12 +249,12 @@
rc = i2c_nuvoton_read_buf(client, TPM_DATA_FIFO_R,
bytes2read, &buf[size]);
if (rc < 0) {
- dev_err(chip->pdev,
+ dev_err(&chip->dev,
"%s() fail on i2c_nuvoton_read_buf()=%d\n",
__func__, rc);
return -EIO;
}
- dev_dbg(chip->pdev, "%s(%d):", __func__, bytes2read);
+ dev_dbg(&chip->dev, "%s(%d):", __func__, bytes2read);
size += bytes2read;
}
@@ -264,7 +264,7 @@
/* Read TPM command results */
static int i2c_nuvoton_recv(struct tpm_chip *chip, u8 *buf, size_t count)
{
- struct device *dev = chip->pdev;
+ struct device *dev = chip->dev.parent;
struct i2c_client *client = to_i2c_client(dev);
s32 rc;
int expected, status, burst_count, retries, size = 0;
@@ -334,7 +334,7 @@
break;
}
i2c_nuvoton_ready(chip);
- dev_dbg(chip->pdev, "%s() -> %d\n", __func__, size);
+ dev_dbg(&chip->dev, "%s() -> %d\n", __func__, size);
return size;
}
@@ -347,7 +347,7 @@
*/
static int i2c_nuvoton_send(struct tpm_chip *chip, u8 *buf, size_t len)
{
- struct device *dev = chip->pdev;
+ struct device *dev = chip->dev.parent;
struct i2c_client *client = to_i2c_client(dev);
u32 ordinal;
size_t count = 0;
diff --git a/drivers/char/tpm/tpm_infineon.c b/drivers/char/tpm/tpm_infineon.c
index 6c488e6..e3cf9f3 100644
--- a/drivers/char/tpm/tpm_infineon.c
+++ b/drivers/char/tpm/tpm_infineon.c
@@ -195,9 +195,9 @@
}
if (i == TPM_MAX_TRIES) { /* timeout occurs */
if (wait_for_bit == STAT_XFE)
- dev_err(chip->pdev, "Timeout in wait(STAT_XFE)\n");
+ dev_err(&chip->dev, "Timeout in wait(STAT_XFE)\n");
if (wait_for_bit == STAT_RDA)
- dev_err(chip->pdev, "Timeout in wait(STAT_RDA)\n");
+ dev_err(&chip->dev, "Timeout in wait(STAT_RDA)\n");
return -EIO;
}
return 0;
@@ -220,7 +220,7 @@
static void tpm_wtx(struct tpm_chip *chip)
{
number_of_wtx++;
- dev_info(chip->pdev, "Granting WTX (%02d / %02d)\n",
+ dev_info(&chip->dev, "Granting WTX (%02d / %02d)\n",
number_of_wtx, TPM_MAX_WTX_PACKAGES);
wait_and_send(chip, TPM_VL_VER);
wait_and_send(chip, TPM_CTRL_WTX);
@@ -231,7 +231,7 @@
static void tpm_wtx_abort(struct tpm_chip *chip)
{
- dev_info(chip->pdev, "Aborting WTX\n");
+ dev_info(&chip->dev, "Aborting WTX\n");
wait_and_send(chip, TPM_VL_VER);
wait_and_send(chip, TPM_CTRL_WTX_ABORT);
wait_and_send(chip, 0x00);
@@ -257,7 +257,7 @@
}
if (buf[0] != TPM_VL_VER) {
- dev_err(chip->pdev,
+ dev_err(&chip->dev,
"Wrong transport protocol implementation!\n");
return -EIO;
}
@@ -272,7 +272,7 @@
}
if ((size == 0x6D00) && (buf[1] == 0x80)) {
- dev_err(chip->pdev, "Error handling on vendor layer!\n");
+ dev_err(&chip->dev, "Error handling on vendor layer!\n");
return -EIO;
}
@@ -284,7 +284,7 @@
}
if (buf[1] == TPM_CTRL_WTX) {
- dev_info(chip->pdev, "WTX-package received\n");
+ dev_info(&chip->dev, "WTX-package received\n");
if (number_of_wtx < TPM_MAX_WTX_PACKAGES) {
tpm_wtx(chip);
goto recv_begin;
@@ -295,14 +295,14 @@
}
if (buf[1] == TPM_CTRL_WTX_ABORT_ACK) {
- dev_info(chip->pdev, "WTX-abort acknowledged\n");
+ dev_info(&chip->dev, "WTX-abort acknowledged\n");
return size;
}
if (buf[1] == TPM_CTRL_ERROR) {
- dev_err(chip->pdev, "ERROR-package received:\n");
+ dev_err(&chip->dev, "ERROR-package received:\n");
if (buf[4] == TPM_INF_NAK)
- dev_err(chip->pdev,
+ dev_err(&chip->dev,
"-> Negative acknowledgement"
" - retransmit command!\n");
return -EIO;
@@ -321,7 +321,7 @@
ret = empty_fifo(chip, 1);
if (ret) {
- dev_err(chip->pdev, "Timeout while clearing FIFO\n");
+ dev_err(&chip->dev, "Timeout while clearing FIFO\n");
return -EIO;
}
diff --git a/drivers/char/tpm/tpm_nsc.c b/drivers/char/tpm/tpm_nsc.c
index 289389e..766370b 100644
--- a/drivers/char/tpm/tpm_nsc.c
+++ b/drivers/char/tpm/tpm_nsc.c
@@ -113,7 +113,7 @@
}
while (time_before(jiffies, stop));
- dev_info(chip->pdev, "wait for ready failed\n");
+ dev_info(&chip->dev, "wait for ready failed\n");
return -EBUSY;
}
@@ -129,12 +129,12 @@
return -EIO;
if (wait_for_stat(chip, NSC_STATUS_F0, NSC_STATUS_F0, &data) < 0) {
- dev_err(chip->pdev, "F0 timeout\n");
+ dev_err(&chip->dev, "F0 timeout\n");
return -EIO;
}
if ((data =
inb(chip->vendor.base + NSC_DATA)) != NSC_COMMAND_NORMAL) {
- dev_err(chip->pdev, "not in normal mode (0x%x)\n",
+ dev_err(&chip->dev, "not in normal mode (0x%x)\n",
data);
return -EIO;
}
@@ -143,7 +143,7 @@
for (p = buffer; p < &buffer[count]; p++) {
if (wait_for_stat
(chip, NSC_STATUS_OBF, NSC_STATUS_OBF, &data) < 0) {
- dev_err(chip->pdev,
+ dev_err(&chip->dev,
"OBF timeout (while reading data)\n");
return -EIO;
}
@@ -154,11 +154,11 @@
if ((data & NSC_STATUS_F0) == 0 &&
(wait_for_stat(chip, NSC_STATUS_F0, NSC_STATUS_F0, &data) < 0)) {
- dev_err(chip->pdev, "F0 not set\n");
+ dev_err(&chip->dev, "F0 not set\n");
return -EIO;
}
if ((data = inb(chip->vendor.base + NSC_DATA)) != NSC_COMMAND_EOC) {
- dev_err(chip->pdev,
+ dev_err(&chip->dev,
"expected end of command(0x%x)\n", data);
return -EIO;
}
@@ -189,19 +189,19 @@
return -EIO;
if (wait_for_stat(chip, NSC_STATUS_IBF, 0, &data) < 0) {
- dev_err(chip->pdev, "IBF timeout\n");
+ dev_err(&chip->dev, "IBF timeout\n");
return -EIO;
}
outb(NSC_COMMAND_NORMAL, chip->vendor.base + NSC_COMMAND);
if (wait_for_stat(chip, NSC_STATUS_IBR, NSC_STATUS_IBR, &data) < 0) {
- dev_err(chip->pdev, "IBR timeout\n");
+ dev_err(&chip->dev, "IBR timeout\n");
return -EIO;
}
for (i = 0; i < count; i++) {
if (wait_for_stat(chip, NSC_STATUS_IBF, 0, &data) < 0) {
- dev_err(chip->pdev,
+ dev_err(&chip->dev,
"IBF timeout (while writing data)\n");
return -EIO;
}
@@ -209,7 +209,7 @@
}
if (wait_for_stat(chip, NSC_STATUS_IBF, 0, &data) < 0) {
- dev_err(chip->pdev, "IBF timeout\n");
+ dev_err(&chip->dev, "IBF timeout\n");
return -EIO;
}
outb(NSC_COMMAND_EOC, chip->vendor.base + NSC_COMMAND);
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index 65f7eec..7f13221a 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -293,7 +293,7 @@
/* read first 10 bytes, including tag, paramsize, and result */
if ((size =
recv_data(chip, buf, TPM_HEADER_SIZE)) < TPM_HEADER_SIZE) {
- dev_err(chip->pdev, "Unable to read header\n");
+ dev_err(&chip->dev, "Unable to read header\n");
goto out;
}
@@ -306,7 +306,7 @@
if ((size +=
recv_data(chip, &buf[TPM_HEADER_SIZE],
expected - TPM_HEADER_SIZE)) < expected) {
- dev_err(chip->pdev, "Unable to read remainder of result\n");
+ dev_err(&chip->dev, "Unable to read remainder of result\n");
size = -ETIME;
goto out;
}
@@ -315,7 +315,7 @@
&chip->vendor.int_queue, false);
status = tpm_tis_status(chip);
if (status & TPM_STS_DATA_AVAIL) { /* retry? */
- dev_err(chip->pdev, "Error left over data\n");
+ dev_err(&chip->dev, "Error left over data\n");
size = -EIO;
goto out;
}
@@ -401,7 +401,7 @@
iowrite32(intmask,
chip->vendor.iobase +
TPM_INT_ENABLE(chip->vendor.locality));
- free_irq(chip->vendor.irq, chip);
+ devm_free_irq(&chip->dev, chip->vendor.irq, chip);
chip->vendor.irq = 0;
}
@@ -463,7 +463,7 @@
msleep(1);
if (!priv->irq_tested) {
disable_interrupts(chip);
- dev_err(chip->pdev,
+ dev_err(&chip->dev,
FW_BUG "TPM interrupt not working, polling instead\n");
}
priv->irq_tested = true;
@@ -533,7 +533,7 @@
rc = tpm_tis_send_data(chip, cmd_getticks, len);
if (rc == 0) {
- dev_info(chip->pdev, "Detected an iTPM.\n");
+ dev_info(&chip->dev, "Detected an iTPM.\n");
rc = 1;
} else
rc = -EFAULT;
@@ -766,7 +766,7 @@
if (devm_request_irq
(dev, i, tis_int_probe, IRQF_SHARED,
chip->devname, chip) != 0) {
- dev_info(chip->pdev,
+ dev_info(&chip->dev,
"Unable to request irq: %d for probe\n",
i);
continue;
@@ -818,7 +818,7 @@
if (devm_request_irq
(dev, chip->vendor.irq, tis_int_handler, IRQF_SHARED,
chip->devname, chip) != 0) {
- dev_info(chip->pdev,
+ dev_info(&chip->dev,
"Unable to request irq: %d for use\n",
chip->vendor.irq);
chip->vendor.irq = 0;
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 090183f..be0b09a 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1130,6 +1130,8 @@
{
struct port *port;
struct scatterlist sg[1];
+ void *data;
+ int ret;
if (unlikely(early_put_chars))
return early_put_chars(vtermno, buf, count);
@@ -1138,8 +1140,14 @@
if (!port)
return -EPIPE;
- sg_init_one(sg, buf, count);
- return __send_to_port(port, sg, 1, count, (void *)buf, false);
+ data = kmemdup(buf, count, GFP_ATOMIC);
+ if (!data)
+ return -ENOMEM;
+
+ sg_init_one(sg, data, count);
+ ret = __send_to_port(port, sg, 1, count, data, false);
+ kfree(data);
+ return ret;
}
/*
@@ -1856,7 +1864,7 @@
{
struct ports_device *portdev;
- portdev = container_of(work, struct ports_device, control_work);
+ portdev = container_of(work, struct ports_device, config_work);
if (!use_multiport(portdev)) {
struct virtio_device *vdev;
struct port *port;
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index 37f67c7..0082b30 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -78,7 +78,9 @@
obj-$(CONFIG_ARCH_OMAP2PLUS) += ti/
obj-$(CONFIG_ARCH_U8500) += ux500/
obj-$(CONFIG_COMMON_CLK_VERSATILE) += versatile/
+ifeq ($(CONFIG_COMMON_CLK), y)
obj-$(CONFIG_X86) += x86/
+endif
obj-$(CONFIG_ARCH_ZX) += zte/
obj-$(CONFIG_ARCH_ZYNQ) += zynq/
obj-$(CONFIG_H8300) += h8300/
diff --git a/drivers/clk/sunxi/clk-simple-gates.c b/drivers/clk/sunxi/clk-simple-gates.c
index 0214c65..97cb422 100644
--- a/drivers/clk/sunxi/clk-simple-gates.c
+++ b/drivers/clk/sunxi/clk-simple-gates.c
@@ -98,6 +98,8 @@
sunxi_simple_gates_setup(node, NULL, 0);
}
+CLK_OF_DECLARE(sun4i_a10_gates, "allwinner,sun4i-a10-gates-clk",
+ sunxi_simple_gates_init);
CLK_OF_DECLARE(sun4i_a10_apb0, "allwinner,sun4i-a10-apb0-gates-clk",
sunxi_simple_gates_init);
CLK_OF_DECLARE(sun4i_a10_apb1, "allwinner,sun4i-a10-apb1-gates-clk",
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index d4b2b87..ac94a80 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -730,9 +730,11 @@
char *buf)
{
unsigned int cur_freq = __cpufreq_get(policy);
- if (!cur_freq)
- return sprintf(buf, "<unknown>");
- return sprintf(buf, "%u\n", cur_freq);
+
+ if (cur_freq)
+ return sprintf(buf, "%u\n", cur_freq);
+
+ return sprintf(buf, "<unknown>\n");
}
/**
@@ -2568,6 +2570,7 @@
if (!(cpufreq_driver->flags & CPUFREQ_STICKY) &&
list_empty(&cpufreq_policy_list)) {
/* if all ->init() calls failed, unregister */
+ ret = -ENODEV;
pr_debug("%s: No CPU initialized for driver %s\n", __func__,
driver_data->name);
goto err_if_unreg;
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 1fa1deb..c395f91 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -212,8 +212,8 @@
int ret;
ret = sscanf(buf, "%u", &input);
- /* cannot be lower than 11 otherwise freq will not fall */
- if (ret != 1 || input < 11 || input > 100 ||
+ /* cannot be lower than 1 otherwise freq will not fall */
+ if (ret != 1 || input < 1 || input > 100 ||
input >= cs_tuners->up_threshold)
return -EINVAL;
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index 1be58ff..f7d75be 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -9,38 +9,269 @@
* published by the Free Software Foundation.
*/
+#include <linux/atomic.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
-#include <linux/module.h>
-#include <linux/slab.h>
#include <linux/cputime.h>
+#include <linux/hashtable.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/proc_fs.h>
+#include <linux/profile.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/sort.h>
+
+#define UID_HASH_BITS 10
+
+DECLARE_HASHTABLE(uid_hash_table, UID_HASH_BITS);
static spinlock_t cpufreq_stats_lock;
+static DEFINE_SPINLOCK(task_time_in_state_lock); /* task->time_in_state */
+static DEFINE_RT_MUTEX(uid_lock); /* uid_hash_table */
+
+struct uid_entry {
+ uid_t uid;
+ unsigned int dead_max_state;
+ unsigned int alive_max_state;
+ u64 *dead_time_in_state;
+ u64 *alive_time_in_state;
+ struct hlist_node hash;
+};
+
struct cpufreq_stats {
unsigned int total_trans;
unsigned long long last_time;
unsigned int max_state;
- unsigned int state_num;
- unsigned int last_index;
+ int prev_states;
+ atomic_t curr_state;
u64 *time_in_state;
unsigned int *freq_table;
-#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
- unsigned int *trans_table;
-#endif
};
+static int cpufreq_max_state;
+static int cpufreq_last_max_state;
+static unsigned int *cpufreq_states;
+static bool cpufreq_stats_initialized;
+
+struct cpufreq_stats_attribute {
+ struct attribute attr;
+
+ ssize_t (*show)(struct cpufreq_stats *, char *);
+};
+
+/* Caller must hold uid lock */
+static struct uid_entry *find_uid_entry(uid_t uid)
+{
+ struct uid_entry *uid_entry;
+
+ hash_for_each_possible(uid_hash_table, uid_entry, hash, uid) {
+ if (uid_entry->uid == uid)
+ return uid_entry;
+ }
+ return NULL;
+}
+
+/* Caller must hold uid lock */
+static struct uid_entry *find_or_register_uid(uid_t uid)
+{
+ struct uid_entry *uid_entry;
+
+ uid_entry = find_uid_entry(uid);
+ if (uid_entry)
+ return uid_entry;
+
+ uid_entry = kzalloc(sizeof(struct uid_entry), GFP_ATOMIC);
+ if (!uid_entry)
+ return NULL;
+
+ uid_entry->uid = uid;
+
+ hash_add(uid_hash_table, &uid_entry->hash, uid);
+
+ return uid_entry;
+}
+
+
+static int uid_time_in_state_show(struct seq_file *m, void *v)
+{
+ struct uid_entry *uid_entry;
+ struct task_struct *task, *temp;
+ unsigned long bkt, flags;
+ struct cpufreq_policy *last_policy = NULL;
+ int i;
+
+ if (!cpufreq_stats_initialized)
+ return 0;
+
+ seq_puts(m, "uid:");
+ for_each_possible_cpu(i) {
+ struct cpufreq_frequency_table *table, *pos;
+ struct cpufreq_policy *policy;
+
+ policy = cpufreq_cpu_get(i);
+ if (!policy)
+ continue;
+ table = cpufreq_frequency_get_table(i);
+
+ /* Assumes cpus are colocated within a policy */
+ if (table && last_policy != policy) {
+ last_policy = policy;
+ cpufreq_for_each_valid_entry(pos, table)
+ seq_printf(m, " %d", pos->frequency);
+ }
+ cpufreq_cpu_put(policy);
+ }
+ seq_putc(m, '\n');
+
+ rt_mutex_lock(&uid_lock);
+
+ rcu_read_lock();
+ do_each_thread(temp, task) {
+
+ uid_entry = find_or_register_uid(from_kuid_munged(
+ current_user_ns(), task_uid(task)));
+ if (!uid_entry)
+ continue;
+
+ if (uid_entry->alive_max_state < task->max_state) {
+ uid_entry->alive_time_in_state = krealloc(
+ uid_entry->alive_time_in_state,
+ task->max_state *
+ sizeof(uid_entry->alive_time_in_state[0]),
+ GFP_ATOMIC);
+ memset(uid_entry->alive_time_in_state +
+ uid_entry->alive_max_state,
+ 0, (task->max_state -
+ uid_entry->alive_max_state) *
+ sizeof(uid_entry->alive_time_in_state[0]));
+ uid_entry->alive_max_state = task->max_state;
+ }
+
+ spin_lock_irqsave(&task_time_in_state_lock, flags);
+ if (task->time_in_state) {
+ for (i = 0; i < task->max_state; ++i) {
+ uid_entry->alive_time_in_state[i] +=
+ atomic_read(&task->time_in_state[i]);
+ }
+ }
+ spin_unlock_irqrestore(&task_time_in_state_lock, flags);
+
+ } while_each_thread(temp, task);
+ rcu_read_unlock();
+
+ hash_for_each(uid_hash_table, bkt, uid_entry, hash) {
+ int max_state = uid_entry->dead_max_state;
+
+ if (uid_entry->alive_max_state > max_state)
+ max_state = uid_entry->alive_max_state;
+ if (max_state)
+ seq_printf(m, "%d:", uid_entry->uid);
+ for (i = 0; i < max_state; ++i) {
+ u64 total_time_in_state = 0;
+
+ if (uid_entry->dead_time_in_state &&
+ i < uid_entry->dead_max_state) {
+ total_time_in_state =
+ uid_entry->dead_time_in_state[i];
+ }
+ if (uid_entry->alive_time_in_state &&
+ i < uid_entry->alive_max_state) {
+ total_time_in_state +=
+ uid_entry->alive_time_in_state[i];
+ }
+ seq_printf(m, " %lu", (unsigned long)
+ cputime_to_clock_t(total_time_in_state));
+ }
+ if (max_state)
+ seq_putc(m, '\n');
+
+ kfree(uid_entry->alive_time_in_state);
+ uid_entry->alive_time_in_state = NULL;
+ uid_entry->alive_max_state = 0;
+ }
+
+ rt_mutex_unlock(&uid_lock);
+ return 0;
+}
+
static int cpufreq_stats_update(struct cpufreq_stats *stats)
{
unsigned long long cur_time = get_jiffies_64();
spin_lock(&cpufreq_stats_lock);
- stats->time_in_state[stats->last_index] += cur_time - stats->last_time;
+ stats->time_in_state[atomic_read(&stats->curr_state)] +=
+ cur_time - stats->last_time;
stats->last_time = cur_time;
spin_unlock(&cpufreq_stats_lock);
return 0;
}
+void cpufreq_task_stats_init(struct task_struct *p)
+{
+ size_t alloc_size;
+ void *temp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&task_time_in_state_lock, flags);
+ p->time_in_state = NULL;
+ spin_unlock_irqrestore(&task_time_in_state_lock, flags);
+ WRITE_ONCE(p->max_state, 0);
+
+ if (!cpufreq_stats_initialized)
+ return;
+
+ /* We use one array to avoid multiple allocs per task */
+ WRITE_ONCE(p->max_state, cpufreq_max_state);
+
+ alloc_size = p->max_state * sizeof(p->time_in_state[0]);
+ temp = kzalloc(alloc_size, GFP_ATOMIC);
+
+ spin_lock_irqsave(&task_time_in_state_lock, flags);
+ p->time_in_state = temp;
+ spin_unlock_irqrestore(&task_time_in_state_lock, flags);
+}
+
+void cpufreq_task_stats_exit(struct task_struct *p)
+{
+ unsigned long flags;
+ void *temp;
+
+ spin_lock_irqsave(&task_time_in_state_lock, flags);
+ temp = p->time_in_state;
+ p->time_in_state = NULL;
+ spin_unlock_irqrestore(&task_time_in_state_lock, flags);
+ kfree(temp);
+}
+
+int proc_time_in_state_show(struct seq_file *m, struct pid_namespace *ns,
+ struct pid *pid, struct task_struct *p)
+{
+ int i;
+ cputime_t cputime;
+ unsigned long flags;
+
+ if (!cpufreq_stats_initialized || !p->time_in_state)
+ return 0;
+
+ spin_lock(&cpufreq_stats_lock);
+ for (i = 0; i < p->max_state; ++i) {
+ cputime = 0;
+ spin_lock_irqsave(&task_time_in_state_lock, flags);
+ if (p->time_in_state)
+ cputime = atomic_read(&p->time_in_state[i]);
+ spin_unlock_irqrestore(&task_time_in_state_lock, flags);
+
+ seq_printf(m, "%d %lu\n", cpufreq_states[i],
+ (unsigned long)cputime_to_clock_t(cputime));
+ }
+ spin_unlock(&cpufreq_stats_lock);
+
+ return 0;
+}
+
static ssize_t show_total_trans(struct cpufreq_policy *policy, char *buf)
{
return sprintf(buf, "%d\n", policy->stats->total_trans);
@@ -53,7 +284,7 @@
int i;
cpufreq_stats_update(stats);
- for (i = 0; i < stats->state_num; i++) {
+ for (i = 0; i < stats->max_state; i++) {
len += sprintf(buf + len, "%u %llu\n", stats->freq_table[i],
(unsigned long long)
jiffies_64_to_clock_t(stats->time_in_state[i]));
@@ -61,49 +292,95 @@
return len;
}
-#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
-static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
+/* Called without cpufreq_stats_lock held */
+void acct_update_power(struct task_struct *task, cputime_t cputime)
{
- struct cpufreq_stats *stats = policy->stats;
+ struct cpufreq_stats *stats;
+ struct cpufreq_policy *policy;
+ unsigned int cpu_num;
+ unsigned int state;
+ unsigned long flags;
+
+ if (!task)
+ return;
+
+ cpu_num = task_cpu(task);
+ policy = cpufreq_cpu_get(cpu_num);
+ if (!policy)
+ return;
+
+ stats = policy->stats;
+ if (!stats) {
+ cpufreq_cpu_put(policy);
+ return;
+ }
+
+ state = stats->prev_states + atomic_read(&policy->stats->curr_state);
+
+ /* This function is called from a different context
+ * Interruptions in between reads/assignements are ok
+ */
+ if (cpufreq_stats_initialized &&
+ !(task->flags & PF_EXITING) &&
+ state < READ_ONCE(task->max_state)) {
+ spin_lock_irqsave(&task_time_in_state_lock, flags);
+ if (task->time_in_state)
+ atomic64_add(cputime, &task->time_in_state[state]);
+ spin_unlock_irqrestore(&task_time_in_state_lock, flags);
+ }
+ cpufreq_cpu_put(policy);
+
+}
+EXPORT_SYMBOL_GPL(acct_update_power);
+
+static ssize_t show_all_time_in_state(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
ssize_t len = 0;
- int i, j;
+ unsigned int i, cpu, freq;
+ struct cpufreq_policy *policy;
+ struct cpufreq_stats *stats;
- len += snprintf(buf + len, PAGE_SIZE - len, " From : To\n");
- len += snprintf(buf + len, PAGE_SIZE - len, " : ");
- for (i = 0; i < stats->state_num; i++) {
- if (len >= PAGE_SIZE)
- break;
- len += snprintf(buf + len, PAGE_SIZE - len, "%9u ",
- stats->freq_table[i]);
+ len += scnprintf(buf + len, PAGE_SIZE - len, "freq\t\t");
+ for_each_possible_cpu(cpu) {
+ policy = cpufreq_cpu_get(cpu);
+ if (!policy)
+ continue;
+ stats = policy->stats;
+ len += scnprintf(buf + len, PAGE_SIZE - len, "cpu%u\t\t", cpu);
+ cpufreq_stats_update(stats);
+ cpufreq_cpu_put(policy);
}
- if (len >= PAGE_SIZE)
- return PAGE_SIZE;
- len += snprintf(buf + len, PAGE_SIZE - len, "\n");
-
- for (i = 0; i < stats->state_num; i++) {
- if (len >= PAGE_SIZE)
- break;
-
- len += snprintf(buf + len, PAGE_SIZE - len, "%9u: ",
- stats->freq_table[i]);
-
- for (j = 0; j < stats->state_num; j++) {
- if (len >= PAGE_SIZE)
- break;
- len += snprintf(buf + len, PAGE_SIZE - len, "%9u ",
- stats->trans_table[i*stats->max_state+j]);
+ if (!cpufreq_stats_initialized)
+ goto out;
+ for (i = 0; i < cpufreq_max_state; i++) {
+ freq = cpufreq_states[i];
+ len += scnprintf(buf + len, PAGE_SIZE - len, "\n%u\t\t", freq);
+ for_each_possible_cpu(cpu) {
+ policy = cpufreq_cpu_get(cpu);
+ if (!policy)
+ continue;
+ stats = policy->stats;
+ if (i >= stats->prev_states &&
+ i < stats->prev_states + stats->max_state) {
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "%lu\t\t", (unsigned long)
+ cputime64_to_clock_t(
+ stats->time_in_state[i -
+ stats->prev_states]));
+ } else {
+ len += scnprintf(buf + len, PAGE_SIZE - len,
+ "N/A\t\t");
+ }
+ cpufreq_cpu_put(policy);
}
- if (len >= PAGE_SIZE)
- break;
- len += snprintf(buf + len, PAGE_SIZE - len, "\n");
}
- if (len >= PAGE_SIZE)
- return PAGE_SIZE;
+
+out:
+ len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
return len;
}
-cpufreq_freq_attr_ro(trans_table);
-#endif
cpufreq_freq_attr_ro(total_trans);
cpufreq_freq_attr_ro(time_in_state);
@@ -111,9 +388,6 @@
static struct attribute *default_attrs[] = {
&total_trans.attr,
&time_in_state.attr,
-#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
- &trans_table.attr,
-#endif
NULL
};
static struct attribute_group stats_attr_group = {
@@ -121,6 +395,10 @@
.name = "stats"
};
+static struct kobj_attribute _attr_all_time_in_state = __ATTR(all_time_in_state,
+ 0444, show_all_time_in_state, NULL);
+
+
static int freq_table_get_index(struct cpufreq_stats *stats, unsigned int freq)
{
int index;
@@ -134,7 +412,6 @@
{
struct cpufreq_stats *stats = policy->stats;
- /* Already freed */
if (!stats)
return;
@@ -144,6 +421,7 @@
kfree(stats->time_in_state);
kfree(stats);
policy->stats = NULL;
+ /* cpufreq_last_max_state is always incrementing, not changed here */
}
static void cpufreq_stats_free_table(unsigned int cpu)
@@ -154,103 +432,144 @@
if (!policy)
return;
- __cpufreq_stats_free_table(policy);
+ if (cpufreq_frequency_get_table(policy->cpu))
+ __cpufreq_stats_free_table(policy);
cpufreq_cpu_put(policy);
}
-static int __cpufreq_stats_create_table(struct cpufreq_policy *policy)
+
+static int cpufreq_stats_create_all_table(void)
{
- unsigned int i = 0, count = 0, ret = -ENOMEM;
+ struct cpufreq_policy *last_policy = NULL;
+ struct cpufreq_policy *policy;
struct cpufreq_stats *stats;
- unsigned int alloc_size;
- unsigned int cpu = policy->cpu;
- struct cpufreq_frequency_table *pos, *table;
+ int cpu, i;
- /* We need cpufreq table for creating stats table */
- table = cpufreq_frequency_get_table(cpu);
- if (unlikely(!table))
- return 0;
-
- /* stats already initialized */
- if (policy->stats)
- return -EEXIST;
-
- stats = kzalloc(sizeof(*stats), GFP_KERNEL);
- if (!stats)
+ cpufreq_states = kcalloc(cpufreq_max_state, sizeof(unsigned int),
+ GFP_KERNEL);
+ if (cpufreq_states == NULL)
return -ENOMEM;
- /* Find total allocation size */
- cpufreq_for_each_valid_entry(pos, table)
- count++;
+ for_each_possible_cpu(cpu) {
+ policy = cpufreq_cpu_get(cpu);
+ if (!policy)
+ continue;
+ stats = policy->stats;
+ if (policy != last_policy) {
+ for (i = 0; i < stats->max_state; ++i)
+ cpufreq_states[stats->prev_states + i]
+ = stats->freq_table[i];
+ last_policy = policy;
+ }
+ cpufreq_cpu_put(policy);
+ }
+ return 0;
+}
- alloc_size = count * sizeof(int) + count * sizeof(u64);
+static int __cpufreq_stats_create_table(struct cpufreq_policy *policy,
+ struct cpufreq_frequency_table *table, int count)
+{
+ unsigned int i, ret = 0;
+ struct cpufreq_stats *stats;
+ unsigned int alloc_size;
+ struct cpufreq_frequency_table *pos;
-#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
- alloc_size += count * count * sizeof(int);
-#endif
+ if (policy->stats)
+ return -EBUSY;
- /* Allocate memory for time_in_state/freq_table/trans_table in one go */
+ stats = kzalloc(sizeof(*stats), GFP_KERNEL);
+ if (stats == NULL)
+ return -ENOMEM;
+
+ ret = sysfs_create_group(&policy->kobj, &stats_attr_group);
+ if (ret)
+ pr_warn("Cannot create stats attr group\n");
+
+ alloc_size = count * sizeof(u64) + count * sizeof(unsigned int);
+
stats->time_in_state = kzalloc(alloc_size, GFP_KERNEL);
- if (!stats->time_in_state)
- goto free_stat;
-
+ if (!stats->time_in_state) {
+ ret = -ENOMEM;
+ goto error_alloc;
+ }
stats->freq_table = (unsigned int *)(stats->time_in_state + count);
-#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
- stats->trans_table = stats->freq_table + count;
-#endif
-
- stats->max_state = count;
-
- /* Find valid-unique entries */
+ i = 0;
cpufreq_for_each_valid_entry(pos, table)
if (freq_table_get_index(stats, pos->frequency) == -1)
stats->freq_table[i++] = pos->frequency;
- stats->state_num = i;
+ cpufreq_last_max_state = cpufreq_max_state;
+ stats->prev_states = cpufreq_last_max_state;
+ stats->max_state = count;
+ cpufreq_max_state += count;
+
+ spin_lock(&cpufreq_stats_lock);
stats->last_time = get_jiffies_64();
- stats->last_index = freq_table_get_index(stats, policy->cur);
-
+ atomic_set(&stats->curr_state,
+ freq_table_get_index(stats, policy->cur));
+ spin_unlock(&cpufreq_stats_lock);
policy->stats = stats;
- ret = sysfs_create_group(&policy->kobj, &stats_attr_group);
- if (!ret)
- return 0;
-
- /* We failed, release resources */
- policy->stats = NULL;
- kfree(stats->time_in_state);
-free_stat:
+ return 0;
+error_alloc:
+ sysfs_remove_group(&policy->kobj, &stats_attr_group);
kfree(stats);
-
+ policy->stats = NULL;
return ret;
}
-static void cpufreq_stats_create_table(unsigned int cpu)
+static void cpufreq_stats_create_table(struct cpufreq_policy *policy)
{
- struct cpufreq_policy *policy;
+ struct cpufreq_frequency_table *table, *pos;
+ int count = 0;
- /*
- * "likely(!policy)" because normally cpufreq_stats will be registered
- * before cpufreq driver
- */
- policy = cpufreq_cpu_get(cpu);
- if (likely(!policy))
- return;
+ table = cpufreq_frequency_get_table(policy->cpu);
+ if (likely(table)) {
+ cpufreq_for_each_valid_entry(pos, table)
+ count++;
- __cpufreq_stats_create_table(policy);
+ __cpufreq_stats_create_table(policy, table, count);
+ }
+}
- cpufreq_cpu_put(policy);
+void cpufreq_task_stats_remove_uids(uid_t uid_start, uid_t uid_end)
+{
+ struct uid_entry *uid_entry;
+ struct hlist_node *tmp;
+
+ rt_mutex_lock(&uid_lock);
+
+ for (; uid_start <= uid_end; uid_start++) {
+ hash_for_each_possible_safe(uid_hash_table, uid_entry, tmp,
+ hash, uid_start) {
+ if (uid_start == uid_entry->uid) {
+ hash_del(&uid_entry->hash);
+ kfree(uid_entry->dead_time_in_state);
+ kfree(uid_entry);
+ }
+ }
+ }
+
+ rt_mutex_unlock(&uid_lock);
}
static int cpufreq_stat_notifier_policy(struct notifier_block *nb,
- unsigned long val, void *data)
+ unsigned long val, void *data)
{
- int ret = 0;
+ int ret = 0, count = 0;
struct cpufreq_policy *policy = data;
+ struct cpufreq_frequency_table *table, *pos;
+
+ table = cpufreq_frequency_get_table(policy->cpu);
+ if (!table)
+ return 0;
+
+ cpufreq_for_each_valid_entry(pos, table)
+ count++;
if (val == CPUFREQ_CREATE_POLICY)
- ret = __cpufreq_stats_create_table(policy);
+ ret = __cpufreq_stats_create_table(policy, table, count);
else if (val == CPUFREQ_REMOVE_POLICY)
__cpufreq_stats_free_table(policy);
@@ -258,51 +577,95 @@
}
static int cpufreq_stat_notifier_trans(struct notifier_block *nb,
- unsigned long val, void *data)
+ unsigned long val, void *data)
{
struct cpufreq_freqs *freq = data;
- struct cpufreq_policy *policy = cpufreq_cpu_get(freq->cpu);
- struct cpufreq_stats *stats;
- int old_index, new_index;
+ struct cpufreq_stats *stat;
+ struct cpufreq_policy *policy;
- if (!policy) {
- pr_err("%s: No policy found\n", __func__);
+ if (val != CPUFREQ_POSTCHANGE)
+ return 0;
+
+ policy = cpufreq_cpu_get(freq->cpu);
+ if (!policy)
+ return 0;
+
+ stat = policy->stats;
+ if (!stat) {
+ cpufreq_cpu_put(policy);
return 0;
}
- if (val != CPUFREQ_POSTCHANGE)
- goto put_policy;
-
- if (!policy->stats) {
- pr_debug("%s: No stats found\n", __func__);
- goto put_policy;
- }
-
- stats = policy->stats;
-
- old_index = stats->last_index;
- new_index = freq_table_get_index(stats, freq->new);
-
- /* We can't do stats->time_in_state[-1]= .. */
- if (old_index == -1 || new_index == -1)
- goto put_policy;
-
- if (old_index == new_index)
- goto put_policy;
-
- cpufreq_stats_update(stats);
-
- stats->last_index = new_index;
-#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
- stats->trans_table[old_index * stats->max_state + new_index]++;
-#endif
- stats->total_trans++;
-
-put_policy:
+ cpufreq_stats_update(policy->stats);
+ spin_lock(&cpufreq_stats_lock);
+ atomic_set(&stat->curr_state, freq_table_get_index(stat, freq->new));
+ stat->total_trans++;
+ spin_unlock(&cpufreq_stats_lock);
cpufreq_cpu_put(policy);
return 0;
}
+
+static int process_notifier(struct notifier_block *self,
+ unsigned long cmd, void *v)
+{
+ struct task_struct *task = v;
+ struct uid_entry *uid_entry;
+ unsigned long flags;
+ uid_t uid;
+ int i;
+
+ if (!task)
+ return NOTIFY_OK;
+
+ rt_mutex_lock(&uid_lock);
+
+ uid = from_kuid_munged(current_user_ns(), task_uid(task));
+ uid_entry = find_or_register_uid(uid);
+ if (!uid_entry) {
+ rt_mutex_unlock(&uid_lock);
+ pr_err("%s: failed to find uid %d\n", __func__, uid);
+ return NOTIFY_OK;
+ }
+
+ if (uid_entry->dead_max_state < task->max_state) {
+ uid_entry->dead_time_in_state = krealloc(
+ uid_entry->dead_time_in_state,
+ task->max_state *
+ sizeof(uid_entry->dead_time_in_state[0]),
+ GFP_ATOMIC);
+ memset(uid_entry->dead_time_in_state +
+ uid_entry->dead_max_state,
+ 0, (task->max_state - uid_entry->dead_max_state) *
+ sizeof(uid_entry->dead_time_in_state[0]));
+ uid_entry->dead_max_state = task->max_state;
+ }
+
+ spin_lock_irqsave(&task_time_in_state_lock, flags);
+ if (task->time_in_state) {
+ for (i = 0; i < task->max_state; ++i) {
+ uid_entry->dead_time_in_state[i] +=
+ atomic_read(&task->time_in_state[i]);
+ }
+ }
+ spin_unlock_irqrestore(&task_time_in_state_lock, flags);
+
+ rt_mutex_unlock(&uid_lock);
+ return NOTIFY_OK;
+}
+
+static int uid_time_in_state_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, uid_time_in_state_show, PDE_DATA(inode));
+}
+
+static const struct file_operations uid_time_in_state_fops = {
+ .open = uid_time_in_state_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
static struct notifier_block notifier_policy_block = {
.notifier_call = cpufreq_stat_notifier_policy
};
@@ -311,10 +674,16 @@
.notifier_call = cpufreq_stat_notifier_trans
};
+static struct notifier_block process_notifier_block = {
+ .notifier_call = process_notifier,
+};
+
static int __init cpufreq_stats_init(void)
{
int ret;
unsigned int cpu;
+ struct cpufreq_policy *policy;
+ struct cpufreq_policy *last_policy = NULL;
spin_lock_init(&cpufreq_stats_lock);
ret = cpufreq_register_notifier(¬ifier_policy_block,
@@ -323,10 +692,24 @@
return ret;
get_online_cpus();
- for_each_online_cpu(cpu)
- cpufreq_stats_create_table(cpu);
+ for_each_online_cpu(cpu) {
+ policy = cpufreq_cpu_get(cpu);
+ if (!policy)
+ continue;
+ if (policy != last_policy) {
+ cpufreq_stats_create_table(policy);
+ last_policy = policy;
+ }
+ cpufreq_cpu_put(policy);
+ }
put_online_cpus();
+ /* XXX TODO task support for time_in_state doesn't update freq
+ * info for tasks already initialized, so tasks initialized early
+ * (before cpufreq_stat_init is done) do not get time_in_state data
+ * and CPUFREQ_TRANSITION_NOTIFIER does not update freq info for
+ * tasks already created
+ */
ret = cpufreq_register_notifier(¬ifier_trans_block,
CPUFREQ_TRANSITION_NOTIFIER);
if (ret) {
@@ -338,7 +721,21 @@
put_online_cpus();
return ret;
}
+ ret = sysfs_create_file(cpufreq_global_kobject,
+ &_attr_all_time_in_state.attr);
+ if (ret)
+ pr_warn("Cannot create sysfs file for cpufreq stats\n");
+ proc_create_data("uid_time_in_state", 0444, NULL,
+ &uid_time_in_state_fops, NULL);
+
+ profile_event_register(PROFILE_TASK_EXIT, &process_notifier_block);
+
+ ret = cpufreq_stats_create_all_table();
+ if (ret)
+ pr_warn("Cannot create cpufreq all freqs table\n");
+
+ cpufreq_stats_initialized = true;
return 0;
}
static void __exit cpufreq_stats_exit(void)
diff --git a/drivers/cpufreq/s3c2416-cpufreq.c b/drivers/cpufreq/s3c2416-cpufreq.c
index d6d4257..5b2db3c 100644
--- a/drivers/cpufreq/s3c2416-cpufreq.c
+++ b/drivers/cpufreq/s3c2416-cpufreq.c
@@ -400,7 +400,6 @@
rate = clk_get_rate(s3c_freq->hclk);
if (rate < 133 * 1000 * 1000) {
pr_err("cpufreq: HCLK not at 133MHz\n");
- clk_put(s3c_freq->hclk);
ret = -EINVAL;
goto err_armclk;
}
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index 0dadb63..7abe908 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -963,7 +963,9 @@
ctx->flags |= SHA_FLAGS_FINUP;
err1 = atmel_sha_update(req);
- if (err1 == -EINPROGRESS || err1 == -EBUSY)
+ if (err1 == -EINPROGRESS ||
+ (err1 == -EBUSY && (ahash_request_flags(req) &
+ CRYPTO_TFM_REQ_MAY_BACKLOG)))
return err1;
/*
diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c
index 99d5e11..e06cc5d 100644
--- a/drivers/crypto/caam/caamhash.c
+++ b/drivers/crypto/caam/caamhash.c
@@ -498,7 +498,7 @@
ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
if (!ret) {
/* in progress */
- wait_for_completion_interruptible(&result.completion);
+ wait_for_completion(&result.completion);
ret = result.err;
#ifdef DEBUG
print_hex_dump(KERN_ERR,
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 69d4a13..53e6145 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -278,7 +278,8 @@
/* Try to run it through DECO0 */
ret = run_descriptor_deco0(ctrldev, desc, &status);
- if (ret || status) {
+ if (ret ||
+ (status && status != JRSTA_SSRC_JUMP_HALT_CC)) {
dev_err(ctrldev,
"Failed to deinstantiate RNG4 SH%d\n",
sh_idx);
diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c
index e1eaf4f..3ce1d5c 100644
--- a/drivers/crypto/caam/key_gen.c
+++ b/drivers/crypto/caam/key_gen.c
@@ -103,7 +103,7 @@
ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result);
if (!ret) {
/* in progress */
- wait_for_completion_interruptible(&result.completion);
+ wait_for_completion(&result.completion);
ret = result.err;
#ifdef DEBUG
print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ",
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 9a8a18a..6a60936 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -804,7 +804,7 @@
* crypto alg
*/
#define TALITOS_CRA_PRIORITY 3000
-#define TALITOS_MAX_KEY_SIZE 96
+#define TALITOS_MAX_KEY_SIZE (AES_MAX_KEY_SIZE + SHA512_BLOCK_SIZE)
#define TALITOS_MAX_IV_LENGTH 16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
struct talitos_ctx {
@@ -1388,6 +1388,11 @@
{
struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
+ if (keylen > TALITOS_MAX_KEY_SIZE) {
+ crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+ }
+
memcpy(&ctx->key, key, keylen);
ctx->keylen = keylen;
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index 57ff462..c97336a 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -325,6 +325,8 @@
| M2P_CONTROL_ENABLE;
m2p_set_control(edmac, control);
+ edmac->buffer = 0;
+
return 0;
}
diff --git a/drivers/dma/ioat/hw.h b/drivers/dma/ioat/hw.h
index 690e3b4..b36da3c 100644
--- a/drivers/dma/ioat/hw.h
+++ b/drivers/dma/ioat/hw.h
@@ -64,6 +64,8 @@
#define PCI_DEVICE_ID_INTEL_IOAT_BDX8 0x6f2e
#define PCI_DEVICE_ID_INTEL_IOAT_BDX9 0x6f2f
+#define PCI_DEVICE_ID_INTEL_IOAT_SKX 0x2021
+
#define IOAT_VER_1_2 0x12 /* Version 1.2 */
#define IOAT_VER_2_0 0x20 /* Version 2.0 */
#define IOAT_VER_3_0 0x30 /* Version 3.0 */
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index 4ef0c5e..abb75eb 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -105,6 +105,8 @@
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX8) },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX9) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SKX) },
+
/* I/OAT v3.3 platforms */
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD0) },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD1) },
@@ -250,10 +252,15 @@
}
}
+static inline bool is_skx_ioat(struct pci_dev *pdev)
+{
+ return (pdev->device == PCI_DEVICE_ID_INTEL_IOAT_SKX) ? true : false;
+}
+
static bool is_xeon_cb32(struct pci_dev *pdev)
{
return is_jf_ioat(pdev) || is_snb_ioat(pdev) || is_ivb_ioat(pdev) ||
- is_hsw_ioat(pdev) || is_bdx_ioat(pdev);
+ is_hsw_ioat(pdev) || is_bdx_ioat(pdev) || is_skx_ioat(pdev);
}
bool is_bwd_ioat(struct pci_dev *pdev)
@@ -1350,6 +1357,8 @@
device->version = readb(device->reg_base + IOAT_VER_OFFSET);
if (device->version >= IOAT_VER_3_0) {
+ if (is_skx_ioat(pdev))
+ device->version = IOAT_VER_3_2;
err = ioat3_dma_probe(device, ioat_dca_enabled);
if (device->version >= IOAT_VER_3_3)
diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c
index b1bc945..56410ea 100644
--- a/drivers/dma/sh/usb-dmac.c
+++ b/drivers/dma/sh/usb-dmac.c
@@ -117,7 +117,7 @@
#define USB_DMASWR 0x0008
#define USB_DMASWR_SWR (1 << 0)
#define USB_DMAOR 0x0060
-#define USB_DMAOR_AE (1 << 2)
+#define USB_DMAOR_AE (1 << 1)
#define USB_DMAOR_DME (1 << 0)
#define USB_DMASAR 0x0000
diff --git a/drivers/dma/ti-dma-crossbar.c b/drivers/dma/ti-dma-crossbar.c
index a415edb..149ec2b 100644
--- a/drivers/dma/ti-dma-crossbar.c
+++ b/drivers/dma/ti-dma-crossbar.c
@@ -146,6 +146,7 @@
match = of_match_node(ti_am335x_master_match, dma_node);
if (!match) {
dev_err(&pdev->dev, "DMA master is not supported\n");
+ of_node_put(dma_node);
return -EINVAL;
}
@@ -310,6 +311,7 @@
match = of_match_node(ti_dra7_master_match, dma_node);
if (!match) {
dev_err(&pdev->dev, "DMA master is not supported\n");
+ of_node_put(dma_node);
return -EINVAL;
}
diff --git a/drivers/gpio/gpio-msm-smp2p-test.c b/drivers/gpio/gpio-msm-smp2p-test.c
index 5907513..b841953 100644
--- a/drivers/gpio/gpio-msm-smp2p-test.c
+++ b/drivers/gpio/gpio-msm-smp2p-test.c
@@ -206,7 +206,7 @@
strlcpy(mock->remote_item.entries[0].name, "smp2p",
SMP2P_MAX_ENTRY_NAME);
SMP2P_SET_ENT_VALID(
- mock->remote_item.header.valid_total_ent, 1);
+ &mock->remote_item.header, valid_total_ent, 1);
msm_smp2p_set_remote_mock_exists(true);
mock->tx_interrupt();
@@ -306,7 +306,7 @@
strlcpy(mock->remote_item.entries[0].name, "smp2p",
SMP2P_MAX_ENTRY_NAME);
SMP2P_SET_ENT_VALID(
- mock->remote_item.header.valid_total_ent, 1);
+ &mock->remote_item.header, valid_total_ent, 1);
msm_smp2p_set_remote_mock_exists(true);
mock->tx_interrupt();
@@ -474,7 +474,7 @@
strlcpy(mock->remote_item.entries[0].name, "smp2p",
SMP2P_MAX_ENTRY_NAME);
SMP2P_SET_ENT_VALID(
- mock->remote_item.header.valid_total_ent, 1);
+ &mock->remote_item.header, valid_total_ent, 1);
/* register for interrupts */
smp2p_gpio_open_test_entry("smp2p",
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
index 51a9942..f4cae53 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c
@@ -681,6 +681,10 @@
DRM_INFO("Changing default dispclk from %dMhz to 600Mhz\n",
adev->clock.default_dispclk / 100);
adev->clock.default_dispclk = 60000;
+ } else if (adev->clock.default_dispclk <= 60000) {
+ DRM_INFO("Changing default dispclk from %dMhz to 625Mhz\n",
+ adev->clock.default_dispclk / 100);
+ adev->clock.default_dispclk = 62500;
}
adev->clock.dp_extclk =
le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 25a3e248..2bc17a9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -124,6 +124,13 @@
}
break;
}
+
+ if (!(*out_ring && (*out_ring)->adev)) {
+ DRM_ERROR("Ring %d is not initialized on IP %d\n",
+ ring, ip_type);
+ return -EINVAL;
+ }
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 7c42ff6..a092433 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -25,6 +25,7 @@
* Alex Deucher
* Jerome Glisse
*/
+#include <linux/irq.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/amdgpu_drm.h>
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 475c38f..e40a6d8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -1126,6 +1126,9 @@
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
+ if (*pos >= adev->mc.mc_vram_size)
+ return -ENXIO;
+
while (size) {
unsigned long flags;
uint32_t value;
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
index 49aa350..247b088 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_crtc.c
@@ -164,7 +164,7 @@
struct drm_device *dev = crtc->dev;
struct amdgpu_device *adev = dev->dev_private;
int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating);
- ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1 args;
+ ENABLE_DISP_POWER_GATING_PS_ALLOCATION args;
memset(&args, 0, sizeof(args));
@@ -177,7 +177,7 @@
void amdgpu_atombios_crtc_powergate_init(struct amdgpu_device *adev)
{
int index = GetIndexIntoMasterTable(COMMAND, EnableDispPowerGating);
- ENABLE_DISP_POWER_GATING_PARAMETERS_V2_1 args;
+ ENABLE_DISP_POWER_GATING_PS_ALLOCATION args;
memset(&args, 0, sizeof(args));
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
index 57a2e34..0f0094b5 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -893,6 +893,12 @@
u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
u32 switch_limit = adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 300;
+ /* disable mclk switching if the refresh is >120Hz, even if the
+ * blanking period would allow it
+ */
+ if (amdgpu_dpm_get_vrefresh(adev) > 120)
+ return true;
+
if (vblank_time < switch_limit)
return true;
else
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 5b261ad..3a25da4 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -1126,23 +1126,10 @@
a.full = dfixed_const(available_bandwidth);
b.full = dfixed_const(wm->num_heads);
a.full = dfixed_div(a, b);
+ tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
+ tmp = min(dfixed_trunc(a), tmp);
- b.full = dfixed_const(mc_latency + 512);
- c.full = dfixed_const(wm->disp_clk);
- b.full = dfixed_div(b, c);
-
- c.full = dfixed_const(dmif_size);
- b.full = dfixed_div(c, b);
-
- tmp = min(dfixed_trunc(a), dfixed_trunc(b));
-
- b.full = dfixed_const(1000);
- c.full = dfixed_const(wm->disp_clk);
- b.full = dfixed_div(c, b);
- c.full = dfixed_const(wm->bytes_per_pixel);
- b.full = dfixed_mul(b, c);
-
- lb_fill_bw = min(tmp, dfixed_trunc(b));
+ lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);
a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
b.full = dfixed_const(1000);
@@ -1250,14 +1237,14 @@
{
struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
struct dce10_wm_params wm_low, wm_high;
- u32 pixel_period;
+ u32 active_time;
u32 line_time = 0;
u32 latency_watermark_a = 0, latency_watermark_b = 0;
u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
if (amdgpu_crtc->base.enabled && num_heads && mode) {
- pixel_period = 1000000 / (u32)mode->clock;
- line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
+ active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
+ line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
/* watermark for high clocks */
if (adev->pm.dpm_enabled) {
@@ -1272,7 +1259,7 @@
wm_high.disp_clk = mode->clock;
wm_high.src_width = mode->crtc_hdisplay;
- wm_high.active_time = mode->crtc_hdisplay * pixel_period;
+ wm_high.active_time = active_time;
wm_high.blank_time = line_time - wm_high.active_time;
wm_high.interlaced = false;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
@@ -1311,7 +1298,7 @@
wm_low.disp_clk = mode->clock;
wm_low.src_width = mode->crtc_hdisplay;
- wm_low.active_time = mode->crtc_hdisplay * pixel_period;
+ wm_low.active_time = active_time;
wm_low.blank_time = line_time - wm_low.active_time;
wm_low.interlaced = false;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 267749a..d6d3cda 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -1114,23 +1114,10 @@
a.full = dfixed_const(available_bandwidth);
b.full = dfixed_const(wm->num_heads);
a.full = dfixed_div(a, b);
+ tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
+ tmp = min(dfixed_trunc(a), tmp);
- b.full = dfixed_const(mc_latency + 512);
- c.full = dfixed_const(wm->disp_clk);
- b.full = dfixed_div(b, c);
-
- c.full = dfixed_const(dmif_size);
- b.full = dfixed_div(c, b);
-
- tmp = min(dfixed_trunc(a), dfixed_trunc(b));
-
- b.full = dfixed_const(1000);
- c.full = dfixed_const(wm->disp_clk);
- b.full = dfixed_div(c, b);
- c.full = dfixed_const(wm->bytes_per_pixel);
- b.full = dfixed_mul(b, c);
-
- lb_fill_bw = min(tmp, dfixed_trunc(b));
+ lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);
a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
b.full = dfixed_const(1000);
@@ -1238,14 +1225,14 @@
{
struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
struct dce10_wm_params wm_low, wm_high;
- u32 pixel_period;
+ u32 active_time;
u32 line_time = 0;
u32 latency_watermark_a = 0, latency_watermark_b = 0;
u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
if (amdgpu_crtc->base.enabled && num_heads && mode) {
- pixel_period = 1000000 / (u32)mode->clock;
- line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
+ active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
+ line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
/* watermark for high clocks */
if (adev->pm.dpm_enabled) {
@@ -1260,7 +1247,7 @@
wm_high.disp_clk = mode->clock;
wm_high.src_width = mode->crtc_hdisplay;
- wm_high.active_time = mode->crtc_hdisplay * pixel_period;
+ wm_high.active_time = active_time;
wm_high.blank_time = line_time - wm_high.active_time;
wm_high.interlaced = false;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
@@ -1299,7 +1286,7 @@
wm_low.disp_clk = mode->clock;
wm_low.src_width = mode->crtc_hdisplay;
- wm_low.active_time = mode->crtc_hdisplay * pixel_period;
+ wm_low.active_time = active_time;
wm_low.blank_time = line_time - wm_low.active_time;
wm_low.interlaced = false;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 9b4dcf7..d6e51d4 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -1096,23 +1096,10 @@
a.full = dfixed_const(available_bandwidth);
b.full = dfixed_const(wm->num_heads);
a.full = dfixed_div(a, b);
+ tmp = div_u64((u64) dmif_size * (u64) wm->disp_clk, mc_latency + 512);
+ tmp = min(dfixed_trunc(a), tmp);
- b.full = dfixed_const(mc_latency + 512);
- c.full = dfixed_const(wm->disp_clk);
- b.full = dfixed_div(b, c);
-
- c.full = dfixed_const(dmif_size);
- b.full = dfixed_div(c, b);
-
- tmp = min(dfixed_trunc(a), dfixed_trunc(b));
-
- b.full = dfixed_const(1000);
- c.full = dfixed_const(wm->disp_clk);
- b.full = dfixed_div(c, b);
- c.full = dfixed_const(wm->bytes_per_pixel);
- b.full = dfixed_mul(b, c);
-
- lb_fill_bw = min(tmp, dfixed_trunc(b));
+ lb_fill_bw = min(tmp, wm->disp_clk * wm->bytes_per_pixel / 1000);
a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
b.full = dfixed_const(1000);
@@ -1220,14 +1207,14 @@
{
struct drm_display_mode *mode = &amdgpu_crtc->base.mode;
struct dce8_wm_params wm_low, wm_high;
- u32 pixel_period;
+ u32 active_time;
u32 line_time = 0;
u32 latency_watermark_a = 0, latency_watermark_b = 0;
u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
if (amdgpu_crtc->base.enabled && num_heads && mode) {
- pixel_period = 1000000 / (u32)mode->clock;
- line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);
+ active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
+ line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
/* watermark for high clocks */
if (adev->pm.dpm_enabled) {
@@ -1242,7 +1229,7 @@
wm_high.disp_clk = mode->clock;
wm_high.src_width = mode->crtc_hdisplay;
- wm_high.active_time = mode->crtc_hdisplay * pixel_period;
+ wm_high.active_time = active_time;
wm_high.blank_time = line_time - wm_high.active_time;
wm_high.interlaced = false;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
@@ -1281,7 +1268,7 @@
wm_low.disp_clk = mode->clock;
wm_low.src_width = mode->crtc_hdisplay;
- wm_low.active_time = mode->crtc_hdisplay * pixel_period;
+ wm_low.active_time = active_time;
wm_low.blank_time = line_time - wm_low.active_time;
wm_low.interlaced = false;
if (mode->flags & DRM_MODE_FLAG_INTERLACE)
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
index 7e9154c..d1c9525 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
@@ -2258,7 +2258,7 @@
if (pi->caps_stable_p_state) {
stable_p_state_sclk = (max_limits->sclk * 75) / 100;
- for (i = table->count - 1; i >= 0; i++) {
+ for (i = table->count - 1; i >= 0; i--) {
if (stable_p_state_sclk >= table->entries[i].clk) {
stable_p_state_sclk = table->entries[i].clk;
break;
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 05f6522c..b5c64ed 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -113,6 +113,11 @@
struct ttm_bo_kmap_obj cache_kmap;
int next_cursor;
bool support_wide_screen;
+ enum {
+ ast_use_p2a,
+ ast_use_dt,
+ ast_use_defaults
+ } config_mode;
enum ast_tx_chip tx_chip_type;
u8 dp501_maxclk;
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index 9b8f0b9..498a940 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -62,13 +62,84 @@
return ret;
}
+static void ast_detect_config_mode(struct drm_device *dev, u32 *scu_rev)
+{
+ struct device_node *np = dev->pdev->dev.of_node;
+ struct ast_private *ast = dev->dev_private;
+ uint32_t data, jregd0, jregd1;
+
+ /* Defaults */
+ ast->config_mode = ast_use_defaults;
+ *scu_rev = 0xffffffff;
+
+ /* Check if we have device-tree properties */
+ if (np && !of_property_read_u32(np, "aspeed,scu-revision-id",
+ scu_rev)) {
+ /* We do, disable P2A access */
+ ast->config_mode = ast_use_dt;
+ DRM_INFO("Using device-tree for configuration\n");
+ return;
+ }
+
+ /* Not all families have a P2A bridge */
+ if (dev->pdev->device != PCI_CHIP_AST2000)
+ return;
+
+ /*
+ * The BMC will set SCU 0x40 D[12] to 1 if the P2 bridge
+ * is disabled. We force using P2A if VGA only mode bit
+ * is set D[7]
+ */
+ jregd0 = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd0, 0xff);
+ jregd1 = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd1, 0xff);
+ if (!(jregd0 & 0x80) || !(jregd1 & 0x10)) {
+ /* Double check it's actually working */
+ data = ast_read32(ast, 0xf004);
+ if (data != 0xFFFFFFFF) {
+ /* P2A works, grab silicon revision */
+ ast->config_mode = ast_use_p2a;
+
+ DRM_INFO("Using P2A bridge for configuration\n");
+
+ /* Read SCU7c (silicon revision register) */
+ ast_write32(ast, 0xf004, 0x1e6e0000);
+ ast_write32(ast, 0xf000, 0x1);
+ *scu_rev = ast_read32(ast, 0x1207c);
+ return;
+ }
+ }
+
+ /* We have a P2A bridge but it's disabled */
+ DRM_INFO("P2A bridge disabled, using default configuration\n");
+}
static int ast_detect_chip(struct drm_device *dev, bool *need_post)
{
struct ast_private *ast = dev->dev_private;
- uint32_t data, jreg;
+ uint32_t jreg, scu_rev;
+
+ /*
+ * If VGA isn't enabled, we need to enable now or subsequent
+ * access to the scratch registers will fail. We also inform
+ * our caller that it needs to POST the chip
+ * (Assumption: VGA not enabled -> need to POST)
+ */
+ if (!ast_is_vga_enabled(dev)) {
+ ast_enable_vga(dev);
+ DRM_INFO("VGA not enabled on entry, requesting chip POST\n");
+ *need_post = true;
+ } else
+ *need_post = false;
+
+
+ /* Enable extended register access */
+ ast_enable_mmio(dev);
ast_open_key(ast);
+ /* Find out whether P2A works or whether to use device-tree */
+ ast_detect_config_mode(dev, &scu_rev);
+
+ /* Identify chipset */
if (dev->pdev->device == PCI_CHIP_AST1180) {
ast->chip = AST1100;
DRM_INFO("AST 1180 detected\n");
@@ -80,12 +151,7 @@
ast->chip = AST2300;
DRM_INFO("AST 2300 detected\n");
} else if (dev->pdev->revision >= 0x10) {
- uint32_t data;
- ast_write32(ast, 0xf004, 0x1e6e0000);
- ast_write32(ast, 0xf000, 0x1);
-
- data = ast_read32(ast, 0x1207c);
- switch (data & 0x0300) {
+ switch (scu_rev & 0x0300) {
case 0x0200:
ast->chip = AST1100;
DRM_INFO("AST 1100 detected\n");
@@ -110,20 +176,6 @@
}
}
- /*
- * If VGA isn't enabled, we need to enable now or subsequent
- * access to the scratch registers will fail. We also inform
- * our caller that it needs to POST the chip
- * (Assumption: VGA not enabled -> need to POST)
- */
- if (!ast_is_vga_enabled(dev)) {
- ast_enable_vga(dev);
- ast_enable_mmio(dev);
- DRM_INFO("VGA not enabled on entry, requesting chip POST\n");
- *need_post = true;
- } else
- *need_post = false;
-
/* Check if we support wide screen */
switch (ast->chip) {
case AST1180:
@@ -140,14 +192,11 @@
ast->support_wide_screen = true;
else {
ast->support_wide_screen = false;
- /* Read SCU7c (silicon revision register) */
- ast_write32(ast, 0xf004, 0x1e6e0000);
- ast_write32(ast, 0xf000, 0x1);
- data = ast_read32(ast, 0x1207c);
- data &= 0x300;
- if (ast->chip == AST2300 && data == 0x0) /* ast1300 */
+ if (ast->chip == AST2300 &&
+ (scu_rev & 0x300) == 0x0) /* ast1300 */
ast->support_wide_screen = true;
- if (ast->chip == AST2400 && data == 0x100) /* ast1400 */
+ if (ast->chip == AST2400 &&
+ (scu_rev & 0x300) == 0x100) /* ast1400 */
ast->support_wide_screen = true;
}
break;
@@ -212,29 +261,49 @@
static int ast_get_dram_info(struct drm_device *dev)
{
+ struct device_node *np = dev->pdev->dev.of_node;
struct ast_private *ast = dev->dev_private;
- uint32_t data, data2;
- uint32_t denum, num, div, ref_pll;
+ uint32_t mcr_cfg, mcr_scu_mpll, mcr_scu_strap;
+ uint32_t denum, num, div, ref_pll, dsel;
- ast_write32(ast, 0xf004, 0x1e6e0000);
- ast_write32(ast, 0xf000, 0x1);
+ switch (ast->config_mode) {
+ case ast_use_dt:
+ /*
+ * If some properties are missing, use reasonable
+ * defaults for AST2400
+ */
+ if (of_property_read_u32(np, "aspeed,mcr-configuration",
+ &mcr_cfg))
+ mcr_cfg = 0x00000577;
+ if (of_property_read_u32(np, "aspeed,mcr-scu-mpll",
+ &mcr_scu_mpll))
+ mcr_scu_mpll = 0x000050C0;
+ if (of_property_read_u32(np, "aspeed,mcr-scu-strap",
+ &mcr_scu_strap))
+ mcr_scu_strap = 0;
+ break;
+ case ast_use_p2a:
+ ast_write32(ast, 0xf004, 0x1e6e0000);
+ ast_write32(ast, 0xf000, 0x1);
+ mcr_cfg = ast_read32(ast, 0x10004);
+ mcr_scu_mpll = ast_read32(ast, 0x10120);
+ mcr_scu_strap = ast_read32(ast, 0x10170);
+ break;
+ case ast_use_defaults:
+ default:
+ ast->dram_bus_width = 16;
+ ast->dram_type = AST_DRAM_1Gx16;
+ ast->mclk = 396;
+ return 0;
+ }
-
- ast_write32(ast, 0x10000, 0xfc600309);
-
- do {
- if (pci_channel_offline(dev->pdev))
- return -EIO;
- } while (ast_read32(ast, 0x10000) != 0x01);
- data = ast_read32(ast, 0x10004);
-
- if (data & 0x40)
+ if (mcr_cfg & 0x40)
ast->dram_bus_width = 16;
else
ast->dram_bus_width = 32;
if (ast->chip == AST2300 || ast->chip == AST2400) {
- switch (data & 0x03) {
+ switch (mcr_cfg & 0x03) {
case 0:
ast->dram_type = AST_DRAM_512Mx16;
break;
@@ -250,13 +319,13 @@
break;
}
} else {
- switch (data & 0x0c) {
+ switch (mcr_cfg & 0x0c) {
case 0:
case 4:
ast->dram_type = AST_DRAM_512Mx16;
break;
case 8:
- if (data & 0x40)
+ if (mcr_cfg & 0x40)
ast->dram_type = AST_DRAM_1Gx16;
else
ast->dram_type = AST_DRAM_512Mx32;
@@ -267,17 +336,15 @@
}
}
- data = ast_read32(ast, 0x10120);
- data2 = ast_read32(ast, 0x10170);
- if (data2 & 0x2000)
+ if (mcr_scu_strap & 0x2000)
ref_pll = 14318;
else
ref_pll = 12000;
- denum = data & 0x1f;
- num = (data & 0x3fe0) >> 5;
- data = (data & 0xc000) >> 14;
- switch (data) {
+ denum = mcr_scu_mpll & 0x1f;
+ num = (mcr_scu_mpll & 0x3fe0) >> 5;
+ dsel = (mcr_scu_mpll & 0xc000) >> 14;
+ switch (dsel) {
case 3:
div = 0x4;
break;
diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c
index 30672a3..c7c58be 100644
--- a/drivers/gpu/drm/ast/ast_post.c
+++ b/drivers/gpu/drm/ast/ast_post.c
@@ -375,12 +375,17 @@
ast_enable_mmio(dev);
ast_set_def_ext_reg(dev);
- if (ast->chip == AST2300 || ast->chip == AST2400)
- ast_init_dram_2300(dev);
- else
- ast_init_dram_reg(dev);
+ if (ast->config_mode == ast_use_p2a) {
+ if (ast->chip == AST2300 || ast->chip == AST2400)
+ ast_init_dram_2300(dev);
+ else
+ ast_init_dram_reg(dev);
- ast_init_3rdtx(dev);
+ ast_init_3rdtx(dev);
+ } else {
+ if (ast->tx_chip_type != AST_TX_NONE)
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xcf, 0x80); /* Enable DVO */
+ }
}
/* AST 2300 DRAM settings */
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 6253775..50d74e5 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -1247,6 +1247,9 @@
if (config->funcs->atomic_check)
ret = config->funcs->atomic_check(state->dev, state);
+ if (ret)
+ return ret;
+
if (!state->allow_modeset) {
for_each_crtc_in_state(state, crtc, crtc_state, i) {
if (drm_atomic_crtc_needs_modeset(crtc_state)) {
@@ -1257,7 +1260,7 @@
}
}
- return ret;
+ return 0;
}
EXPORT_SYMBOL(drm_atomic_check_only);
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index a3b96d6..58bf94b 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -330,6 +330,13 @@
return false;
}
+ /*
+ * ignore out-of-order messages or messages that are part of a
+ * failed transaction
+ */
+ if (!recv_hdr.somt && !msg->have_somt)
+ return false;
+
/* get length contained in this portion */
msg->curchunk_len = recv_hdr.msg_len;
msg->curchunk_hdrlen = hdrlen;
@@ -2163,7 +2170,7 @@
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
-static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
+static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
{
int len;
u8 replyblock[32];
@@ -2178,12 +2185,12 @@
replyblock, len);
if (ret != len) {
DRM_DEBUG_KMS("failed to read DPCD down rep %d %d\n", len, ret);
- return;
+ return false;
}
ret = drm_dp_sideband_msg_build(msg, replyblock, len, true);
if (!ret) {
DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]);
- return;
+ return false;
}
replylen = msg->curchunk_len + msg->curchunk_hdrlen;
@@ -2195,21 +2202,32 @@
ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply,
replyblock, len);
if (ret != len) {
- DRM_DEBUG_KMS("failed to read a chunk\n");
+ DRM_DEBUG_KMS("failed to read a chunk (len %d, ret %d)\n",
+ len, ret);
+ return false;
}
+
ret = drm_dp_sideband_msg_build(msg, replyblock, len, false);
- if (ret == false)
+ if (!ret) {
DRM_DEBUG_KMS("failed to build sideband msg\n");
+ return false;
+ }
+
curreply += len;
replylen -= len;
}
+ return true;
}
static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
{
int ret = 0;
- drm_dp_get_one_sb_msg(mgr, false);
+ if (!drm_dp_get_one_sb_msg(mgr, false)) {
+ memset(&mgr->down_rep_recv, 0,
+ sizeof(struct drm_dp_sideband_msg_rx));
+ return 0;
+ }
if (mgr->down_rep_recv.have_eomt) {
struct drm_dp_sideband_msg_tx *txmsg;
@@ -2265,7 +2283,12 @@
static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
{
int ret = 0;
- drm_dp_get_one_sb_msg(mgr, true);
+
+ if (!drm_dp_get_one_sb_msg(mgr, true)) {
+ memset(&mgr->up_req_recv, 0,
+ sizeof(struct drm_dp_sideband_msg_rx));
+ return 0;
+ }
if (mgr->up_req_recv.have_eomt) {
struct drm_dp_sideband_msg_req_body msg;
@@ -2317,7 +2340,9 @@
DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn);
}
- drm_dp_put_mst_branch_device(mstb);
+ if (mstb)
+ drm_dp_put_mst_branch_device(mstb);
+
memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
}
return ret;
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 4679c7d..2cc1c37d 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -75,6 +75,8 @@
#define EDID_QUIRK_FORCE_12BPC (1 << 9)
/* Force 6bpc */
#define EDID_QUIRK_FORCE_6BPC (1 << 10)
+/* Force 10bpc */
+#define EDID_QUIRK_FORCE_10BPC (1 << 11)
struct detailed_mode_closure {
struct drm_connector *connector;
@@ -117,6 +119,9 @@
{ "FCM", 13600, EDID_QUIRK_PREFER_LARGE_75 |
EDID_QUIRK_DETAILED_IN_CM },
+ /* LGD panel of HP zBook 17 G2, eDP 10 bpc, but reports unknown bpc */
+ { "LGD", 764, EDID_QUIRK_FORCE_10BPC },
+
/* LG Philips LCD LP154W01-A5 */
{ "LPL", 0, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE },
{ "LPL", 0x2a00, EDID_QUIRK_DETAILED_USE_MAXIMUM_SIZE },
@@ -3858,6 +3863,9 @@
if (quirks & EDID_QUIRK_FORCE_8BPC)
connector->display_info.bpc = 8;
+ if (quirks & EDID_QUIRK_FORCE_10BPC)
+ connector->display_info.bpc = 10;
+
if (quirks & EDID_QUIRK_FORCE_12BPC)
connector->display_info.bpc = 12;
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index b205224..9147113 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -715,13 +715,13 @@
struct drm_gem_object *obj = ptr;
struct drm_device *dev = obj->dev;
+ if (dev->driver->gem_close_object)
+ dev->driver->gem_close_object(obj, file_priv);
+
if (drm_core_check_feature(dev, DRIVER_PRIME))
drm_gem_remove_prime_handles(obj, file_priv);
drm_vma_node_revoke(&obj->vma_node, file_priv->filp);
- if (dev->driver->gem_close_object)
- dev->driver->gem_close_object(obj, file_priv);
-
drm_gem_object_handle_unreference_unlocked(obj);
return 0;
diff --git a/drivers/gpu/drm/gma500/psb_intel_lvds.c b/drivers/gpu/drm/gma500/psb_intel_lvds.c
index ce0645d..61e3a09 100644
--- a/drivers/gpu/drm/gma500/psb_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/psb_intel_lvds.c
@@ -783,20 +783,23 @@
if (scan->type & DRM_MODE_TYPE_PREFERRED) {
mode_dev->panel_fixed_mode =
drm_mode_duplicate(dev, scan);
+ DRM_DEBUG_KMS("Using mode from DDC\n");
goto out; /* FIXME: check for quirks */
}
}
/* Failed to get EDID, what about VBT? do we need this? */
- if (mode_dev->vbt_mode)
+ if (dev_priv->lfp_lvds_vbt_mode) {
mode_dev->panel_fixed_mode =
- drm_mode_duplicate(dev, mode_dev->vbt_mode);
+ drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
- if (!mode_dev->panel_fixed_mode)
- if (dev_priv->lfp_lvds_vbt_mode)
- mode_dev->panel_fixed_mode =
- drm_mode_duplicate(dev,
- dev_priv->lfp_lvds_vbt_mode);
+ if (mode_dev->panel_fixed_mode) {
+ mode_dev->panel_fixed_mode->type |=
+ DRM_MODE_TYPE_PREFERRED;
+ DRM_DEBUG_KMS("Using mode from VBT\n");
+ goto out;
+ }
+ }
/*
* If we didn't get EDID, try checking if the panel is already turned
@@ -813,6 +816,7 @@
if (mode_dev->panel_fixed_mode) {
mode_dev->panel_fixed_mode->type |=
DRM_MODE_TYPE_PREFERRED;
+ DRM_DEBUG_KMS("Using pre-programmed mode\n");
goto out; /* FIXME: check for quirks */
}
}
diff --git a/drivers/gpu/drm/i2c/adv7511.c b/drivers/gpu/drm/i2c/adv7511.c
index 00416f2..dba5c0e 100644
--- a/drivers/gpu/drm/i2c/adv7511.c
+++ b/drivers/gpu/drm/i2c/adv7511.c
@@ -36,7 +36,10 @@
bool edid_read;
wait_queue_head_t wq;
+ struct work_struct hpd_work;
+
struct drm_encoder *encoder;
+ struct drm_connector connector;
bool embedded_sync;
enum adv7511_sync_polarity vsync_polarity;
@@ -48,6 +51,10 @@
struct gpio_desc *gpio_pd;
};
+static const int edid_i2c_addr = 0x7e;
+static const int packet_i2c_addr = 0x70;
+static const int cec_i2c_addr = 0x78;
+
static struct adv7511 *encoder_to_adv7511(struct drm_encoder *encoder)
{
return to_encoder_slave(encoder)->slave_priv;
@@ -362,12 +369,19 @@
{
adv7511->current_edid_segment = -1;
- regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
- ADV7511_INT0_EDID_READY);
- regmap_write(adv7511->regmap, ADV7511_REG_INT(1),
- ADV7511_INT1_DDC_ERROR);
regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
ADV7511_POWER_POWER_DOWN, 0);
+ if (adv7511->i2c_main->irq) {
+ /*
+ * Documentation says the INT_ENABLE registers are reset in
+ * POWER_DOWN mode. My 7511w preserved the bits, however.
+ * Still, let's be safe and stick to the documentation.
+ */
+ regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(0),
+ ADV7511_INT0_EDID_READY);
+ regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(1),
+ ADV7511_INT1_DDC_ERROR);
+ }
/*
* Per spec it is allowed to pulse the HDP signal to indicate that the
@@ -422,7 +436,27 @@
return false;
}
-static int adv7511_irq_process(struct adv7511 *adv7511)
+static void adv7511_hpd_work(struct work_struct *work)
+{
+ struct adv7511 *adv7511 = container_of(work, struct adv7511, hpd_work);
+ enum drm_connector_status status;
+ unsigned int val;
+ int ret;
+ ret = regmap_read(adv7511->regmap, ADV7511_REG_STATUS, &val);
+ if (ret < 0)
+ status = connector_status_disconnected;
+ else if (val & ADV7511_STATUS_HPD)
+ status = connector_status_connected;
+ else
+ status = connector_status_disconnected;
+
+ if (adv7511->connector.status != status) {
+ adv7511->connector.status = status;
+ drm_kms_helper_hotplug_event(adv7511->connector.dev);
+ }
+}
+
+static int adv7511_irq_process(struct adv7511 *adv7511, bool process_hpd)
{
unsigned int irq0, irq1;
int ret;
@@ -438,8 +472,8 @@
regmap_write(adv7511->regmap, ADV7511_REG_INT(0), irq0);
regmap_write(adv7511->regmap, ADV7511_REG_INT(1), irq1);
- if (irq0 & ADV7511_INT0_HDP && adv7511->encoder)
- drm_helper_hpd_irq_event(adv7511->encoder->dev);
+ if (process_hpd && irq0 & ADV7511_INT0_HDP && adv7511->encoder)
+ schedule_work(&adv7511->hpd_work);
if (irq0 & ADV7511_INT0_EDID_READY || irq1 & ADV7511_INT1_DDC_ERROR) {
adv7511->edid_read = true;
@@ -456,7 +490,7 @@
struct adv7511 *adv7511 = devid;
int ret;
- ret = adv7511_irq_process(adv7511);
+ ret = adv7511_irq_process(adv7511, true);
return ret < 0 ? IRQ_NONE : IRQ_HANDLED;
}
@@ -473,7 +507,7 @@
adv7511->edid_read, msecs_to_jiffies(timeout));
} else {
for (; timeout > 0; timeout -= 25) {
- ret = adv7511_irq_process(adv7511);
+ ret = adv7511_irq_process(adv7511, false);
if (ret < 0)
break;
@@ -567,13 +601,18 @@
/* Reading the EDID only works if the device is powered */
if (!adv7511->powered) {
- regmap_write(adv7511->regmap, ADV7511_REG_INT(0),
- ADV7511_INT0_EDID_READY);
- regmap_write(adv7511->regmap, ADV7511_REG_INT(1),
- ADV7511_INT1_DDC_ERROR);
regmap_update_bits(adv7511->regmap, ADV7511_REG_POWER,
ADV7511_POWER_POWER_DOWN, 0);
+ if (adv7511->i2c_main->irq) {
+ regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(0),
+ ADV7511_INT0_EDID_READY);
+ regmap_write(adv7511->regmap, ADV7511_REG_INT_ENABLE(1),
+ ADV7511_INT1_DDC_ERROR);
+ }
adv7511->current_edid_segment = -1;
+ /* Reset the EDID_I2C_ADDR register as it might be cleared */
+ regmap_write(adv7511->regmap, ADV7511_REG_EDID_I2C_ADDR,
+ edid_i2c_addr);
}
edid = drm_do_get_edid(connector, adv7511_get_edid_block, adv7511);
@@ -849,10 +888,6 @@
return 0;
}
-static const int edid_i2c_addr = 0x7e;
-static const int packet_i2c_addr = 0x70;
-static const int cec_i2c_addr = 0x78;
-
static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
{
struct adv7511_link_config link_config;
@@ -913,6 +948,8 @@
if (!adv7511->i2c_edid)
return -ENOMEM;
+ INIT_WORK(&adv7511->hpd_work, adv7511_hpd_work);
+
if (i2c->irq) {
init_waitqueue_head(&adv7511->wq);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index fb9f647..5044f22 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1159,7 +1159,7 @@
struct intel_rps_client semaphores, mmioflips;
/* manual wa residency calculations */
- struct intel_rps_ei up_ei, down_ei;
+ struct intel_rps_ei ei;
/*
* Protects RPS/RC6 register access and PCU communication.
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 0f42a27..b7b0a38 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -994,68 +994,51 @@
ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
}
-static bool vlv_c0_above(struct drm_i915_private *dev_priv,
- const struct intel_rps_ei *old,
- const struct intel_rps_ei *now,
- int threshold)
-{
- u64 time, c0;
- unsigned int mul = 100;
-
- if (old->cz_clock == 0)
- return false;
-
- if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
- mul <<= 8;
-
- time = now->cz_clock - old->cz_clock;
- time *= threshold * dev_priv->czclk_freq;
-
- /* Workload can be split between render + media, e.g. SwapBuffers
- * being blitted in X after being rendered in mesa. To account for
- * this we need to combine both engines into our activity counter.
- */
- c0 = now->render_c0 - old->render_c0;
- c0 += now->media_c0 - old->media_c0;
- c0 *= mul * VLV_CZ_CLOCK_TO_MILLI_SEC;
-
- return c0 >= time;
-}
-
void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
{
- vlv_c0_read(dev_priv, &dev_priv->rps.down_ei);
- dev_priv->rps.up_ei = dev_priv->rps.down_ei;
+ memset(&dev_priv->rps.ei, 0, sizeof(dev_priv->rps.ei));
}
static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
{
+ const struct intel_rps_ei *prev = &dev_priv->rps.ei;
struct intel_rps_ei now;
u32 events = 0;
- if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0)
+ if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0)
return 0;
vlv_c0_read(dev_priv, &now);
if (now.cz_clock == 0)
return 0;
- if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) {
- if (!vlv_c0_above(dev_priv,
- &dev_priv->rps.down_ei, &now,
- dev_priv->rps.down_threshold))
- events |= GEN6_PM_RP_DOWN_THRESHOLD;
- dev_priv->rps.down_ei = now;
+ if (prev->cz_clock) {
+ u64 time, c0;
+ unsigned int mul;
+
+ mul = VLV_CZ_CLOCK_TO_MILLI_SEC * 100; /* scale to threshold% */
+ if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
+ mul <<= 8;
+
+ time = now.cz_clock - prev->cz_clock;
+ time *= dev_priv->czclk_freq;
+
+ /* Workload can be split between render + media,
+ * e.g. SwapBuffers being blitted in X after being rendered in
+ * mesa. To account for this we need to combine both engines
+ * into our activity counter.
+ */
+ c0 = now.render_c0 - prev->render_c0;
+ c0 += now.media_c0 - prev->media_c0;
+ c0 *= mul;
+
+ if (c0 > time * dev_priv->rps.up_threshold)
+ events = GEN6_PM_RP_UP_THRESHOLD;
+ else if (c0 < time * dev_priv->rps.down_threshold)
+ events = GEN6_PM_RP_DOWN_THRESHOLD;
}
- if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
- if (vlv_c0_above(dev_priv,
- &dev_priv->rps.up_ei, &now,
- dev_priv->rps.up_threshold))
- events |= GEN6_PM_RP_UP_THRESHOLD;
- dev_priv->rps.up_ei = now;
- }
-
+ dev_priv->rps.ei = now;
return events;
}
@@ -4390,7 +4373,7 @@
/* Let's track the enabled rps events */
if (IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
/* WaGsvRC0ResidencyMethod:vlv */
- dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED;
+ dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
else
dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index e7c1851..fd4690e 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -4376,6 +4376,12 @@
break;
}
+ /* When byt can survive without system hang with dynamic
+ * sw freq adjustments, this restriction can be lifted.
+ */
+ if (IS_VALLEYVIEW(dev_priv))
+ goto skip_hw_write;
+
I915_WRITE(GEN6_RP_UP_EI,
GT_INTERVAL_FROM_US(dev_priv, ei_up));
I915_WRITE(GEN6_RP_UP_THRESHOLD,
@@ -4394,6 +4400,7 @@
GEN6_RP_UP_BUSY_AVG |
GEN6_RP_DOWN_IDLE_AVG);
+skip_hw_write:
dev_priv->rps.power = new_power;
dev_priv->rps.up_threshold = threshold_up;
dev_priv->rps.down_threshold = threshold_down;
@@ -4404,8 +4411,9 @@
{
u32 mask = 0;
+ /* We use UP_EI_EXPIRED interupts for both up/down in manual mode */
if (val > dev_priv->rps.min_freq_softlimit)
- mask |= GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
+ mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT;
if (val < dev_priv->rps.max_freq_softlimit)
mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;
@@ -4509,7 +4517,7 @@
{
mutex_lock(&dev_priv->rps.hw_lock);
if (dev_priv->rps.enabled) {
- if (dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED))
+ if (dev_priv->pm_rps_events & GEN6_PM_RP_UP_EI_EXPIRED)
gen6_rps_reset_ei(dev_priv);
I915_WRITE(GEN6_PMINTRMSK,
gen6_rps_pm_mask(dev_priv, dev_priv->rps.cur_freq));
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index cc91ae8..6fd7b50 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -635,7 +635,8 @@
"enabling oneshot unclaimed register reporting. "
"Please use i915.mmio_debug=N for more information.\n");
__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
- i915.mmio_debug = mmio_debug_once--;
+ i915.mmio_debug = mmio_debug_once;
+ mmio_debug_once = false;
}
}
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 276329b..dfdaed2 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -1717,6 +1717,7 @@
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = drm_gem_prime_export,
.gem_prime_import = drm_gem_prime_import,
+ .gem_prime_res_obj = msm_gem_prime_res_obj,
.gem_prime_pin = msm_gem_prime_pin,
.gem_prime_unpin = msm_gem_prime_unpin,
.gem_prime_get_sg_table = msm_gem_prime_get_sg_table,
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index d2d118c..266fc44 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -448,6 +448,7 @@
void *msm_gem_prime_vmap(struct drm_gem_object *obj);
void msm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
+struct reservation_object *msm_gem_prime_res_obj(struct drm_gem_object *obj);
struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach, struct sg_table *sg);
int msm_gem_prime_pin(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
index 121975b..1fbddc5 100644
--- a/drivers/gpu/drm/msm/msm_gem_prime.c
+++ b/drivers/gpu/drm/msm/msm_gem_prime.c
@@ -70,3 +70,10 @@
if (!obj->import_attach)
msm_gem_put_pages(obj);
}
+
+struct reservation_object *msm_gem_prime_res_obj(struct drm_gem_object *obj)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+ return msm_obj->resv;
+}
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
index 82d3e28..7e4f24a 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/timer.h
@@ -4,6 +4,7 @@
struct nvkm_alarm {
struct list_head head;
+ struct list_head exec;
u64 timestamp;
void (*func)(struct nvkm_alarm *);
};
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 58a3f7c..00de1bf 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -370,7 +370,8 @@
return ret;
/* enable polling for external displays */
- drm_kms_helper_poll_enable(dev);
+ if (!dev->mode_config.poll_enabled)
+ drm_kms_helper_poll_enable(dev);
/* enable hotplug interrupts */
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index d236fc7..91a61d2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -743,7 +743,10 @@
pci_set_master(pdev);
ret = nouveau_do_resume(drm_dev, true);
- drm_kms_helper_poll_enable(drm_dev);
+
+ if (!drm_dev->mode_config.poll_enabled)
+ drm_kms_helper_poll_enable(drm_dev);
+
/* do magic */
nvif_mask(&device->object, 0x088488, (1 << 25), (1 << 25));
vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON);
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index 2e3a62d..1621c8a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -99,6 +99,7 @@
struct nouveau_bo *bo;
struct nouveau_bo *bo_gart;
u32 *suspend;
+ struct mutex mutex;
};
u64 nv84_fence_crtc(struct nouveau_channel *, int);
diff --git a/drivers/gpu/drm/nouveau/nouveau_usif.c b/drivers/gpu/drm/nouveau/nouveau_usif.c
index 6ae1b34..b7b9612 100644
--- a/drivers/gpu/drm/nouveau/nouveau_usif.c
+++ b/drivers/gpu/drm/nouveau/nouveau_usif.c
@@ -313,7 +313,8 @@
if (nvif_unpack(argv->v0, 0, 0, true)) {
/* block access to objects not created via this interface */
owner = argv->v0.owner;
- if (argv->v0.object == 0ULL)
+ if (argv->v0.object == 0ULL &&
+ argv->v0.type != NVIF_IOCTL_V0_DEL)
argv->v0.owner = NVDRM_OBJECT_ANY; /* except client */
else
argv->v0.owner = NVDRM_OBJECT_USIF;
diff --git a/drivers/gpu/drm/nouveau/nv84_fence.c b/drivers/gpu/drm/nouveau/nv84_fence.c
index 412c5be..7bc26ece 100644
--- a/drivers/gpu/drm/nouveau/nv84_fence.c
+++ b/drivers/gpu/drm/nouveau/nv84_fence.c
@@ -121,8 +121,10 @@
}
nouveau_bo_wr32(priv->bo, chan->chid * 16 / 4, fctx->base.sequence);
+ mutex_lock(&priv->mutex);
nouveau_bo_vma_del(priv->bo, &fctx->vma_gart);
nouveau_bo_vma_del(priv->bo, &fctx->vma);
+ mutex_unlock(&priv->mutex);
nouveau_fence_context_del(&fctx->base);
chan->fence = NULL;
nouveau_fence_context_free(&fctx->base);
@@ -148,11 +150,13 @@
fctx->base.sync32 = nv84_fence_sync32;
fctx->base.sequence = nv84_fence_read(chan);
+ mutex_lock(&priv->mutex);
ret = nouveau_bo_vma_add(priv->bo, cli->vm, &fctx->vma);
if (ret == 0) {
ret = nouveau_bo_vma_add(priv->bo_gart, cli->vm,
&fctx->vma_gart);
}
+ mutex_unlock(&priv->mutex);
/* map display semaphore buffers into channel's vm */
for (i = 0; !ret && i < chan->drm->dev->mode_config.num_crtc; i++) {
@@ -232,6 +236,8 @@
priv->base.context_base = fence_context_alloc(priv->base.contexts);
priv->base.uevent = true;
+ mutex_init(&priv->mutex);
+
/* Use VRAM if there is any ; otherwise fallback to system memory */
domain = drm->device.info.ram_size != 0 ? TTM_PL_FLAG_VRAM :
/*
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index ece9f41..7f8acb3 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -714,7 +714,7 @@
.i2c = nv04_i2c_new,
.imem = nv40_instmem_new,
.mc = nv44_mc_new,
- .mmu = nv44_mmu_new,
+ .mmu = nv04_mmu_new,
.pci = nv40_pci_new,
.therm = nv40_therm_new,
.timer = nv41_timer_new,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c
index d4d8942..e55f830 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv31.c
@@ -198,7 +198,7 @@
}
if (type == 0x00000010) {
- if (!nv31_mpeg_mthd(mpeg, mthd, data))
+ if (nv31_mpeg_mthd(mpeg, mthd, data))
show &= ~0x01000000;
}
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c
index d433cfa..36af0a8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/mpeg/nv44.c
@@ -172,7 +172,7 @@
}
if (type == 0x00000010) {
- if (!nv44_mpeg_mthd(subdev->device, mthd, data))
+ if (nv44_mpeg_mthd(subdev->device, mthd, data))
show &= ~0x01000000;
}
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
index c794b2c..6d8f212 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/gf100.c
@@ -129,7 +129,7 @@
if (bar->bar[0].mem) {
addr = nvkm_memory_addr(bar->bar[0].mem) >> 12;
- nvkm_wr32(device, 0x001714, 0xc0000000 | addr);
+ nvkm_wr32(device, 0x001714, 0x80000000 | addr);
}
return 0;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
index d671dcf..4896474 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c
@@ -180,6 +180,10 @@
}
}
+#ifdef __BIG_ENDIAN
+ pci->msi = false;
+#endif
+
pci->msi = nvkm_boolopt(device->cfgopt, "NvMSI", pci->msi);
if (pci->msi && func->msi_rearm) {
pci->msi = pci_enable_msi(pci->pdev) == 0;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
index 949dc61..7c0b586 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/base.c
@@ -130,7 +130,7 @@
poll = false;
}
- if (list_empty(&therm->alarm.head) && poll)
+ if (poll)
nvkm_timer_alarm(tmr, 1000000000ULL, &therm->alarm);
spin_unlock_irqrestore(&therm->lock, flags);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fan.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fan.c
index 91198d7..e2fecce 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fan.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fan.c
@@ -83,7 +83,7 @@
spin_unlock_irqrestore(&fan->lock, flags);
/* schedule next fan update, if not at target speed already */
- if (list_empty(&fan->alarm.head) && target != duty) {
+ if (target != duty) {
u16 bump_period = fan->bios.bump_period;
u16 slow_down_period = fan->bios.slow_down_period;
u64 delay;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fantog.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fantog.c
index 59701b7..ff9fbe7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fantog.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/fantog.c
@@ -53,7 +53,7 @@
duty = !nvkm_gpio_get(gpio, 0, DCB_GPIO_FAN, 0xff);
nvkm_gpio_set(gpio, 0, DCB_GPIO_FAN, 0xff, duty);
- if (list_empty(&fan->alarm.head) && percent != (duty * 100)) {
+ if (percent != (duty * 100)) {
u64 next_change = (percent * fan->period_us) / 100;
if (!duty)
next_change = fan->period_us - next_change;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c
index b9703c02..9a79e91 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/therm/temp.c
@@ -185,7 +185,7 @@
spin_unlock_irqrestore(&therm->sensor.alarm_program_lock, flags);
/* schedule the next poll in one second */
- if (therm->func->temp_get(therm) >= 0 && list_empty(&alarm->head))
+ if (therm->func->temp_get(therm) >= 0)
nvkm_timer_alarm(tmr, 1000000000ULL, alarm);
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
index d4dae1f..4603390 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
@@ -36,25 +36,32 @@
unsigned long flags;
LIST_HEAD(exec);
- /* move any due alarms off the pending list */
+ /* Process pending alarms. */
spin_lock_irqsave(&tmr->lock, flags);
list_for_each_entry_safe(alarm, atemp, &tmr->alarms, head) {
- if (alarm->timestamp <= nvkm_timer_read(tmr))
- list_move_tail(&alarm->head, &exec);
+ /* Have we hit the earliest alarm that hasn't gone off? */
+ if (alarm->timestamp > nvkm_timer_read(tmr)) {
+ /* Schedule it. If we didn't race, we're done. */
+ tmr->func->alarm_init(tmr, alarm->timestamp);
+ if (alarm->timestamp > nvkm_timer_read(tmr))
+ break;
+ }
+
+ /* Move to completed list. We'll drop the lock before
+ * executing the callback so it can reschedule itself.
+ */
+ list_del_init(&alarm->head);
+ list_add(&alarm->exec, &exec);
}
- /* reschedule interrupt for next alarm time */
- if (!list_empty(&tmr->alarms)) {
- alarm = list_first_entry(&tmr->alarms, typeof(*alarm), head);
- tmr->func->alarm_init(tmr, alarm->timestamp);
- } else {
+ /* Shut down interrupt if no more pending alarms. */
+ if (list_empty(&tmr->alarms))
tmr->func->alarm_fini(tmr);
- }
spin_unlock_irqrestore(&tmr->lock, flags);
- /* execute any pending alarm handlers */
- list_for_each_entry_safe(alarm, atemp, &exec, head) {
- list_del_init(&alarm->head);
+ /* Execute completed callbacks. */
+ list_for_each_entry_safe(alarm, atemp, &exec, exec) {
+ list_del(&alarm->exec);
alarm->func(alarm);
}
}
@@ -65,24 +72,37 @@
struct nvkm_alarm *list;
unsigned long flags;
- alarm->timestamp = nvkm_timer_read(tmr) + nsec;
-
- /* append new alarm to list, in soonest-alarm-first order */
+ /* Remove alarm from pending list.
+ *
+ * This both protects against the corruption of the list,
+ * and implements alarm rescheduling/cancellation.
+ */
spin_lock_irqsave(&tmr->lock, flags);
- if (!nsec) {
- if (!list_empty(&alarm->head))
- list_del(&alarm->head);
- } else {
+ list_del_init(&alarm->head);
+
+ if (nsec) {
+ /* Insert into pending list, ordered earliest to latest. */
+ alarm->timestamp = nvkm_timer_read(tmr) + nsec;
list_for_each_entry(list, &tmr->alarms, head) {
if (list->timestamp > alarm->timestamp)
break;
}
+
list_add_tail(&alarm->head, &list->head);
+
+ /* Update HW if this is now the earliest alarm. */
+ list = list_first_entry(&tmr->alarms, typeof(*list), head);
+ if (list == alarm) {
+ tmr->func->alarm_init(tmr, alarm->timestamp);
+ /* This shouldn't happen if callers aren't stupid.
+ *
+ * Worst case scenario is that it'll take roughly
+ * 4 seconds for the next alarm to trigger.
+ */
+ WARN_ON(alarm->timestamp <= nvkm_timer_read(tmr));
+ }
}
spin_unlock_irqrestore(&tmr->lock, flags);
-
- /* process pending alarms */
- nvkm_timer_alarm_trigger(tmr);
}
void
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c
index 7b9ce87..7f48249 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/timer/nv04.c
@@ -76,8 +76,8 @@
u32 stat = nvkm_rd32(device, NV04_PTIMER_INTR_0);
if (stat & 0x00000001) {
- nvkm_timer_alarm_trigger(tmr);
nvkm_wr32(device, NV04_PTIMER_INTR_0, 0x00000001);
+ nvkm_timer_alarm_trigger(tmr);
stat &= ~0x00000001;
}
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index d4ac8c8..8e86cf7 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -30,6 +30,7 @@
#include "radeon_audio.h"
#include "atom.h"
#include <linux/backlight.h>
+#include <linux/dmi.h>
extern int atom_debug;
@@ -2183,9 +2184,17 @@
goto assigned;
}
- /* on DCE32 and encoder can driver any block so just crtc id */
+ /*
+ * On DCE32 any encoder can drive any block so usually just use crtc id,
+ * but Apple thinks different at least on iMac10,1, so there use linkb,
+ * otherwise the internal eDP panel will stay dark.
+ */
if (ASIC_IS_DCE32(rdev)) {
- enc_idx = radeon_crtc->crtc_id;
+ if (dmi_match(DMI_PRODUCT_NAME, "iMac10,1"))
+ enc_idx = (dig->linkb) ? 1 : 0;
+ else
+ enc_idx = radeon_crtc->crtc_id;
+
goto assigned;
}
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index 4a09947..2ccf811 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -776,6 +776,12 @@
u32 vblank_time = r600_dpm_get_vblank_time(rdev);
u32 switch_limit = pi->mem_gddr5 ? 450 : 300;
+ /* disable mclk switching if the refresh is >120Hz, even if the
+ * blanking period would allow it
+ */
+ if (r600_dpm_get_vrefresh(rdev) > 120)
+ return true;
+
if (vblank_time < switch_limit)
return true;
else
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index f81fb26..134874c 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -7762,7 +7762,7 @@
WREG32(DC_HPD5_INT_CONTROL, tmp);
}
if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_INTERRUPT) {
- tmp = RREG32(DC_HPD5_INT_CONTROL);
+ tmp = RREG32(DC_HPD6_INT_CONTROL);
tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD6_INT_CONTROL, tmp);
}
@@ -7792,7 +7792,7 @@
WREG32(DC_HPD5_INT_CONTROL, tmp);
}
if (rdev->irq.stat_regs.cik.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
- tmp = RREG32(DC_HPD5_INT_CONTROL);
+ tmp = RREG32(DC_HPD6_INT_CONTROL);
tmp |= DC_HPDx_RX_INT_ACK;
WREG32(DC_HPD6_INT_CONTROL, tmp);
}
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 3249135..ba9e6ed 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -4924,7 +4924,7 @@
WREG32(DC_HPD5_INT_CONTROL, tmp);
}
if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
- tmp = RREG32(DC_HPD5_INT_CONTROL);
+ tmp = RREG32(DC_HPD6_INT_CONTROL);
tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD6_INT_CONTROL, tmp);
}
@@ -4955,7 +4955,7 @@
WREG32(DC_HPD5_INT_CONTROL, tmp);
}
if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
- tmp = RREG32(DC_HPD5_INT_CONTROL);
+ tmp = RREG32(DC_HPD6_INT_CONTROL);
tmp |= DC_HPDx_RX_INT_ACK;
WREG32(DC_HPD6_INT_CONTROL, tmp);
}
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index cc2fdf0..0e20c08 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -3945,7 +3945,7 @@
WREG32(DC_HPD5_INT_CONTROL, tmp);
}
if (rdev->irq.stat_regs.r600.disp_int_cont2 & DC_HPD6_INTERRUPT) {
- tmp = RREG32(DC_HPD5_INT_CONTROL);
+ tmp = RREG32(DC_HPD6_INT_CONTROL);
tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD6_INT_CONTROL, tmp);
}
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index a9b01bc..fcecaf5 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -3394,6 +3394,13 @@
rdev->pdev->subsystem_vendor == 0x103c &&
rdev->pdev->subsystem_device == 0x280a)
return;
+ /* quirk for rs4xx Toshiba Satellite L20-183 laptop to make it resume
+ * - it hangs on resume inside the dynclk 1 table.
+ */
+ if (rdev->family == CHIP_RS400 &&
+ rdev->pdev->subsystem_vendor == 0x1179 &&
+ rdev->pdev->subsystem_device == 0xff31)
+ return;
/* DYN CLK 1 */
table = combios_get_table_offset(dev, COMBIOS_DYN_CLK_1_TABLE);
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 4aa2cbe..a775216 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -127,6 +127,10 @@
* https://bugzilla.kernel.org/show_bug.cgi?id=51381
*/
{ PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
+ /* Asus K53TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
+ * https://bugs.freedesktop.org/show_bug.cgi?id=101491
+ */
+ { PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
/* macbook pro 8.2 */
{ PCI_VENDOR_ID_ATI, 0x6741, PCI_VENDOR_ID_APPLE, 0x00e2, RADEON_PX_QUIRK_LONG_WAKEUP },
{ 0, 0, 0, 0, 0 },
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 35310336..d684e2b 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -213,8 +213,8 @@
rbo->placement.num_busy_placement = 0;
for (i = 0; i < rbo->placement.num_placement; i++) {
if (rbo->placements[i].flags & TTM_PL_FLAG_VRAM) {
- if (rbo->placements[0].fpfn < fpfn)
- rbo->placements[0].fpfn = fpfn;
+ if (rbo->placements[i].fpfn < fpfn)
+ rbo->placements[i].fpfn = fpfn;
} else {
rbo->placement.busy_placement =
&rbo->placements[i];
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index f878d69..5cf3a2c 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -6335,7 +6335,7 @@
WREG32(DC_HPD5_INT_CONTROL, tmp);
}
if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
- tmp = RREG32(DC_HPD5_INT_CONTROL);
+ tmp = RREG32(DC_HPD6_INT_CONTROL);
tmp |= DC_HPDx_INT_ACK;
WREG32(DC_HPD6_INT_CONTROL, tmp);
}
@@ -6366,7 +6366,7 @@
WREG32(DC_HPD5_INT_CONTROL, tmp);
}
if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_RX_INTERRUPT) {
- tmp = RREG32(DC_HPD5_INT_CONTROL);
+ tmp = RREG32(DC_HPD6_INT_CONTROL);
tmp |= DC_HPDx_RX_INT_ACK;
WREG32(DC_HPD6_INT_CONTROL, tmp);
}
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
index 48cb199..9befd62 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
@@ -148,8 +148,8 @@
rcar_du_group_write(rcrtc->group, rcrtc->index % 2 ? OTAR2 : OTAR, 0);
/* Signal polarities */
- value = ((mode->flags & DRM_MODE_FLAG_PVSYNC) ? 0 : DSMR_VSL)
- | ((mode->flags & DRM_MODE_FLAG_PHSYNC) ? 0 : DSMR_HSL)
+ value = ((mode->flags & DRM_MODE_FLAG_PVSYNC) ? DSMR_VSL : 0)
+ | ((mode->flags & DRM_MODE_FLAG_PHSYNC) ? DSMR_HSL : 0)
| DSMR_DIPM_DE | DSMR_CSPM;
rcar_du_crtc_write(rcrtc, DSMR, value);
@@ -171,7 +171,7 @@
mode->crtc_vsync_start - 1);
rcar_du_crtc_write(rcrtc, VCR, mode->crtc_vtotal - 1);
- rcar_du_crtc_write(rcrtc, DESR, mode->htotal - mode->hsync_start);
+ rcar_du_crtc_write(rcrtc, DESR, mode->htotal - mode->hsync_start - 1);
rcar_du_crtc_write(rcrtc, DEWR, mode->hdisplay);
}
@@ -282,26 +282,6 @@
* Page Flip
*/
-void rcar_du_crtc_cancel_page_flip(struct rcar_du_crtc *rcrtc,
- struct drm_file *file)
-{
- struct drm_pending_vblank_event *event;
- struct drm_device *dev = rcrtc->crtc.dev;
- unsigned long flags;
-
- /* Destroy the pending vertical blanking event associated with the
- * pending page flip, if any, and disable vertical blanking interrupts.
- */
- spin_lock_irqsave(&dev->event_lock, flags);
- event = rcrtc->event;
- if (event && event->base.file_priv == file) {
- rcrtc->event = NULL;
- event->base.destroy(&event->base);
- drm_crtc_vblank_put(&rcrtc->crtc);
- }
- spin_unlock_irqrestore(&dev->event_lock, flags);
-}
-
static void rcar_du_crtc_finish_page_flip(struct rcar_du_crtc *rcrtc)
{
struct drm_pending_vblank_event *event;
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
index 4b95d9d..2bbe3f5 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
@@ -67,8 +67,6 @@
int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int index);
void rcar_du_crtc_enable_vblank(struct rcar_du_crtc *rcrtc, bool enable);
-void rcar_du_crtc_cancel_page_flip(struct rcar_du_crtc *rcrtc,
- struct drm_file *file);
void rcar_du_crtc_suspend(struct rcar_du_crtc *rcrtc);
void rcar_du_crtc_resume(struct rcar_du_crtc *rcrtc);
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
index 40422f6..bb9cd35 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c
@@ -144,91 +144,6 @@
* DRM operations
*/
-static int rcar_du_unload(struct drm_device *dev)
-{
- struct rcar_du_device *rcdu = dev->dev_private;
-
- if (rcdu->fbdev)
- drm_fbdev_cma_fini(rcdu->fbdev);
-
- drm_kms_helper_poll_fini(dev);
- drm_mode_config_cleanup(dev);
- drm_vblank_cleanup(dev);
-
- dev->irq_enabled = 0;
- dev->dev_private = NULL;
-
- return 0;
-}
-
-static int rcar_du_load(struct drm_device *dev, unsigned long flags)
-{
- struct platform_device *pdev = dev->platformdev;
- struct device_node *np = pdev->dev.of_node;
- struct rcar_du_device *rcdu;
- struct resource *mem;
- int ret;
-
- if (np == NULL) {
- dev_err(dev->dev, "no platform data\n");
- return -ENODEV;
- }
-
- rcdu = devm_kzalloc(&pdev->dev, sizeof(*rcdu), GFP_KERNEL);
- if (rcdu == NULL) {
- dev_err(dev->dev, "failed to allocate private data\n");
- return -ENOMEM;
- }
-
- init_waitqueue_head(&rcdu->commit.wait);
-
- rcdu->dev = &pdev->dev;
- rcdu->info = of_match_device(rcar_du_of_table, rcdu->dev)->data;
- rcdu->ddev = dev;
- dev->dev_private = rcdu;
-
- /* I/O resources */
- mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- rcdu->mmio = devm_ioremap_resource(&pdev->dev, mem);
- if (IS_ERR(rcdu->mmio))
- return PTR_ERR(rcdu->mmio);
-
- /* Initialize vertical blanking interrupts handling. Start with vblank
- * disabled for all CRTCs.
- */
- ret = drm_vblank_init(dev, (1 << rcdu->info->num_crtcs) - 1);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to initialize vblank\n");
- goto done;
- }
-
- /* DRM/KMS objects */
- ret = rcar_du_modeset_init(rcdu);
- if (ret < 0) {
- dev_err(&pdev->dev, "failed to initialize DRM/KMS (%d)\n", ret);
- goto done;
- }
-
- dev->irq_enabled = 1;
-
- platform_set_drvdata(pdev, rcdu);
-
-done:
- if (ret)
- rcar_du_unload(dev);
-
- return ret;
-}
-
-static void rcar_du_preclose(struct drm_device *dev, struct drm_file *file)
-{
- struct rcar_du_device *rcdu = dev->dev_private;
- unsigned int i;
-
- for (i = 0; i < rcdu->num_crtcs; ++i)
- rcar_du_crtc_cancel_page_flip(&rcdu->crtcs[i], file);
-}
-
static void rcar_du_lastclose(struct drm_device *dev)
{
struct rcar_du_device *rcdu = dev->dev_private;
@@ -269,11 +184,7 @@
static struct drm_driver rcar_du_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_PRIME
| DRIVER_ATOMIC,
- .load = rcar_du_load,
- .unload = rcar_du_unload,
- .preclose = rcar_du_preclose,
.lastclose = rcar_du_lastclose,
- .set_busid = drm_platform_set_busid,
.get_vblank_counter = drm_vblank_no_hw_counter,
.enable_vblank = rcar_du_enable_vblank,
.disable_vblank = rcar_du_disable_vblank,
@@ -333,20 +244,106 @@
* Platform driver
*/
-static int rcar_du_probe(struct platform_device *pdev)
-{
- return drm_platform_init(&rcar_du_driver, pdev);
-}
-
static int rcar_du_remove(struct platform_device *pdev)
{
struct rcar_du_device *rcdu = platform_get_drvdata(pdev);
+ struct drm_device *ddev = rcdu->ddev;
- drm_put_dev(rcdu->ddev);
+ mutex_lock(&ddev->mode_config.mutex);
+ drm_connector_unplug_all(ddev);
+ mutex_unlock(&ddev->mode_config.mutex);
+
+ drm_dev_unregister(ddev);
+
+ if (rcdu->fbdev)
+ drm_fbdev_cma_fini(rcdu->fbdev);
+
+ drm_kms_helper_poll_fini(ddev);
+ drm_mode_config_cleanup(ddev);
+
+ drm_dev_unref(ddev);
return 0;
}
+static int rcar_du_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct rcar_du_device *rcdu;
+ struct drm_connector *connector;
+ struct drm_device *ddev;
+ struct resource *mem;
+ int ret;
+
+ if (np == NULL) {
+ dev_err(&pdev->dev, "no device tree node\n");
+ return -ENODEV;
+ }
+
+ /* Allocate and initialize the DRM and R-Car device structures. */
+ rcdu = devm_kzalloc(&pdev->dev, sizeof(*rcdu), GFP_KERNEL);
+ if (rcdu == NULL)
+ return -ENOMEM;
+
+ init_waitqueue_head(&rcdu->commit.wait);
+
+ rcdu->dev = &pdev->dev;
+ rcdu->info = of_match_device(rcar_du_of_table, rcdu->dev)->data;
+
+ platform_set_drvdata(pdev, rcdu);
+
+ /* I/O resources */
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ rcdu->mmio = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(rcdu->mmio))
+ return PTR_ERR(rcdu->mmio);
+
+ /* DRM/KMS objects */
+ ddev = drm_dev_alloc(&rcar_du_driver, &pdev->dev);
+ if (!ddev)
+ return -ENOMEM;
+
+ drm_dev_set_unique(ddev, dev_name(&pdev->dev));
+
+ rcdu->ddev = ddev;
+ ddev->dev_private = rcdu;
+
+ ret = rcar_du_modeset_init(rcdu);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to initialize DRM/KMS (%d)\n", ret);
+ goto error;
+ }
+
+ ddev->irq_enabled = 1;
+
+ /* Register the DRM device with the core and the connectors with
+ * sysfs.
+ */
+ ret = drm_dev_register(ddev, 0);
+ if (ret)
+ goto error;
+
+ mutex_lock(&ddev->mode_config.mutex);
+ drm_for_each_connector(connector, ddev) {
+ ret = drm_connector_register(connector);
+ if (ret < 0)
+ break;
+ }
+ mutex_unlock(&ddev->mode_config.mutex);
+
+ if (ret < 0)
+ goto error;
+
+ DRM_INFO("Device %s probed\n", dev_name(&pdev->dev));
+
+ return 0;
+
+error:
+ rcar_du_remove(pdev);
+
+ return ret;
+}
+
static struct platform_driver rcar_du_platform_driver = {
.probe = rcar_du_probe,
.remove = rcar_du_remove,
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c b/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c
index 96f2eb4..6038be9 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_hdmicon.c
@@ -55,12 +55,6 @@
.best_encoder = rcar_du_connector_best_encoder,
};
-static void rcar_du_hdmi_connector_destroy(struct drm_connector *connector)
-{
- drm_connector_unregister(connector);
- drm_connector_cleanup(connector);
-}
-
static enum drm_connector_status
rcar_du_hdmi_connector_detect(struct drm_connector *connector, bool force)
{
@@ -79,7 +73,7 @@
.reset = drm_atomic_helper_connector_reset,
.detect = rcar_du_hdmi_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = rcar_du_hdmi_connector_destroy,
+ .destroy = drm_connector_cleanup,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
@@ -108,9 +102,6 @@
return ret;
drm_connector_helper_add(connector, &connector_helper_funcs);
- ret = drm_connector_register(connector);
- if (ret < 0)
- return ret;
connector->dpms = DRM_MODE_DPMS_OFF;
drm_object_property_set_value(&connector->base,
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
index ca12e8c..2b75a48 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
@@ -642,13 +642,13 @@
}
ret = rcar_du_encoder_init(rcdu, enc_type, output, encoder, connector);
- of_node_put(encoder);
- of_node_put(connector);
-
if (ret && ret != -EPROBE_DEFER)
dev_warn(rcdu->dev,
- "failed to initialize encoder %s (%d), skipping\n",
- encoder->full_name, ret);
+ "failed to initialize encoder %s on output %u (%d), skipping\n",
+ of_node_full_name(encoder), output, ret);
+
+ of_node_put(encoder);
+ of_node_put(connector);
return ret;
}
@@ -761,6 +761,13 @@
if (ret < 0)
return ret;
+ /* Initialize vertical blanking interrupts handling. Start with vblank
+ * disabled for all CRTCs.
+ */
+ ret = drm_vblank_init(dev, (1 << rcdu->info->num_crtcs) - 1);
+ if (ret < 0)
+ return ret;
+
/* Initialize the groups. */
num_groups = DIV_ROUND_UP(rcdu->num_crtcs, 2);
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
index 0c43032..e905f5d 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdscon.c
@@ -62,12 +62,6 @@
.best_encoder = rcar_du_connector_best_encoder,
};
-static void rcar_du_lvds_connector_destroy(struct drm_connector *connector)
-{
- drm_connector_unregister(connector);
- drm_connector_cleanup(connector);
-}
-
static enum drm_connector_status
rcar_du_lvds_connector_detect(struct drm_connector *connector, bool force)
{
@@ -79,7 +73,7 @@
.reset = drm_atomic_helper_connector_reset,
.detect = rcar_du_lvds_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = rcar_du_lvds_connector_destroy,
+ .destroy = drm_connector_cleanup,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
@@ -117,9 +111,6 @@
return ret;
drm_connector_helper_add(connector, &connector_helper_funcs);
- ret = drm_connector_register(connector);
- if (ret < 0)
- return ret;
connector->dpms = DRM_MODE_DPMS_OFF;
drm_object_property_set_value(&connector->base,
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c
index 85043c5..873e04a 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_lvdsenc.c
@@ -56,11 +56,11 @@
return ret;
/* PLL clock configuration */
- if (freq <= 38000)
+ if (freq < 39000)
pllcr = LVDPLLCR_CEEN | LVDPLLCR_COSEL | LVDPLLCR_PLLDLYCNT_38M;
- else if (freq <= 60000)
+ else if (freq < 61000)
pllcr = LVDPLLCR_CEEN | LVDPLLCR_COSEL | LVDPLLCR_PLLDLYCNT_60M;
- else if (freq <= 121000)
+ else if (freq < 121000)
pllcr = LVDPLLCR_CEEN | LVDPLLCR_COSEL | LVDPLLCR_PLLDLYCNT_121M;
else
pllcr = LVDPLLCR_PLLDLYCNT_150M;
@@ -102,7 +102,7 @@
/* Turn the PLL on, wait for the startup delay, and turn the output
* on.
*/
- lvdcr0 |= LVDCR0_PLLEN;
+ lvdcr0 |= LVDCR0_PLLON;
rcar_lvds_write(lvds, LVDCR0, lvdcr0);
usleep_range(100, 150);
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
index e0a5d8f..9d7e5c9 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vgacon.c
@@ -31,12 +31,6 @@
.best_encoder = rcar_du_connector_best_encoder,
};
-static void rcar_du_vga_connector_destroy(struct drm_connector *connector)
-{
- drm_connector_unregister(connector);
- drm_connector_cleanup(connector);
-}
-
static enum drm_connector_status
rcar_du_vga_connector_detect(struct drm_connector *connector, bool force)
{
@@ -48,7 +42,7 @@
.reset = drm_atomic_helper_connector_reset,
.detect = rcar_du_vga_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = rcar_du_vga_connector_destroy,
+ .destroy = drm_connector_cleanup,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
@@ -76,9 +70,6 @@
return ret;
drm_connector_helper_add(connector, &connector_helper_funcs);
- ret = drm_connector_register(connector);
- if (ret < 0)
- return ret;
connector->dpms = DRM_MODE_DPMS_OFF;
drm_object_property_set_value(&connector->base,
diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds_regs.h b/drivers/gpu/drm/rcar-du/rcar_lvds_regs.h
index 77cf928..b1eafd0 100644
--- a/drivers/gpu/drm/rcar-du/rcar_lvds_regs.h
+++ b/drivers/gpu/drm/rcar-du/rcar_lvds_regs.h
@@ -18,7 +18,7 @@
#define LVDCR0_DMD (1 << 12)
#define LVDCR0_LVMD_MASK (0xf << 8)
#define LVDCR0_LVMD_SHIFT 8
-#define LVDCR0_PLLEN (1 << 4)
+#define LVDCR0_PLLON (1 << 4)
#define LVDCR0_BEN (1 << 2)
#define LVDCR0_LVEN (1 << 1)
#define LVDCR0_LVRES (1 << 0)
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 8fb7213..b753914 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -66,8 +66,11 @@
if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
goto out_unlock;
+ ttm_bo_reference(bo);
up_read(&vma->vm_mm->mmap_sem);
(void) ttm_bo_wait(bo, false, true, false);
+ ttm_bo_unreserve(bo);
+ ttm_bo_unref(&bo);
goto out_unlock;
}
@@ -114,8 +117,10 @@
if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
+ ttm_bo_reference(bo);
up_read(&vma->vm_mm->mmap_sem);
(void) ttm_bo_wait_unreserved(bo);
+ ttm_bo_unref(&bo);
}
return VM_FAULT_RETRY;
@@ -160,6 +165,13 @@
ret = ttm_bo_vm_fault_idle(bo, vma, vmf);
if (unlikely(ret != 0)) {
retval = ret;
+
+ if (retval == VM_FAULT_RETRY &&
+ !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
+ /* The BO has already been unreserved. */
+ return retval;
+ }
+
goto out_unlock;
}
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
index 4f5fa8d..144367c 100644
--- a/drivers/gpu/drm/ttm/ttm_object.c
+++ b/drivers/gpu/drm/ttm/ttm_object.c
@@ -179,7 +179,7 @@
if (unlikely(ret != 0))
goto out_err0;
- ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
+ ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);
if (unlikely(ret != 0))
goto out_err1;
@@ -318,7 +318,8 @@
int ttm_ref_object_add(struct ttm_object_file *tfile,
struct ttm_base_object *base,
- enum ttm_ref_type ref_type, bool *existed)
+ enum ttm_ref_type ref_type, bool *existed,
+ bool require_existed)
{
struct drm_open_hash *ht = &tfile->ref_hash[ref_type];
struct ttm_ref_object *ref;
@@ -345,6 +346,9 @@
}
rcu_read_unlock();
+ if (require_existed)
+ return -EPERM;
+
ret = ttm_mem_global_alloc(mem_glob, sizeof(*ref),
false, false);
if (unlikely(ret != 0))
@@ -635,7 +639,7 @@
prime = (struct ttm_prime_object *) dma_buf->priv;
base = &prime->base;
*handle = base->hash.key;
- ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
+ ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL, false);
dma_buf_put(dma_buf);
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 025c429..5d8dfe0 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -612,7 +612,7 @@
} else {
pr_err("Failed to fill pool (%p)\n", pool);
/* If we have any pages left put them to the pool. */
- list_for_each_entry(p, &pool->list, lru) {
+ list_for_each_entry(p, &new_pages, lru) {
++cpages;
}
list_splice(&new_pages, &pool->list);
diff --git a/drivers/gpu/drm/virtio/virtgpu_fb.c b/drivers/gpu/drm/virtio/virtgpu_fb.c
index 6a81e08..2b59d80 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fb.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fb.c
@@ -338,7 +338,7 @@
info->fbops = &virtio_gpufb_ops;
info->pixmap.flags = FB_PIXMAP_SYSTEM;
- info->screen_base = obj->vmap;
+ info->screen_buffer = obj->vmap;
info->screen_size = obj->gem_base.size;
drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
drm_fb_helper_fill_var(info, &vfbdev->helper,
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
index f300eba..1244cdf 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -81,8 +81,10 @@
return -ENOMEM;
size = roundup(size, PAGE_SIZE);
ret = drm_gem_object_init(vgdev->ddev, &bo->gem_base, size);
- if (ret != 0)
+ if (ret != 0) {
+ kfree(bo);
return ret;
+ }
bo->dumb = false;
virtio_gpu_init_ttm_placement(bo, pinned);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
index 13db8a2..1f013d4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
@@ -321,6 +321,7 @@
list_for_each_entry_safe(entry, next, &man->list, head)
vmw_cmdbuf_res_free(man, entry);
+ drm_ht_remove(&man->resources);
kfree(man);
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index ecf15cf..04fd0f2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -471,7 +471,7 @@
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
- return capable(CAP_SYS_ADMIN) ? : -EINVAL;
+ return -EINVAL;
}
static int vmw_cmd_ok(struct vmw_private *dev_priv,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 8e689b4..6c649f7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -539,7 +539,7 @@
struct vmw_fence_obj **p_fence)
{
struct vmw_fence_obj *fence;
- int ret;
+ int ret;
fence = kzalloc(sizeof(*fence), GFP_KERNEL);
if (unlikely(fence == NULL))
@@ -702,6 +702,41 @@
}
+/**
+ * vmw_fence_obj_lookup - Look up a user-space fence object
+ *
+ * @tfile: A struct ttm_object_file identifying the caller.
+ * @handle: A handle identifying the fence object.
+ * @return: A struct vmw_user_fence base ttm object on success or
+ * an error pointer on failure.
+ *
+ * The fence object is looked up and type-checked. The caller needs
+ * to have opened the fence object first, but since that happens on
+ * creation and fence objects aren't shareable, that's not an
+ * issue currently.
+ */
+static struct ttm_base_object *
+vmw_fence_obj_lookup(struct ttm_object_file *tfile, u32 handle)
+{
+ struct ttm_base_object *base = ttm_base_object_lookup(tfile, handle);
+
+ if (!base) {
+ pr_err("Invalid fence object handle 0x%08lx.\n",
+ (unsigned long)handle);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (base->refcount_release != vmw_user_fence_base_release) {
+ pr_err("Invalid fence object handle 0x%08lx.\n",
+ (unsigned long)handle);
+ ttm_base_object_unref(&base);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return base;
+}
+
+
int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
@@ -727,13 +762,9 @@
arg->kernel_cookie = jiffies + wait_timeout;
}
- base = ttm_base_object_lookup(tfile, arg->handle);
- if (unlikely(base == NULL)) {
- printk(KERN_ERR "Wait invalid fence object handle "
- "0x%08lx.\n",
- (unsigned long)arg->handle);
- return -EINVAL;
- }
+ base = vmw_fence_obj_lookup(tfile, arg->handle);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
fence = &(container_of(base, struct vmw_user_fence, base)->fence);
@@ -772,13 +803,9 @@
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
struct vmw_private *dev_priv = vmw_priv(dev);
- base = ttm_base_object_lookup(tfile, arg->handle);
- if (unlikely(base == NULL)) {
- printk(KERN_ERR "Fence signaled invalid fence object handle "
- "0x%08lx.\n",
- (unsigned long)arg->handle);
- return -EINVAL;
- }
+ base = vmw_fence_obj_lookup(tfile, arg->handle);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
fence = &(container_of(base, struct vmw_user_fence, base)->fence);
fman = fman_from_fence(fence);
@@ -1093,6 +1120,7 @@
(struct drm_vmw_fence_event_arg *) data;
struct vmw_fence_obj *fence = NULL;
struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
+ struct ttm_object_file *tfile = vmw_fp->tfile;
struct drm_vmw_fence_rep __user *user_fence_rep =
(struct drm_vmw_fence_rep __user *)(unsigned long)
arg->fence_rep;
@@ -1106,24 +1134,18 @@
*/
if (arg->handle) {
struct ttm_base_object *base =
- ttm_base_object_lookup_for_ref(dev_priv->tdev,
- arg->handle);
+ vmw_fence_obj_lookup(tfile, arg->handle);
- if (unlikely(base == NULL)) {
- DRM_ERROR("Fence event invalid fence object handle "
- "0x%08lx.\n",
- (unsigned long)arg->handle);
- return -EINVAL;
- }
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
fence = &(container_of(base, struct vmw_user_fence,
base)->fence);
(void) vmw_fence_obj_reference(fence);
if (user_fence_rep != NULL) {
- bool existed;
-
ret = ttm_ref_object_add(vmw_fp->tfile, base,
- TTM_REF_USAGE, &existed);
+ TTM_REF_USAGE, NULL, false);
if (unlikely(ret != 0)) {
DRM_ERROR("Failed to reference a fence "
"object.\n");
@@ -1166,8 +1188,7 @@
return 0;
out_no_create:
if (user_fence_rep != NULL)
- ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
- handle, TTM_REF_USAGE);
+ ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
out_no_ref_obj:
vmw_fence_obj_unreference(&fence);
return ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index b6a0806..a1c68e6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -368,6 +368,8 @@
return fifo_state->static_buffer;
else {
fifo_state->dynamic_buffer = vmalloc(bytes);
+ if (!fifo_state->dynamic_buffer)
+ goto out_err;
return fifo_state->dynamic_buffer;
}
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index b8c6a03..5ec24fd 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -114,8 +114,6 @@
param->value = dev_priv->has_dx;
break;
default:
- DRM_ERROR("Illegal vmwgfx get param request: %d\n",
- param->param);
return -EINVAL;
}
@@ -186,7 +184,7 @@
bool gb_objects = !!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS);
struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
- if (unlikely(arg->pad64 != 0)) {
+ if (unlikely(arg->pad64 != 0 || arg->max_size == 0)) {
DRM_ERROR("Illegal GET_3D_CAP argument.\n");
return -EINVAL;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index e57667c..dbca128 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -591,7 +591,7 @@
return ret;
ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
- TTM_REF_SYNCCPU_WRITE, &existed);
+ TTM_REF_SYNCCPU_WRITE, &existed, false);
if (ret != 0 || existed)
ttm_bo_synccpu_write_release(&user_bo->dma.base);
@@ -775,7 +775,7 @@
*handle = user_bo->prime.base.hash.key;
return ttm_ref_object_add(tfile, &user_bo->prime.base,
- TTM_REF_USAGE, NULL);
+ TTM_REF_USAGE, NULL, false);
}
/*
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 7d620e8..0279870 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -715,11 +715,14 @@
128;
num_sizes = 0;
- for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
+ for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
+ if (req->mip_levels[i] > DRM_VMW_MAX_MIP_LEVELS)
+ return -EINVAL;
num_sizes += req->mip_levels[i];
+ }
- if (num_sizes > DRM_VMW_MAX_SURFACE_FACES *
- DRM_VMW_MAX_MIP_LEVELS)
+ if (num_sizes > DRM_VMW_MAX_SURFACE_FACES * DRM_VMW_MAX_MIP_LEVELS ||
+ num_sizes == 0)
return -EINVAL;
size = vmw_user_surface_size + 128 +
@@ -904,17 +907,16 @@
uint32_t handle;
struct ttm_base_object *base;
int ret;
+ bool require_exist = false;
if (handle_type == DRM_VMW_HANDLE_PRIME) {
ret = ttm_prime_fd_to_handle(tfile, u_handle, &handle);
if (unlikely(ret != 0))
return ret;
} else {
- if (unlikely(drm_is_render_client(file_priv))) {
- DRM_ERROR("Render client refused legacy "
- "surface reference.\n");
- return -EACCES;
- }
+ if (unlikely(drm_is_render_client(file_priv)))
+ require_exist = true;
+
if (ACCESS_ONCE(vmw_fpriv(file_priv)->locked_master)) {
DRM_ERROR("Locked master refused legacy "
"surface reference.\n");
@@ -942,17 +944,14 @@
/*
* Make sure the surface creator has the same
- * authenticating master.
+ * authenticating master, or is already registered with us.
*/
if (drm_is_primary_client(file_priv) &&
- user_srf->master != file_priv->master) {
- DRM_ERROR("Trying to reference surface outside of"
- " master domain.\n");
- ret = -EACCES;
- goto out_bad_resource;
- }
+ user_srf->master != file_priv->master)
+ require_exist = true;
- ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
+ ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL,
+ require_exist);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not add a reference to a surface.\n");
goto out_bad_resource;
@@ -1289,11 +1288,14 @@
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
int ret;
uint32_t size;
- uint32_t backup_handle;
+ uint32_t backup_handle = 0;
if (req->multisample_count != 0)
return -EINVAL;
+ if (req->mip_levels > DRM_VMW_MAX_MIP_LEVELS)
+ return -EINVAL;
+
if (unlikely(vmw_user_surface_size == 0))
vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
128;
@@ -1329,12 +1331,16 @@
ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle,
&res->backup,
&user_srf->backup_base);
- if (ret == 0 && res->backup->base.num_pages * PAGE_SIZE <
- res->backup_size) {
- DRM_ERROR("Surface backup buffer is too small.\n");
- vmw_dmabuf_unreference(&res->backup);
- ret = -EINVAL;
- goto out_unlock;
+ if (ret == 0) {
+ if (res->backup->base.num_pages * PAGE_SIZE <
+ res->backup_size) {
+ DRM_ERROR("Surface backup buffer is too small.\n");
+ vmw_dmabuf_unreference(&res->backup);
+ ret = -EINVAL;
+ goto out_unlock;
+ } else {
+ backup_handle = req->buffer_handle;
+ }
}
} else if (req->drm_surface_flags & drm_vmw_surface_flag_create_buffer)
ret = vmw_user_dmabuf_alloc(dev_priv, tfile,
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index d79d961..ddc53ed 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -26,6 +26,7 @@
#include "adreno_iommu.h"
#include "adreno_pm4types.h"
#include "adreno_ringbuffer.h"
+#include "adreno_trace.h"
#include "a3xx_reg.h"
#include "adreno_a5xx.h"
@@ -58,6 +59,7 @@
}
static void adreno_get_submit_time(struct adreno_device *adreno_dev,
+ struct adreno_ringbuffer *rb,
struct adreno_submit_time *time)
{
unsigned long flags;
@@ -87,6 +89,9 @@
} else
time->ticks = 0;
+ /* Trace the GPU time to create a mapping to ftrace time */
+ trace_adreno_cmdbatch_sync(rb->drawctxt_active, time->ticks);
+
/* Get the kernel clock for time since boot */
time->ktime = local_clock();
@@ -128,7 +133,7 @@
_cff_write_ringbuffer(rb);
if (time != NULL)
- adreno_get_submit_time(adreno_dev, time);
+ adreno_get_submit_time(adreno_dev, rb, time);
adreno_ringbuffer_wptr(adreno_dev, rb);
}
diff --git a/drivers/gpu/msm/adreno_trace.h b/drivers/gpu/msm/adreno_trace.h
index 16ca098..74c4c4e 100644
--- a/drivers/gpu/msm/adreno_trace.h
+++ b/drivers/gpu/msm/adreno_trace.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -148,6 +148,29 @@
)
);
+TRACE_EVENT(adreno_cmdbatch_sync,
+ TP_PROTO(struct adreno_context *drawctxt,
+ uint64_t ticks),
+ TP_ARGS(drawctxt, ticks),
+ TP_STRUCT__entry(
+ __field(unsigned int, id)
+ __field(unsigned int, timestamp)
+ __field(uint64_t, ticks)
+ __field(int, prio)
+ ),
+ TP_fast_assign(
+ __entry->id = drawctxt->base.id;
+ __entry->timestamp = drawctxt->timestamp;
+ __entry->ticks = ticks;
+ __entry->prio = drawctxt->base.priority;
+ ),
+ TP_printk(
+ "ctx=%u ctx_prio=%d ts=%u ticks=%lld",
+ __entry->id, __entry->prio, __entry->timestamp,
+ __entry->ticks
+ )
+);
+
TRACE_EVENT(adreno_cmdbatch_fault,
TP_PROTO(struct kgsl_drawobj_cmd *cmdobj, unsigned int fault),
TP_ARGS(cmdobj, fault),
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 34ea83d..13dc273 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -2428,6 +2428,7 @@
{ HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0002) },
{ HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0003) },
{ HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0004) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PETZL, USB_DEVICE_ID_PETZL_HEADLAMP) },
{ HID_USB_DEVICE(USB_VENDOR_ID_PHILIPS, USB_DEVICE_ID_PHILIPS_IEEE802154_DONGLE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_POWERCOM, USB_DEVICE_ID_POWERCOM_UPS) },
#if defined(CONFIG_MOUSE_SYNAPTICS_USB) || defined(CONFIG_MOUSE_SYNAPTICS_USB_MODULE)
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index e370306..37cbc2e 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -285,6 +285,9 @@
#define USB_VENDOR_ID_DEALEXTREAME 0x10c5
#define USB_DEVICE_ID_DEALEXTREAME_RADIO_SI4701 0x819a
+#define USB_VENDOR_ID_DELL 0x413c
+#define USB_DEVICE_ID_DELL_PIXART_USB_OPTICAL_MOUSE 0x301a
+
#define USB_VENDOR_ID_DELORME 0x1163
#define USB_DEVICE_ID_DELORME_EARTHMATE 0x0100
#define USB_DEVICE_ID_DELORME_EM_LT20 0x0200
@@ -774,6 +777,9 @@
#define USB_VENDOR_ID_PETALYNX 0x18b1
#define USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE 0x0037
+#define USB_VENDOR_ID_PETZL 0x2122
+#define USB_DEVICE_ID_PETZL_HEADLAMP 0x1234
+
#define USB_VENDOR_ID_PHILIPS 0x0471
#define USB_DEVICE_ID_PHILIPS_IEEE802154_DONGLE 0x0617
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index 0b80633..d4d655a 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -364,6 +364,15 @@
if (ret)
return ret;
+ /*
+ * The HID over I2C specification states that if a DEVICE needs time
+ * after the PWR_ON request, it should utilise CLOCK stretching.
+ * However, it has been observed that the Windows driver provides a
+ * 1ms sleep between the PWR_ON and RESET requests and that some devices
+ * rely on this.
+ */
+ usleep_range(1000, 5000);
+
i2c_hid_dbg(ihid, "resetting...\n");
ret = i2c_hid_command(client, &hid_reset_cmd, NULL, 0);
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 6ca6ab0..ce1543d 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -72,6 +72,7 @@
{ USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_PIXART_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
{ USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_CREATIVE_SB_OMNI_SURROUND_51, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_DELL, USB_DEVICE_ID_DELL_PIXART_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
{ USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_ELAN, HID_ANY_ID, HID_QUIRK_ALWAYS_POLL },
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 35e3fd9..b62c50d 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -1440,37 +1440,38 @@
{
unsigned char *data = wacom->data;
- if (wacom->pen_input)
+ if (wacom->pen_input) {
dev_dbg(wacom->pen_input->dev.parent,
"%s: received report #%d\n", __func__, data[0]);
- else if (wacom->touch_input)
+
+ if (len == WACOM_PKGLEN_PENABLED ||
+ data[0] == WACOM_REPORT_PENABLED)
+ return wacom_tpc_pen(wacom);
+ }
+ else if (wacom->touch_input) {
dev_dbg(wacom->touch_input->dev.parent,
"%s: received report #%d\n", __func__, data[0]);
- switch (len) {
- case WACOM_PKGLEN_TPC1FG:
- return wacom_tpc_single_touch(wacom, len);
-
- case WACOM_PKGLEN_TPC2FG:
- return wacom_tpc_mt_touch(wacom);
-
- case WACOM_PKGLEN_PENABLED:
- return wacom_tpc_pen(wacom);
-
- default:
- switch (data[0]) {
- case WACOM_REPORT_TPC1FG:
- case WACOM_REPORT_TPCHID:
- case WACOM_REPORT_TPCST:
- case WACOM_REPORT_TPC1FGE:
+ switch (len) {
+ case WACOM_PKGLEN_TPC1FG:
return wacom_tpc_single_touch(wacom, len);
- case WACOM_REPORT_TPCMT:
- case WACOM_REPORT_TPCMT2:
- return wacom_mt_touch(wacom);
+ case WACOM_PKGLEN_TPC2FG:
+ return wacom_tpc_mt_touch(wacom);
- case WACOM_REPORT_PENABLED:
- return wacom_tpc_pen(wacom);
+ default:
+ switch (data[0]) {
+ case WACOM_REPORT_TPC1FG:
+ case WACOM_REPORT_TPCHID:
+ case WACOM_REPORT_TPCST:
+ case WACOM_REPORT_TPC1FGE:
+ return wacom_tpc_single_touch(wacom, len);
+
+ case WACOM_REPORT_TPCMT:
+ case WACOM_REPORT_TPCMT2:
+ return wacom_mt_touch(wacom);
+
+ }
}
}
diff --git a/drivers/htc_radio/htc_radio_smem.c b/drivers/htc_radio/htc_radio_smem.c
index 1396711..08e8da8 100644
--- a/drivers/htc_radio/htc_radio_smem.c
+++ b/drivers/htc_radio/htc_radio_smem.c
@@ -21,37 +21,20 @@
#include <linux/fs.h>
#include <linux/of.h>
#include <linux/ctype.h>
+#include <asm/io.h>
#include "htc_radio_smem.h"
#include <soc/qcom/smem.h>
#define DEVICE_TREE_RADIO_PATH "/chosen/radio"
-static void smem_init(struct htc_smem_type *smem)
-{
- int i = 0;
-
- smem->version = 0;
- smem->struct_size = 0;
- smem->htc_smem_pid = 0;
- smem->htc_smem_app_run_mode = 0;
- smem->htc_smem_flag = 0;
- smem->htc_smem_factory_reset = 0;
-
- for (i = 0; i < sizeof(smem->htc_rom_ver); i++)
- smem->htc_rom_ver[i] = 0;
-
- for (i = 0; i < sizeof(smem->htc_smem_skuid); i++)
- smem->htc_smem_skuid[i] = 0;
-
- for (i = 0; i < sizeof(smem->reserved); i++)
- smem->reserved[i] = 0;
-}
-
static int htc_radio_smem_probe(struct platform_device *pdev)
{
int ret = -1;
struct device_node *dnp;
struct htc_smem_type *htc_radio_smem;
+ u32 value;
+ u8 buffer[max(sizeof(htc_radio_smem->htc_rom_ver),
+ sizeof(htc_radio_smem->htc_smem_skuid))];
pr_info("[smem]%s: start.\n", __func__);
@@ -68,28 +51,37 @@
return ret;
}
- /* set smem init 0 */
- smem_init(htc_radio_smem);
+ /* set smem to 0 */
+ memset_io(htc_radio_smem, 0, sizeof(*htc_radio_smem));
/* get smem data from radio data note */
dnp = of_find_node_by_path(DEVICE_TREE_RADIO_PATH);
if (dnp) {
- htc_radio_smem->version = HTC_RADIO_SMEM_VERSION;
- htc_radio_smem->struct_size = sizeof(struct htc_smem_type);
- of_property_read_u32(dnp, "htc_smem_radio_dbg_flag",
- &htc_radio_smem->htc_smem_flag);
- of_property_read_u32(dnp, "htc_smem_app_run_mode",
- &htc_radio_smem->htc_smem_app_run_mode);
- of_property_read_u32(dnp, "htc_smem_pid",
- &htc_radio_smem->htc_smem_pid);
- of_property_read_u32(dnp, "htc_smem_factory_reset",
- &htc_radio_smem->htc_smem_factory_reset);
- of_property_read_u8_array(dnp, "htc_rom_ver",
- &htc_radio_smem->htc_rom_ver[0],
+ htc_smem_set_u32(htc_radio_smem, version,
+ HTC_RADIO_SMEM_VERSION);
+ htc_smem_set_u32(htc_radio_smem, struct_size,
+ sizeof(struct htc_smem_type));
+
+ of_property_read_u32(dnp, "htc_smem_radio_dbg_flag", &value);
+ htc_smem_set_u32(htc_radio_smem, htc_smem_flag, value);
+
+ of_property_read_u32(dnp, "htc_smem_app_run_mode", &value);
+ htc_smem_set_u32(htc_radio_smem, htc_smem_app_run_mode, value);
+
+ of_property_read_u32(dnp, "htc_smem_pid", &value);
+ htc_smem_set_u32(htc_radio_smem, htc_smem_pid, value);
+
+ of_property_read_u32(dnp, "htc_smem_factory_reset", &value);
+ htc_smem_set_u32(htc_radio_smem, htc_smem_factory_reset,
+ value);
+
+ of_property_read_u8_array(dnp, "htc_rom_ver", buffer,
sizeof(htc_radio_smem->htc_rom_ver));
- of_property_read_u8_array(dnp, "sku_id",
- &htc_radio_smem->htc_smem_skuid[0],
+ htc_smem_copy(htc_radio_smem, htc_rom_ver, buffer);
+
+ of_property_read_u8_array(dnp, "sku_id", buffer,
sizeof(htc_radio_smem->htc_smem_skuid));
+ htc_smem_copy(htc_radio_smem, htc_smem_skuid, buffer);
} else
pr_err("[smem]%s: cannot find path %s.\n", __func__,
DEVICE_TREE_RADIO_PATH);
diff --git a/drivers/htc_radio/htc_radio_smem.h b/drivers/htc_radio/htc_radio_smem.h
index 7bb0829..bacd924 100644
--- a/drivers/htc_radio/htc_radio_smem.h
+++ b/drivers/htc_radio/htc_radio_smem.h
@@ -34,4 +34,17 @@
/* totally 2048 bytes */
};
+#define htc_smem_addr(smem, field) \
+ ({ \
+ volatile void __iomem *__p = (smem); \
+ __p += offsetof(typeof(*(smem)), field); \
+ __p; \
+ })
+
+#define htc_smem_set_u32(p, field, value) \
+ writel((value), htc_smem_addr((p), field))
+
+#define htc_smem_copy(p, field, src) \
+ memcpy_toio(htc_smem_addr((p), field), (src), sizeof((p)->field))
+
#endif /* end of _HTC_RADIO_SMEM_H */
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
index 1ef37c7..d037454 100644
--- a/drivers/hv/channel.c
+++ b/drivers/hv/channel.c
@@ -73,7 +73,6 @@
void *in, *out;
unsigned long flags;
int ret, err = 0;
- unsigned long t;
struct page *page;
spin_lock_irqsave(&newchannel->lock, flags);
@@ -183,11 +182,7 @@
goto error1;
}
- t = wait_for_completion_timeout(&open_info->waitevent, 5*HZ);
- if (t == 0) {
- err = -ETIMEDOUT;
- goto error1;
- }
+ wait_for_completion(&open_info->waitevent);
spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
list_del(&open_info->msglistentry);
@@ -375,7 +370,7 @@
struct vmbus_channel_gpadl_header *gpadlmsg;
struct vmbus_channel_gpadl_body *gpadl_body;
struct vmbus_channel_msginfo *msginfo = NULL;
- struct vmbus_channel_msginfo *submsginfo;
+ struct vmbus_channel_msginfo *submsginfo, *tmp;
u32 msgcount;
struct list_head *curr;
u32 next_gpadl_handle;
@@ -437,6 +432,13 @@
list_del(&msginfo->msglistentry);
spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
+ if (msgcount > 1) {
+ list_for_each_entry_safe(submsginfo, tmp, &msginfo->submsglist,
+ msglistentry) {
+ kfree(submsginfo);
+ }
+ }
+
kfree(msginfo);
return ret;
}
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index 4fc2e88..2bbc530 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -429,7 +429,7 @@
union hv_connection_id conn_id;
int ret = 0;
int retries = 0;
- u32 msec = 1;
+ u32 usec = 1;
conn_id.asu32 = 0;
conn_id.u.id = VMBUS_MESSAGE_CONNECTION_ID;
@@ -462,9 +462,9 @@
}
retries++;
- msleep(msec);
- if (msec < 2048)
- msec *= 2;
+ udelay(usec);
+ if (usec < 2048)
+ usec *= 2;
}
return ret;
}
diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
index 57c1917..8ce1f2e 100644
--- a/drivers/hv/hv.c
+++ b/drivers/hv/hv.c
@@ -274,7 +274,7 @@
*
* This routine is called normally during driver unloading or exiting.
*/
-void hv_cleanup(void)
+void hv_cleanup(bool crash)
{
union hv_x64_msr_hypercall_contents hypercall_msr;
@@ -284,7 +284,8 @@
if (hv_context.hypercall_page) {
hypercall_msr.as_uint64 = 0;
wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
- vfree(hv_context.hypercall_page);
+ if (!crash)
+ vfree(hv_context.hypercall_page);
hv_context.hypercall_page = NULL;
}
@@ -304,8 +305,10 @@
hypercall_msr.as_uint64 = 0;
wrmsrl(HV_X64_MSR_REFERENCE_TSC, hypercall_msr.as_uint64);
- vfree(hv_context.tsc_page);
- hv_context.tsc_page = NULL;
+ if (!crash) {
+ vfree(hv_context.tsc_page);
+ hv_context.tsc_page = NULL;
+ }
}
#endif
}
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index b853b4b..354da7f 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -430,16 +430,27 @@
* currently hot added. We hot add in multiples of 128M
* chunks; it is possible that we may not be able to bring
* online all the pages in the region. The range
- * covered_end_pfn defines the pages that can
+ * covered_start_pfn:covered_end_pfn defines the pages that can
* be brough online.
*/
struct hv_hotadd_state {
struct list_head list;
unsigned long start_pfn;
+ unsigned long covered_start_pfn;
unsigned long covered_end_pfn;
unsigned long ha_end_pfn;
unsigned long end_pfn;
+ /*
+ * A list of gaps.
+ */
+ struct list_head gap_list;
+};
+
+struct hv_hotadd_gap {
+ struct list_head list;
+ unsigned long start_pfn;
+ unsigned long end_pfn;
};
struct balloon_state {
@@ -595,18 +606,46 @@
.priority = 0
};
+/* Check if the particular page is backed and can be onlined and online it. */
+static void hv_page_online_one(struct hv_hotadd_state *has, struct page *pg)
+{
+ unsigned long cur_start_pgp;
+ unsigned long cur_end_pgp;
+ struct hv_hotadd_gap *gap;
-static void hv_bring_pgs_online(unsigned long start_pfn, unsigned long size)
+ cur_start_pgp = (unsigned long)pfn_to_page(has->covered_start_pfn);
+ cur_end_pgp = (unsigned long)pfn_to_page(has->covered_end_pfn);
+
+ /* The page is not backed. */
+ if (((unsigned long)pg < cur_start_pgp) ||
+ ((unsigned long)pg >= cur_end_pgp))
+ return;
+
+ /* Check for gaps. */
+ list_for_each_entry(gap, &has->gap_list, list) {
+ cur_start_pgp = (unsigned long)
+ pfn_to_page(gap->start_pfn);
+ cur_end_pgp = (unsigned long)
+ pfn_to_page(gap->end_pfn);
+ if (((unsigned long)pg >= cur_start_pgp) &&
+ ((unsigned long)pg < cur_end_pgp)) {
+ return;
+ }
+ }
+
+ /* This frame is currently backed; online the page. */
+ __online_page_set_limits(pg);
+ __online_page_increment_counters(pg);
+ __online_page_free(pg);
+}
+
+static void hv_bring_pgs_online(struct hv_hotadd_state *has,
+ unsigned long start_pfn, unsigned long size)
{
int i;
- for (i = 0; i < size; i++) {
- struct page *pg;
- pg = pfn_to_page(start_pfn + i);
- __online_page_set_limits(pg);
- __online_page_increment_counters(pg);
- __online_page_free(pg);
- }
+ for (i = 0; i < size; i++)
+ hv_page_online_one(has, pfn_to_page(start_pfn + i));
}
static void hv_mem_hot_add(unsigned long start, unsigned long size,
@@ -682,26 +721,25 @@
list_for_each(cur, &dm_device.ha_region_list) {
has = list_entry(cur, struct hv_hotadd_state, list);
- cur_start_pgp = (unsigned long)pfn_to_page(has->start_pfn);
- cur_end_pgp = (unsigned long)pfn_to_page(has->covered_end_pfn);
+ cur_start_pgp = (unsigned long)
+ pfn_to_page(has->start_pfn);
+ cur_end_pgp = (unsigned long)pfn_to_page(has->end_pfn);
- if (((unsigned long)pg >= cur_start_pgp) &&
- ((unsigned long)pg < cur_end_pgp)) {
- /*
- * This frame is currently backed; online the
- * page.
- */
- __online_page_set_limits(pg);
- __online_page_increment_counters(pg);
- __online_page_free(pg);
- }
+ /* The page belongs to a different HAS. */
+ if (((unsigned long)pg < cur_start_pgp) ||
+ ((unsigned long)pg >= cur_end_pgp))
+ continue;
+
+ hv_page_online_one(has, pg);
+ break;
}
}
-static bool pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
+static int pfn_covered(unsigned long start_pfn, unsigned long pfn_cnt)
{
struct list_head *cur;
struct hv_hotadd_state *has;
+ struct hv_hotadd_gap *gap;
unsigned long residual, new_inc;
if (list_empty(&dm_device.ha_region_list))
@@ -714,8 +752,26 @@
* If the pfn range we are dealing with is not in the current
* "hot add block", move on.
*/
- if ((start_pfn >= has->end_pfn))
+ if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn)
continue;
+
+ /*
+ * If the current start pfn is not where the covered_end
+ * is, create a gap and update covered_end_pfn.
+ */
+ if (has->covered_end_pfn != start_pfn) {
+ gap = kzalloc(sizeof(struct hv_hotadd_gap), GFP_ATOMIC);
+ if (!gap)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&gap->list);
+ gap->start_pfn = has->covered_end_pfn;
+ gap->end_pfn = start_pfn;
+ list_add_tail(&gap->list, &has->gap_list);
+
+ has->covered_end_pfn = start_pfn;
+ }
+
/*
* If the current hot add-request extends beyond
* our current limit; extend it.
@@ -732,19 +788,10 @@
has->end_pfn += new_inc;
}
- /*
- * If the current start pfn is not where the covered_end
- * is, update it.
- */
-
- if (has->covered_end_pfn != start_pfn)
- has->covered_end_pfn = start_pfn;
-
- return true;
-
+ return 1;
}
- return false;
+ return 0;
}
static unsigned long handle_pg_range(unsigned long pg_start,
@@ -768,7 +815,7 @@
* If the pfn range we are dealing with is not in the current
* "hot add block", move on.
*/
- if ((start_pfn >= has->end_pfn))
+ if (start_pfn < has->start_pfn || start_pfn >= has->end_pfn)
continue;
old_covered_state = has->covered_end_pfn;
@@ -783,6 +830,8 @@
if (pgs_ol > pfn_cnt)
pgs_ol = pfn_cnt;
+ has->covered_end_pfn += pgs_ol;
+ pfn_cnt -= pgs_ol;
/*
* Check if the corresponding memory block is already
* online by checking its last previously backed page.
@@ -791,10 +840,8 @@
*/
if (start_pfn > has->start_pfn &&
!PageReserved(pfn_to_page(start_pfn - 1)))
- hv_bring_pgs_online(start_pfn, pgs_ol);
+ hv_bring_pgs_online(has, start_pfn, pgs_ol);
- has->covered_end_pfn += pgs_ol;
- pfn_cnt -= pgs_ol;
}
if ((has->ha_end_pfn < has->end_pfn) && (pfn_cnt > 0)) {
@@ -832,13 +879,19 @@
unsigned long rg_size)
{
struct hv_hotadd_state *ha_region = NULL;
+ int covered;
if (pfn_cnt == 0)
return 0;
- if (!dm_device.host_specified_ha_region)
- if (pfn_covered(pg_start, pfn_cnt))
+ if (!dm_device.host_specified_ha_region) {
+ covered = pfn_covered(pg_start, pfn_cnt);
+ if (covered < 0)
+ return 0;
+
+ if (covered)
goto do_pg_range;
+ }
/*
* If the host has specified a hot-add range; deal with it first.
@@ -850,10 +903,12 @@
return 0;
INIT_LIST_HEAD(&ha_region->list);
+ INIT_LIST_HEAD(&ha_region->gap_list);
list_add_tail(&ha_region->list, &dm_device.ha_region_list);
ha_region->start_pfn = rg_start;
ha_region->ha_end_pfn = rg_start;
+ ha_region->covered_start_pfn = pg_start;
ha_region->covered_end_pfn = pg_start;
ha_region->end_pfn = rg_start + rg_size;
}
@@ -1581,6 +1636,7 @@
struct hv_dynmem_device *dm = hv_get_drvdata(dev);
struct list_head *cur, *tmp;
struct hv_hotadd_state *has;
+ struct hv_hotadd_gap *gap, *tmp_gap;
if (dm->num_pages_ballooned != 0)
pr_warn("Ballooned pages: %d\n", dm->num_pages_ballooned);
@@ -1597,6 +1653,10 @@
#endif
list_for_each_safe(cur, tmp, &dm->ha_region_list) {
has = list_entry(cur, struct hv_hotadd_state, list);
+ list_for_each_entry_safe(gap, tmp_gap, &has->gap_list, list) {
+ list_del(&gap->list);
+ kfree(gap);
+ }
list_del(&has->list);
kfree(has);
}
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 12156db..75e383e 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -581,7 +581,7 @@
extern int hv_init(void);
-extern void hv_cleanup(void);
+extern void hv_cleanup(bool crash);
extern int hv_post_message(union hv_connection_id connection_id,
enum hv_message_type message_type,
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
index 509ed973..802dcb40 100644
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
@@ -889,7 +889,7 @@
bus_unregister(&hv_bus);
err_cleanup:
- hv_cleanup();
+ hv_cleanup(false);
return ret;
}
@@ -1254,7 +1254,7 @@
vmbus_initiate_unload();
for_each_online_cpu(cpu)
smp_call_function_single(cpu, hv_synic_cleanup, NULL, 1);
- hv_cleanup();
+ hv_cleanup(false);
};
static void hv_crash_handler(struct pt_regs *regs)
@@ -1266,7 +1266,7 @@
* for kdump.
*/
hv_synic_cleanup(NULL);
- hv_cleanup();
+ hv_cleanup(true);
};
static int __init hv_acpi_init(void)
@@ -1330,7 +1330,7 @@
&hyperv_panic_block);
}
bus_unregister(&hv_bus);
- hv_cleanup();
+ hv_cleanup(false);
for_each_online_cpu(cpu) {
tasklet_kill(hv_context.event_dpc[cpu]);
smp_call_function_single(cpu, hv_synic_cleanup, NULL, 1);
diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
index d57a2f7..32c6a40 100644
--- a/drivers/hwtracing/intel_th/pci.c
+++ b/drivers/hwtracing/intel_th/pci.c
@@ -72,6 +72,16 @@
PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa2a6),
.driver_data = (kernel_ulong_t)0,
},
+ {
+ /* Cannon Lake H */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xa326),
+ .driver_data = (kernel_ulong_t)0,
+ },
+ {
+ /* Cannon Lake LP */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x9da6),
+ .driver_data = (kernel_ulong_t)0,
+ },
{ 0 },
};
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 6b00061c..a2ae221 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -294,7 +294,7 @@
#endif
#ifdef CONFIG_PM
-static int dw_i2c_plat_suspend(struct device *dev)
+static int dw_i2c_plat_runtime_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct dw_i2c_dev *i_dev = platform_get_drvdata(pdev);
@@ -318,11 +318,21 @@
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int dw_i2c_plat_suspend(struct device *dev)
+{
+ pm_runtime_resume(dev);
+ return dw_i2c_plat_runtime_suspend(dev);
+}
+#endif
+
static const struct dev_pm_ops dw_i2c_dev_pm_ops = {
.prepare = dw_i2c_plat_prepare,
.complete = dw_i2c_plat_complete,
SET_SYSTEM_SLEEP_PM_OPS(dw_i2c_plat_suspend, dw_i2c_plat_resume)
- SET_RUNTIME_PM_OPS(dw_i2c_plat_suspend, dw_i2c_plat_resume, NULL)
+ SET_RUNTIME_PM_OPS(dw_i2c_plat_runtime_suspend,
+ dw_i2c_plat_resume,
+ NULL)
};
#define DW_I2C_DEV_PMOPS (&dw_i2c_dev_pm_ops)
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
index 7ba795b..639d1a9 100644
--- a/drivers/i2c/busses/i2c-ismt.c
+++ b/drivers/i2c/busses/i2c-ismt.c
@@ -339,8 +339,10 @@
break;
case I2C_SMBUS_BLOCK_DATA:
case I2C_SMBUS_I2C_BLOCK_DATA:
- memcpy(&data->block[1], dma_buffer, desc->rxbytes);
- data->block[0] = desc->rxbytes;
+ if (desc->rxbytes != dma_buffer[0] + 1)
+ return -EMSGSIZE;
+
+ memcpy(data->block, dma_buffer, desc->rxbytes);
break;
}
return 0;
diff --git a/drivers/i2c/busses/i2c-jz4780.c b/drivers/i2c/busses/i2c-jz4780.c
index f325663..4b58e8a 100644
--- a/drivers/i2c/busses/i2c-jz4780.c
+++ b/drivers/i2c/busses/i2c-jz4780.c
@@ -786,10 +786,6 @@
jz4780_i2c_writew(i2c, JZ4780_I2C_INTM, 0x0);
- i2c->cmd = 0;
- memset(i2c->cmd_buf, 0, BUFSIZE);
- memset(i2c->data_buf, 0, BUFSIZE);
-
i2c->irq = platform_get_irq(pdev, 0);
ret = devm_request_irq(&pdev->dev, i2c->irq, jz4780_i2c_irq, 0,
dev_name(&pdev->dev), i2c);
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index 630bce6..b61db9d 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -54,7 +54,7 @@
#define SMBSLVDAT (0xC + piix4_smba)
/* count for request_region */
-#define SMBIOSIZE 8
+#define SMBIOSIZE 9
/* PCI Address Constants */
#define SMBBA 0x090
diff --git a/drivers/i2c/busses/i2c-tiny-usb.c b/drivers/i2c/busses/i2c-tiny-usb.c
index 0ed77ee..a2e3dd7 100644
--- a/drivers/i2c/busses/i2c-tiny-usb.c
+++ b/drivers/i2c/busses/i2c-tiny-usb.c
@@ -178,22 +178,39 @@
int value, int index, void *data, int len)
{
struct i2c_tiny_usb *dev = (struct i2c_tiny_usb *)adapter->algo_data;
+ void *dmadata = kmalloc(len, GFP_KERNEL);
+ int ret;
+
+ if (!dmadata)
+ return -ENOMEM;
/* do control transfer */
- return usb_control_msg(dev->usb_dev, usb_rcvctrlpipe(dev->usb_dev, 0),
+ ret = usb_control_msg(dev->usb_dev, usb_rcvctrlpipe(dev->usb_dev, 0),
cmd, USB_TYPE_VENDOR | USB_RECIP_INTERFACE |
- USB_DIR_IN, value, index, data, len, 2000);
+ USB_DIR_IN, value, index, dmadata, len, 2000);
+
+ memcpy(data, dmadata, len);
+ kfree(dmadata);
+ return ret;
}
static int usb_write(struct i2c_adapter *adapter, int cmd,
int value, int index, void *data, int len)
{
struct i2c_tiny_usb *dev = (struct i2c_tiny_usb *)adapter->algo_data;
+ void *dmadata = kmemdup(data, len, GFP_KERNEL);
+ int ret;
+
+ if (!dmadata)
+ return -ENOMEM;
/* do control transfer */
- return usb_control_msg(dev->usb_dev, usb_sndctrlpipe(dev->usb_dev, 0),
+ ret = usb_control_msg(dev->usb_dev, usb_sndctrlpipe(dev->usb_dev, 0),
cmd, USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
- value, index, data, len, 2000);
+ value, index, dmadata, len, 2000);
+
+ kfree(dmadata);
+ return ret;
}
static void i2c_tiny_usb_free(struct i2c_tiny_usb *dev)
diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c
index fa24d51..c712291 100644
--- a/drivers/iio/accel/bmc150-accel-core.c
+++ b/drivers/iio/accel/bmc150-accel-core.c
@@ -194,7 +194,6 @@
struct device *dev;
int irq;
struct bmc150_accel_interrupt interrupts[BMC150_ACCEL_INTERRUPTS];
- atomic_t active_intr;
struct bmc150_accel_trigger triggers[BMC150_ACCEL_TRIGGERS];
struct mutex mutex;
u8 fifo_mode, watermark;
@@ -489,11 +488,6 @@
goto out_fix_power_state;
}
- if (state)
- atomic_inc(&data->active_intr);
- else
- atomic_dec(&data->active_intr);
-
return 0;
out_fix_power_state:
@@ -1704,8 +1698,7 @@
struct bmc150_accel_data *data = iio_priv(indio_dev);
mutex_lock(&data->mutex);
- if (atomic_read(&data->active_intr))
- bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_NORMAL, 0);
+ bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_NORMAL, 0);
bmc150_accel_fifo_set_mode(data);
mutex_unlock(&data->mutex);
diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c
index 0470fc8..9b68546 100644
--- a/drivers/iio/adc/ti_am335x_adc.c
+++ b/drivers/iio/adc/ti_am335x_adc.c
@@ -151,7 +151,9 @@
{
struct iio_dev *indio_dev = private;
struct tiadc_device *adc_dev = iio_priv(indio_dev);
- unsigned int status, config;
+ unsigned int status, config, adc_fsm;
+ unsigned short count = 0;
+
status = tiadc_readl(adc_dev, REG_IRQSTATUS);
/*
@@ -165,6 +167,15 @@
tiadc_writel(adc_dev, REG_CTRL, config);
tiadc_writel(adc_dev, REG_IRQSTATUS, IRQENB_FIFO1OVRRUN
| IRQENB_FIFO1UNDRFLW | IRQENB_FIFO1THRES);
+
+ /* wait for idle state.
+ * ADC needs to finish the current conversion
+ * before disabling the module
+ */
+ do {
+ adc_fsm = tiadc_readl(adc_dev, REG_ADCFSM);
+ } while (adc_fsm != 0x10 && count++ < 100);
+
tiadc_writel(adc_dev, REG_CTRL, (config | CNTRLREG_TSCSSENB));
return IRQ_HANDLED;
} else if (status & IRQENB_FIFO1THRES) {
diff --git a/drivers/iio/adc/vf610_adc.c b/drivers/iio/adc/vf610_adc.c
index b10f629..1dbc214 100644
--- a/drivers/iio/adc/vf610_adc.c
+++ b/drivers/iio/adc/vf610_adc.c
@@ -77,7 +77,7 @@
#define VF610_ADC_ADSTS_MASK 0x300
#define VF610_ADC_ADLPC_EN 0x80
#define VF610_ADC_ADHSC_EN 0x400
-#define VF610_ADC_REFSEL_VALT 0x100
+#define VF610_ADC_REFSEL_VALT 0x800
#define VF610_ADC_REFSEL_VBG 0x1000
#define VF610_ADC_ADTRG_HARD 0x2000
#define VF610_ADC_AVGS_8 0x4000
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
index 59551102..a8db38d 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
+++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
@@ -36,8 +36,6 @@
s32 poll_value = 0;
if (state) {
- if (!atomic_read(&st->user_requested_state))
- return 0;
if (sensor_hub_device_open(st->hsdev))
return -EIO;
@@ -51,8 +49,6 @@
st->report_state.report_id,
st->report_state.index,
HID_USAGE_SENSOR_PROP_REPORTING_STATE_ALL_EVENTS_ENUM);
-
- poll_value = hid_sensor_read_poll_value(st);
} else {
int val;
@@ -86,10 +82,15 @@
&report_val);
}
+ pr_debug("HID_SENSOR %s set power_state %d report_state %d\n",
+ st->pdev->name, state_val, report_val);
+
sensor_hub_get_feature(st->hsdev, st->power_state.report_id,
st->power_state.index,
sizeof(state_val), &state_val);
- if (state && poll_value)
+ if (state)
+ poll_value = hid_sensor_read_poll_value(st);
+ if (poll_value > 0)
msleep_interruptible(poll_value * 2);
return 0;
@@ -107,6 +108,7 @@
ret = pm_runtime_get_sync(&st->pdev->dev);
else {
pm_runtime_mark_last_busy(&st->pdev->dev);
+ pm_runtime_use_autosuspend(&st->pdev->dev);
ret = pm_runtime_put_autosuspend(&st->pdev->dev);
}
if (ret < 0) {
@@ -175,8 +177,6 @@
/* Default to 3 seconds, but can be changed from sysfs */
pm_runtime_set_autosuspend_delay(&attrb->pdev->dev,
3000);
- pm_runtime_use_autosuspend(&attrb->pdev->dev);
-
return ret;
error_unreg_trigger:
iio_trigger_unregister(trig);
diff --git a/drivers/iio/dac/ad7303.c b/drivers/iio/dac/ad7303.c
index e690dd1..4b0f942 100644
--- a/drivers/iio/dac/ad7303.c
+++ b/drivers/iio/dac/ad7303.c
@@ -184,9 +184,9 @@
.address = (chan), \
.scan_type = { \
.sign = 'u', \
- .realbits = '8', \
- .storagebits = '8', \
- .shift = '0', \
+ .realbits = 8, \
+ .storagebits = 8, \
+ .shift = 0, \
}, \
.ext_info = ad7303_ext_info, \
}
diff --git a/drivers/iio/gyro/bmg160_core.c b/drivers/iio/gyro/bmg160_core.c
index acb3b30..90841ab 100644
--- a/drivers/iio/gyro/bmg160_core.c
+++ b/drivers/iio/gyro/bmg160_core.c
@@ -28,6 +28,7 @@
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>
#include <linux/regmap.h>
+#include <linux/delay.h>
#include "bmg160.h"
#define BMG160_IRQ_NAME "bmg160_event"
@@ -53,6 +54,9 @@
#define BMG160_NO_FILTER 0
#define BMG160_DEF_BW 100
+#define BMG160_GYRO_REG_RESET 0x14
+#define BMG160_GYRO_RESET_VAL 0xb6
+
#define BMG160_REG_INT_MAP_0 0x17
#define BMG160_INT_MAP_0_BIT_ANY BIT(1)
@@ -186,6 +190,14 @@
int ret;
unsigned int val;
+ /*
+ * Reset chip to get it in a known good state. A delay of 30ms after
+ * reset is required according to the datasheet.
+ */
+ regmap_write(data->regmap, BMG160_GYRO_REG_RESET,
+ BMG160_GYRO_RESET_VAL);
+ usleep_range(30000, 30700);
+
ret = regmap_read(data->regmap, BMG160_REG_CHIP_ID, &val);
if (ret < 0) {
dev_err(data->dev, "Error reading reg_chip_id\n");
diff --git a/drivers/iio/imu/adis16480.c b/drivers/iio/imu/adis16480.c
index 2485b88..1880105 100644
--- a/drivers/iio/imu/adis16480.c
+++ b/drivers/iio/imu/adis16480.c
@@ -696,7 +696,7 @@
.gyro_max_val = IIO_RAD_TO_DEGREE(22500),
.gyro_max_scale = 450,
.accel_max_val = IIO_M_S_2_TO_G(12500),
- .accel_max_scale = 5,
+ .accel_max_scale = 10,
},
[ADIS16485] = {
.channels = adis16485_channels,
diff --git a/drivers/iio/light/ltr501.c b/drivers/iio/light/ltr501.c
index 6bf89d8..b9d1e5c 100644
--- a/drivers/iio/light/ltr501.c
+++ b/drivers/iio/light/ltr501.c
@@ -74,9 +74,9 @@
static const struct reg_field reg_field_it =
REG_FIELD(LTR501_ALS_MEAS_RATE, 3, 4);
static const struct reg_field reg_field_als_intr =
- REG_FIELD(LTR501_INTR, 0, 0);
-static const struct reg_field reg_field_ps_intr =
REG_FIELD(LTR501_INTR, 1, 1);
+static const struct reg_field reg_field_ps_intr =
+ REG_FIELD(LTR501_INTR, 0, 0);
static const struct reg_field reg_field_als_rate =
REG_FIELD(LTR501_ALS_MEAS_RATE, 0, 2);
static const struct reg_field reg_field_ps_rate =
diff --git a/drivers/iio/light/tsl2563.c b/drivers/iio/light/tsl2563.c
index 12731d6..ec1b2e7 100644
--- a/drivers/iio/light/tsl2563.c
+++ b/drivers/iio/light/tsl2563.c
@@ -626,7 +626,7 @@
struct tsl2563_chip *chip = iio_priv(dev_info);
iio_push_event(dev_info,
- IIO_UNMOD_EVENT_CODE(IIO_LIGHT,
+ IIO_UNMOD_EVENT_CODE(IIO_INTENSITY,
0,
IIO_EV_TYPE_THRESH,
IIO_EV_DIR_EITHER),
diff --git a/drivers/iio/proximity/as3935.c b/drivers/iio/proximity/as3935.c
index a0aedda..4204789 100644
--- a/drivers/iio/proximity/as3935.c
+++ b/drivers/iio/proximity/as3935.c
@@ -40,9 +40,9 @@
#define AS3935_AFE_PWR_BIT BIT(0)
#define AS3935_INT 0x03
-#define AS3935_INT_MASK 0x07
+#define AS3935_INT_MASK 0x0f
#define AS3935_EVENT_INT BIT(3)
-#define AS3935_NOISE_INT BIT(1)
+#define AS3935_NOISE_INT BIT(0)
#define AS3935_DATA 0x07
#define AS3935_DATA_MASK 0x3F
@@ -50,7 +50,6 @@
#define AS3935_TUNE_CAP 0x08
#define AS3935_CALIBRATE 0x3D
-#define AS3935_WRITE_DATA BIT(15)
#define AS3935_READ_DATA BIT(14)
#define AS3935_ADDRESS(x) ((x) << 8)
@@ -105,7 +104,7 @@
{
u8 *buf = st->buf;
- buf[0] = (AS3935_WRITE_DATA | AS3935_ADDRESS(reg)) >> 8;
+ buf[0] = AS3935_ADDRESS(reg) >> 8;
buf[1] = val;
return spi_write(st->spi, buf, 2);
@@ -264,8 +263,6 @@
static void calibrate_as3935(struct as3935_state *st)
{
- mutex_lock(&st->lock);
-
/* mask disturber interrupt bit */
as3935_write(st, AS3935_INT, BIT(5));
@@ -275,8 +272,6 @@
mdelay(2);
as3935_write(st, AS3935_TUNE_CAP, (st->tune_cap / TUNE_CAP_DIV));
-
- mutex_unlock(&st->lock);
}
#ifdef CONFIG_PM_SLEEP
@@ -313,6 +308,8 @@
val &= ~AS3935_AFE_PWR_BIT;
ret = as3935_write(st, AS3935_AFE_GAIN, val);
+ calibrate_as3935(st);
+
err_resume:
mutex_unlock(&st->lock);
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index 34b1ada..6a8024d 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -277,8 +277,8 @@
fl6.saddr = src_in->sin6_addr;
fl6.flowi6_oif = addr->bound_dev_if;
- dst = ip6_route_output(addr->net, NULL, &fl6);
- if ((ret = dst->error))
+ ret = ipv6_stub->ipv6_dst_lookup(addr->net, NULL, &dst, &fl6);
+ if (ret < 0)
goto put;
if (ipv6_addr_any(&fl6.saddr)) {
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 3f5741a..43d5166 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -857,6 +857,8 @@
} else
ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
qp_attr_mask);
+ qp_attr->port_num = id_priv->id.port_num;
+ *qp_attr_mask |= IB_QP_PORT;
} else
ret = -ENOSYS;
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index b1f37d4..e76d52a 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -863,7 +863,7 @@
free_port_list_attributes(device);
err_unregister:
- device_unregister(class_dev);
+ device_del(class_dev);
err:
return ret;
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 1c02dea..b7a73f1 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -2287,6 +2287,11 @@
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
+ if ((cmd.attr_mask & IB_QP_PORT) &&
+ (cmd.port_num < rdma_start_port(ib_dev) ||
+ cmd.port_num > rdma_end_port(ib_dev)))
+ return -EINVAL;
+
INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
out_len);
@@ -2827,6 +2832,10 @@
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
+ if (cmd.attr.port_num < rdma_start_port(ib_dev) ||
+ cmd.attr.port_num > rdma_end_port(ib_dev))
+ return -EINVAL;
+
uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
if (!uobj)
return -ENOMEM;
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 77ddf2f..8763fb8 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -2491,6 +2491,7 @@
mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[i]);
err_map:
+ mlx4_ib_free_eqs(dev, ibdev);
iounmap(ibdev->uar_map);
err_uar:
diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c
index 36ec8aa..0b5bb0c 100644
--- a/drivers/infiniband/hw/mlx4/mcg.c
+++ b/drivers/infiniband/hw/mlx4/mcg.c
@@ -1105,7 +1105,8 @@
while ((p = rb_first(&ctx->mcg_table)) != NULL) {
group = rb_entry(p, struct mcast_group, node);
if (atomic_read(&group->refcount))
- mcg_warn_group(group, "group refcount %d!!! (pointer %p)\n", atomic_read(&group->refcount), group);
+ mcg_debug_group(group, "group refcount %d!!! (pointer %p)\n",
+ atomic_read(&group->refcount), group);
force_clean_group(group);
}
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
index 3eff35c..2684605 100644
--- a/drivers/infiniband/hw/qib/qib_qp.c
+++ b/drivers/infiniband/hw/qib/qib_qp.c
@@ -41,13 +41,13 @@
#include "qib.h"
-#define BITS_PER_PAGE (PAGE_SIZE*BITS_PER_BYTE)
-#define BITS_PER_PAGE_MASK (BITS_PER_PAGE-1)
+#define RVT_BITS_PER_PAGE (PAGE_SIZE*BITS_PER_BYTE)
+#define RVT_BITS_PER_PAGE_MASK (RVT_BITS_PER_PAGE-1)
static inline unsigned mk_qpn(struct qib_qpn_table *qpt,
struct qpn_map *map, unsigned off)
{
- return (map - qpt->map) * BITS_PER_PAGE + off;
+ return (map - qpt->map) * RVT_BITS_PER_PAGE + off;
}
static inline unsigned find_next_offset(struct qib_qpn_table *qpt,
@@ -59,7 +59,7 @@
if (((off & qpt->mask) >> 1) >= n)
off = (off | qpt->mask) + 2;
} else
- off = find_next_zero_bit(map->page, BITS_PER_PAGE, off);
+ off = find_next_zero_bit(map->page, RVT_BITS_PER_PAGE, off);
return off;
}
@@ -147,8 +147,8 @@
qpn = 2;
if (qpt->mask && ((qpn & qpt->mask) >> 1) >= dd->n_krcv_queues)
qpn = (qpn | qpt->mask) + 2;
- offset = qpn & BITS_PER_PAGE_MASK;
- map = &qpt->map[qpn / BITS_PER_PAGE];
+ offset = qpn & RVT_BITS_PER_PAGE_MASK;
+ map = &qpt->map[qpn / RVT_BITS_PER_PAGE];
max_scan = qpt->nmaps - !offset;
for (i = 0;;) {
if (unlikely(!map->page)) {
@@ -173,7 +173,7 @@
* We just need to be sure we don't loop
* forever.
*/
- } while (offset < BITS_PER_PAGE && qpn < QPN_MAX);
+ } while (offset < RVT_BITS_PER_PAGE && qpn < QPN_MAX);
/*
* In order to keep the number of pages allocated to a
* minimum, we scan the all existing pages before increasing
@@ -204,9 +204,9 @@
{
struct qpn_map *map;
- map = qpt->map + qpn / BITS_PER_PAGE;
+ map = qpt->map + qpn / RVT_BITS_PER_PAGE;
if (map->page)
- clear_bit(qpn & BITS_PER_PAGE_MASK, map->page);
+ clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page);
}
static inline unsigned qpn_hash(struct qib_ibdev *dev, u32 qpn)
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index e6b7556..cbc4216 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -2088,8 +2088,10 @@
ret = qib_get_rwqe(qp, 1);
if (ret < 0)
goto nack_op_err;
- if (!ret)
+ if (!ret) {
+ qib_put_ss(&qp->r_sge);
goto rnr_nak;
+ }
wc.ex.imm_data = ohdr->u.rc.imm_data;
hdrsize += 4;
wc.wc_flags = IB_WC_WITH_IMM;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_fs.c b/drivers/infiniband/ulp/ipoib/ipoib_fs.c
index 6bd5740..09396bd 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_fs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_fs.c
@@ -281,8 +281,11 @@
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
+ WARN_ONCE(!priv->mcg_dentry, "null mcg debug file\n");
+ WARN_ONCE(!priv->path_dentry, "null path debug file\n");
debugfs_remove(priv->mcg_dentry);
debugfs_remove(priv->path_dentry);
+ priv->mcg_dentry = priv->path_dentry = NULL;
}
int ipoib_register_debugfs(void)
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index 8efcff1..6699ecd 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -106,6 +106,33 @@
.get_net_dev_by_params = ipoib_get_net_dev_by_params,
};
+#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
+static int ipoib_netdev_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct netdev_notifier_info *ni = ptr;
+ struct net_device *dev = ni->dev;
+
+ if (dev->netdev_ops->ndo_open != ipoib_open)
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case NETDEV_REGISTER:
+ ipoib_create_debug_files(dev);
+ break;
+ case NETDEV_CHANGENAME:
+ ipoib_delete_debug_files(dev);
+ ipoib_create_debug_files(dev);
+ break;
+ case NETDEV_UNREGISTER:
+ ipoib_delete_debug_files(dev);
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+#endif
+
int ipoib_open(struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
@@ -1595,8 +1622,6 @@
ASSERT_RTNL();
- ipoib_delete_debug_files(dev);
-
/* Delete any child interfaces first */
list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) {
/* Stop GC on child */
@@ -1908,8 +1933,6 @@
goto register_failed;
}
- ipoib_create_debug_files(priv->dev);
-
if (ipoib_cm_add_mode_attr(priv->dev))
goto sysfs_failed;
if (ipoib_add_pkey_attr(priv->dev))
@@ -1924,7 +1947,6 @@
return priv->dev;
sysfs_failed:
- ipoib_delete_debug_files(priv->dev);
unregister_netdev(priv->dev);
register_failed:
@@ -2006,6 +2028,12 @@
kfree(dev_list);
}
+#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
+static struct notifier_block ipoib_netdev_notifier = {
+ .notifier_call = ipoib_netdev_event,
+};
+#endif
+
static int __init ipoib_init_module(void)
{
int ret;
@@ -2057,6 +2085,9 @@
if (ret)
goto err_client;
+#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
+ register_netdevice_notifier(&ipoib_netdev_notifier);
+#endif
return 0;
err_client:
@@ -2074,6 +2105,9 @@
static void __exit ipoib_cleanup_module(void)
{
+#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
+ unregister_netdevice_notifier(&ipoib_netdev_notifier);
+#endif
ipoib_netlink_fini();
ib_unregister_client(&ipoib_client);
ib_sa_unregister_client(&ipoib_sa_client);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
index fca1a88..57a34f8 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -85,8 +85,6 @@
goto register_failed;
}
- ipoib_create_debug_files(priv->dev);
-
/* RTNL childs don't need proprietary sysfs entries */
if (type == IPOIB_LEGACY_CHILD) {
if (ipoib_cm_add_mode_attr(priv->dev))
@@ -107,7 +105,6 @@
sysfs_failed:
result = -ENOMEM;
- ipoib_delete_debug_files(priv->dev);
unregister_netdevice(priv->dev);
register_failed:
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index b0edb66a..0b7f5a7 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -1581,7 +1581,7 @@
struct isert_conn *isert_conn,
u32 xfer_len)
{
- struct ib_device *ib_dev = isert_conn->cm_id->device;
+ struct ib_device *ib_dev = isert_conn->device->ib_device;
struct iscsi_hdr *hdr;
u64 rx_dma;
int rx_buflen;
diff --git a/drivers/input/joystick/iforce/iforce-usb.c b/drivers/input/joystick/iforce/iforce-usb.c
index d96aa27..db64adf 100644
--- a/drivers/input/joystick/iforce/iforce-usb.c
+++ b/drivers/input/joystick/iforce/iforce-usb.c
@@ -141,6 +141,9 @@
interface = intf->cur_altsetting;
+ if (interface->desc.bNumEndpoints < 2)
+ return -ENODEV;
+
epirq = &interface->endpoint[0].desc;
epout = &interface->endpoint[1].desc;
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 16f000a..3258baf 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -189,6 +189,7 @@
{ 0x1430, 0x8888, "TX6500+ Dance Pad (first generation)", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
{ 0x146b, 0x0601, "BigBen Interactive XBOX 360 Controller", 0, XTYPE_XBOX360 },
{ 0x1532, 0x0037, "Razer Sabertooth", 0, XTYPE_XBOX360 },
+ { 0x1532, 0x0a03, "Razer Wildcat", 0, XTYPE_XBOXONE },
{ 0x15e4, 0x3f00, "Power A Mini Pro Elite", 0, XTYPE_XBOX360 },
{ 0x15e4, 0x3f0a, "Xbox Airflo wired controller", 0, XTYPE_XBOX360 },
{ 0x15e4, 0x3f10, "Batarang Xbox 360 controller", 0, XTYPE_XBOX360 },
@@ -310,6 +311,7 @@
XPAD_XBOX360_VENDOR(0x1689), /* Razer Onza */
XPAD_XBOX360_VENDOR(0x24c6), /* PowerA Controllers */
XPAD_XBOX360_VENDOR(0x1532), /* Razer Sabertooth */
+ XPAD_XBOXONE_VENDOR(0x1532), /* Razer Wildcat */
XPAD_XBOX360_VENDOR(0x15e4), /* Numark X-Box 360 controllers */
XPAD_XBOX360_VENDOR(0x162e), /* Joytech X-Box 360 controllers */
{ }
diff --git a/drivers/input/misc/cm109.c b/drivers/input/misc/cm109.c
index 9365535..50a7faa 100644
--- a/drivers/input/misc/cm109.c
+++ b/drivers/input/misc/cm109.c
@@ -675,6 +675,10 @@
int error = -ENOMEM;
interface = intf->cur_altsetting;
+
+ if (interface->desc.bNumEndpoints < 1)
+ return -ENODEV;
+
endpoint = &interface->endpoint[0].desc;
if (!usb_endpoint_is_int_in(endpoint))
diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
index 9c0ea36..f4e8fbe 100644
--- a/drivers/input/misc/ims-pcu.c
+++ b/drivers/input/misc/ims-pcu.c
@@ -1667,6 +1667,10 @@
return -EINVAL;
alt = pcu->ctrl_intf->cur_altsetting;
+
+ if (alt->desc.bNumEndpoints < 1)
+ return -ENODEV;
+
pcu->ep_ctrl = &alt->endpoint[0].desc;
pcu->max_ctrl_size = usb_endpoint_maxp(pcu->ep_ctrl);
diff --git a/drivers/input/misc/qpnp-power-on.c b/drivers/input/misc/qpnp-power-on.c
index b3a9688f..e2f79ae 100644
--- a/drivers/input/misc/qpnp-power-on.c
+++ b/drivers/input/misc/qpnp-power-on.c
@@ -324,6 +324,30 @@
pon->subtype == PON_GEN2_SECONDARY;
}
+static void qpnp_pon_dump_regs(struct qpnp_pon *pon)
+{
+ uint8_t regs[16];
+ int i;
+ int j;
+ int rc;
+
+ dev_info(&pon->pdev->dev, "PMIC@SID%d register dump:\n",
+ to_spmi_device(pon->pdev->dev.parent)->usid);
+ for (i = 0; i < 0xF0; i += ARRAY_SIZE(regs)) {
+ rc = regmap_bulk_read(pon->regmap, pon->base + i, ®s,
+ ARRAY_SIZE(regs));
+ if (rc) {
+ dev_err(&pon->pdev->dev,
+ "Unable to read at ofs: %x (%d)\n", i, rc);
+ return;
+ }
+ printk("%04x: ", pon->base + i);
+ for (j = 0; j < ARRAY_SIZE(regs); j++)
+ printk("%02x ", regs[j]);
+ printk("\n");
+ }
+}
+
/**
* qpnp_pon_set_restart_reason - Store device restart reason in PMIC register.
*
@@ -2141,6 +2165,7 @@
"PMIC@SID%d Power-on reason: Unknown and '%s' boot\n",
to_spmi_device(pon->pdev->dev.parent)->usid,
cold_boot ? "cold" : "warm");
+ qpnp_pon_dump_regs(pon);
} else {
pon->pon_trigger_reason = index;
dev_info(&pon->pdev->dev,
@@ -2171,12 +2196,16 @@
dev_info(&pon->pdev->dev,
"PMIC@SID%d: Unknown power-off reason\n",
to_spmi_device(pon->pdev->dev.parent)->usid);
+ qpnp_pon_dump_regs(pon);
} else {
pon->pon_power_off_reason = index;
dev_info(&pon->pdev->dev,
"PMIC@SID%d: Power-off reason: %s\n",
to_spmi_device(pon->pdev->dev.parent)->usid,
qpnp_poff_reason[index]);
+ /* OTST3 (14, 31) and Stage 3 (15) */
+ if (index == 14 || index == 15 || index == 31)
+ qpnp_pon_dump_regs(pon);
}
if (pon->pon_trigger_reason == PON_SMPL ||
diff --git a/drivers/input/misc/vl53L0/stmvl53l0_module.c b/drivers/input/misc/vl53L0/stmvl53l0_module.c
index 9313f1b..35e649f 100644
--- a/drivers/input/misc/vl53L0/stmvl53l0_module.c
+++ b/drivers/input/misc/vl53L0/stmvl53l0_module.c
@@ -2730,6 +2730,9 @@
g_PhaseCal = data->PhaseCal;
g_refSpadCount = data->refSpadCount;
g_isApertureSpads = data->isApertureSpads;
+ } else {
+ vl53l0_errmsg("failed: no calibration data\n");
+ return -EIO;
}
}
/* -Taimen */
diff --git a/drivers/input/misc/yealink.c b/drivers/input/misc/yealink.c
index 79c964c..6e7ff95 100644
--- a/drivers/input/misc/yealink.c
+++ b/drivers/input/misc/yealink.c
@@ -875,6 +875,10 @@
int ret, pipe, i;
interface = intf->cur_altsetting;
+
+ if (interface->desc.bNumEndpoints < 1)
+ return -ENODEV;
+
endpoint = &interface->endpoint[0].desc;
if (!usb_endpoint_is_int_in(endpoint))
return -ENODEV;
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index ed1935f..681dce1 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -218,17 +218,19 @@
static int elan_check_ASUS_special_fw(struct elan_tp_data *data)
{
- if (data->ic_type != 0x0E)
- return false;
-
- switch (data->product_id) {
- case 0x05 ... 0x07:
- case 0x09:
- case 0x13:
+ if (data->ic_type == 0x0E) {
+ switch (data->product_id) {
+ case 0x05 ... 0x07:
+ case 0x09:
+ case 0x13:
+ return true;
+ }
+ } else if (data->ic_type == 0x08 && data->product_id == 0x26) {
+ /* ASUS EeeBook X205TA */
return true;
- default:
- return false;
}
+
+ return false;
}
static int __elan_initialize(struct elan_tp_data *data)
@@ -1232,7 +1234,12 @@
{ "ELAN0000", 0 },
{ "ELAN0100", 0 },
{ "ELAN0600", 0 },
+ { "ELAN0602", 0 },
{ "ELAN0605", 0 },
+ { "ELAN0608", 0 },
+ { "ELAN0605", 0 },
+ { "ELAN0609", 0 },
+ { "ELAN060B", 0 },
{ "ELAN1000", 0 },
{ }
};
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 43482ae..6f4dc0f 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1122,7 +1122,10 @@
* Asus UX32VD 0x361f02 00, 15, 0e clickpad
* Avatar AVIU-145A2 0x361f00 ? clickpad
* Fujitsu LIFEBOOK E544 0x470f00 d0, 12, 09 2 hw buttons
+ * Fujitsu LIFEBOOK E546 0x470f00 50, 12, 09 2 hw buttons
+ * Fujitsu LIFEBOOK E547 0x470f00 50, 12, 09 2 hw buttons
* Fujitsu LIFEBOOK E554 0x570f01 40, 14, 0c 2 hw buttons
+ * Fujitsu LIFEBOOK E557 0x570f01 40, 14, 0c 2 hw buttons
* Fujitsu T725 0x470f01 05, 12, 09 2 hw buttons
* Fujitsu H730 0x570f00 c0, 14, 0c 3 hw buttons (**)
* Gigabyte U2442 0x450f01 58, 17, 0c 2 hw buttons
@@ -1528,6 +1531,20 @@
},
},
{
+ /* Fujitsu LIFEBOOK E546 does not work with crc_enabled == 0 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E546"),
+ },
+ },
+ {
+ /* Fujitsu LIFEBOOK E547 does not work with crc_enabled == 0 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E547"),
+ },
+ },
+ {
/* Fujitsu LIFEBOOK E554 does not work with crc_enabled == 0 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
@@ -1542,6 +1559,13 @@
},
},
{
+ /* Fujitsu LIFEBOOK E557 does not work with crc_enabled == 0 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E557"),
+ },
+ },
+ {
/* Fujitsu LIFEBOOK U745 does not work with crc_enabled == 0 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
diff --git a/drivers/input/mouse/trackpoint.c b/drivers/input/mouse/trackpoint.c
index 354d47e..7e2dc5e 100644
--- a/drivers/input/mouse/trackpoint.c
+++ b/drivers/input/mouse/trackpoint.c
@@ -265,7 +265,8 @@
if (ps2_command(&psmouse->ps2dev, param, MAKE_PS2_CMD(0, 2, TP_READ_ID)))
return -1;
- if (param[0] != TP_MAGIC_IDENT)
+ /* add new TP ID. */
+ if (!(param[0] & TP_MAGIC_IDENT))
return -1;
if (firmware_id)
@@ -380,8 +381,8 @@
return 0;
if (trackpoint_read(&psmouse->ps2dev, TP_EXT_BTN, &button_info)) {
- psmouse_warn(psmouse, "failed to get extended button data\n");
- button_info = 0;
+ psmouse_warn(psmouse, "failed to get extended button data, assuming 3 buttons\n");
+ button_info = 0x33;
}
psmouse->private = kzalloc(sizeof(struct trackpoint_data), GFP_KERNEL);
diff --git a/drivers/input/mouse/trackpoint.h b/drivers/input/mouse/trackpoint.h
index 5617ed3..8805575 100644
--- a/drivers/input/mouse/trackpoint.h
+++ b/drivers/input/mouse/trackpoint.h
@@ -21,8 +21,9 @@
#define TP_COMMAND 0xE2 /* Commands start with this */
#define TP_READ_ID 0xE1 /* Sent for device identification */
-#define TP_MAGIC_IDENT 0x01 /* Sent after a TP_READ_ID followed */
+#define TP_MAGIC_IDENT 0x03 /* Sent after a TP_READ_ID followed */
/* by the firmware ID */
+ /* Firmware ID includes 0x1, 0x2, 0x3 */
/*
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index 0cdd958..5be14ad 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -120,6 +120,13 @@
},
},
{
+ /* Dell Embedded Box PC 3000 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Embedded Box PC 3000"),
+ },
+ },
+ {
/* OQO Model 01 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "OQO"),
@@ -678,6 +685,13 @@
DMI_MATCH(DMI_PRODUCT_NAME, "20046"),
},
},
+ {
+ /* Clevo P650RS, 650RP6, Sager NP8152-S, and others */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Notebook"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "P65xRP"),
+ },
+ },
{ }
};
@@ -774,6 +788,13 @@
DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK U574"),
},
},
+ {
+ /* Fujitsu UH554 laptop */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK UH544"),
+ },
+ },
{ }
};
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index 89abfdb..c84c685 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -434,8 +434,10 @@
{
struct i8042_port *port = serio->port_data;
+ spin_lock_irq(&i8042_lock);
port->exists = true;
- mb();
+ spin_unlock_irq(&i8042_lock);
+
return 0;
}
@@ -448,16 +450,20 @@
{
struct i8042_port *port = serio->port_data;
+ spin_lock_irq(&i8042_lock);
port->exists = false;
+ port->serio = NULL;
+ spin_unlock_irq(&i8042_lock);
/*
+ * We need to make sure that interrupt handler finishes using
+ * our serio port before we return from this function.
* We synchronize with both AUX and KBD IRQs because there is
* a (very unlikely) chance that AUX IRQ is raised for KBD port
* and vice versa.
*/
synchronize_irq(I8042_AUX_IRQ);
synchronize_irq(I8042_KBD_IRQ);
- port->serio = NULL;
}
/*
@@ -574,7 +580,7 @@
spin_unlock_irqrestore(&i8042_lock, flags);
- if (likely(port->exists && !filtered))
+ if (likely(serio && !filtered))
serio_interrupt(serio, data, dfl);
out:
diff --git a/drivers/input/tablet/hanwang.c b/drivers/input/tablet/hanwang.c
index cd85205..df4bea9 100644
--- a/drivers/input/tablet/hanwang.c
+++ b/drivers/input/tablet/hanwang.c
@@ -340,6 +340,9 @@
int error;
int i;
+ if (intf->cur_altsetting->desc.bNumEndpoints < 1)
+ return -ENODEV;
+
hanwang = kzalloc(sizeof(struct hanwang), GFP_KERNEL);
input_dev = input_allocate_device();
if (!hanwang || !input_dev) {
diff --git a/drivers/input/tablet/kbtab.c b/drivers/input/tablet/kbtab.c
index d2ac7c2..2812f92 100644
--- a/drivers/input/tablet/kbtab.c
+++ b/drivers/input/tablet/kbtab.c
@@ -122,6 +122,9 @@
struct input_dev *input_dev;
int error = -ENOMEM;
+ if (intf->cur_altsetting->desc.bNumEndpoints < 1)
+ return -ENODEV;
+
kbtab = kzalloc(sizeof(struct kbtab), GFP_KERNEL);
input_dev = input_allocate_device();
if (!kbtab || !input_dev)
diff --git a/drivers/input/touchscreen/stm/ftm4_ts.c b/drivers/input/touchscreen/stm/ftm4_ts.c
index 6c1f1fb..38db4ae 100644
--- a/drivers/input/touchscreen/stm/ftm4_ts.c
+++ b/drivers/input/touchscreen/stm/ftm4_ts.c
@@ -1208,9 +1208,10 @@
(EventID == EVENTID_MOTION_POINTER) ||
(EventID == EVENTID_LEAVE_POINTER))
info->finger[TouchID].state = EventID;
+
+ input_sync(info->input_dev);
}
- input_sync(info->input_dev);
#if defined(CONFIG_INPUT_BOOSTER)
if ((EventID == EVENTID_ENTER_POINTER)
diff --git a/drivers/input/touchscreen/sur40.c b/drivers/input/touchscreen/sur40.c
index 45b466e..0146e2c 100644
--- a/drivers/input/touchscreen/sur40.c
+++ b/drivers/input/touchscreen/sur40.c
@@ -500,6 +500,9 @@
if (iface_desc->desc.bInterfaceClass != 0xFF)
return -ENODEV;
+ if (iface_desc->desc.bNumEndpoints < 5)
+ return -ENODEV;
+
/* Use endpoint #4 (0x86). */
endpoint = &iface_desc->endpoint[4].desc;
if (endpoint->bEndpointAddress != TOUCH_ENDPOINT)
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
index 4831eb9..22160e4 100644
--- a/drivers/iommu/amd_iommu_v2.c
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -699,9 +699,9 @@
out_unregister:
mmu_notifier_unregister(&pasid_state->mn, mm);
+ mmput(mm);
out_free:
- mmput(mm);
free_pasid_state(pasid_state);
out:
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index f0fc6f7..f9711ac 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -908,7 +908,7 @@
* which we used for the IOMMU lookup. Strictly speaking
* we could do this for all PCI devices; we only need to
* get the BDF# from the scope table for ACPI matches. */
- if (pdev->is_virtfn)
+ if (pdev && pdev->is_virtfn)
goto got_pdev;
*bus = drhd->devices[i].bus;
@@ -1137,7 +1137,7 @@
if (!dma_pte_present(pte) || dma_pte_superpage(pte))
goto next;
- level_pfn = pfn & level_mask(level - 1);
+ level_pfn = pfn & level_mask(level);
level_pte = phys_to_virt(dma_pte_addr(pte));
if (level > 2)
@@ -2005,11 +2005,14 @@
if (context_copied(context)) {
u16 did_old = context_domain_id(context);
- if (did_old >= 0 && did_old < cap_ndoms(iommu->cap))
+ if (did_old >= 0 && did_old < cap_ndoms(iommu->cap)) {
iommu->flush.flush_context(iommu, did_old,
(((u16)bus) << 8) | devfn,
DMA_CCMD_MASK_NOBIT,
DMA_CCMD_DEVICE_INVL);
+ iommu->flush.flush_iotlb(iommu, did_old, 0, 0,
+ DMA_TLB_DSI_FLUSH);
+ }
}
pgd = domain->pgd;
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index f9be96c..e61f3a79 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -394,36 +394,30 @@
device->dev = dev;
ret = sysfs_create_link(&dev->kobj, &group->kobj, "iommu_group");
- if (ret) {
- kfree(device);
- return ret;
- }
+ if (ret)
+ goto err_free_device;
device->name = kasprintf(GFP_KERNEL, "%s", kobject_name(&dev->kobj));
rename:
if (!device->name) {
- sysfs_remove_link(&dev->kobj, "iommu_group");
- kfree(device);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_remove_link;
}
ret = sysfs_create_link_nowarn(group->devices_kobj,
&dev->kobj, device->name);
if (ret) {
- kfree(device->name);
if (ret == -EEXIST && i >= 0) {
/*
* Account for the slim chance of collision
* and append an instance to the name.
*/
+ kfree(device->name);
device->name = kasprintf(GFP_KERNEL, "%s.%d",
kobject_name(&dev->kobj), i++);
goto rename;
}
-
- sysfs_remove_link(&dev->kobj, "iommu_group");
- kfree(device);
- return ret;
+ goto err_free_name;
}
kobject_get(group->devices_kobj);
@@ -435,8 +429,10 @@
mutex_lock(&group->mutex);
list_add_tail(&device->list, &group->devices);
if (group->domain)
- __iommu_attach_device(group->domain, dev);
+ ret = __iommu_attach_device(group->domain, dev);
mutex_unlock(&group->mutex);
+ if (ret)
+ goto err_put_group;
/* Notify any listeners about change to group. */
blocking_notifier_call_chain(&group->notifier,
@@ -447,6 +443,21 @@
pr_info("Adding device %s to group %d\n", dev_name(dev), group->id);
return 0;
+
+err_put_group:
+ mutex_lock(&group->mutex);
+ list_del(&device->list);
+ mutex_unlock(&group->mutex);
+ dev->iommu_group = NULL;
+ kobject_put(group->devices_kobj);
+err_free_name:
+ kfree(device->name);
+err_remove_link:
+ sysfs_remove_link(&dev->kobj, "iommu_group");
+err_free_device:
+ kfree(device);
+ pr_err("Failed to add device %s to group %d: %d\n", dev_name(dev), group->id, ret);
+ return ret;
}
EXPORT_SYMBOL_GPL(iommu_group_add_device);
diff --git a/drivers/irqchip/irq-atmel-aic-common.c b/drivers/irqchip/irq-atmel-aic-common.c
index 37199b9..831a195 100644
--- a/drivers/irqchip/irq-atmel-aic-common.c
+++ b/drivers/irqchip/irq-atmel-aic-common.c
@@ -148,9 +148,9 @@
struct device_node *np;
void __iomem *regs;
- np = of_find_compatible_node(root, NULL, "atmel,at91rm9200-rtc");
+ np = of_find_compatible_node(NULL, NULL, "atmel,at91rm9200-rtc");
if (!np)
- np = of_find_compatible_node(root, NULL,
+ np = of_find_compatible_node(NULL, NULL,
"atmel,at91sam9x5-rtc");
if (!np)
@@ -202,7 +202,6 @@
return;
match = of_match_node(matches, root);
- of_node_put(root);
if (match) {
void (*fixup)(struct device_node *) = match->data;
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index bdf0b15..6687d9a 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -795,6 +795,9 @@
int enabled;
u64 val;
+ if (cpu >= nr_cpu_ids)
+ return -EINVAL;
+
if (gic_irq_in_rdist(d))
return -EINVAL;
diff --git a/drivers/irqchip/irq-imx-gpcv2.c b/drivers/irqchip/irq-imx-gpcv2.c
index 15af9a9..2d203b4 100644
--- a/drivers/irqchip/irq-imx-gpcv2.c
+++ b/drivers/irqchip/irq-imx-gpcv2.c
@@ -230,6 +230,8 @@
return -ENOMEM;
}
+ raw_spin_lock_init(&cd->rlock);
+
cd->gpc_base = of_iomap(node, 0);
if (!cd->gpc_base) {
pr_err("fsl-gpcv2: unable to map gpc registers\n");
diff --git a/drivers/irqchip/irq-keystone.c b/drivers/irqchip/irq-keystone.c
index deb89d6..e684be1 100644
--- a/drivers/irqchip/irq-keystone.c
+++ b/drivers/irqchip/irq-keystone.c
@@ -19,9 +19,9 @@
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
+#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/irqchip.h>
-#include <linux/irqchip/chained_irq.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/mfd/syscon.h>
@@ -39,6 +39,7 @@
struct irq_domain *irqd;
struct regmap *devctrl_regs;
u32 devctrl_offset;
+ raw_spinlock_t wa_lock;
};
static inline u32 keystone_irq_readl(struct keystone_irq_device *kirq)
@@ -83,17 +84,15 @@
/* nothing to do here */
}
-static void keystone_irq_handler(struct irq_desc *desc)
+static irqreturn_t keystone_irq_handler(int irq, void *keystone_irq)
{
- unsigned int irq = irq_desc_get_irq(desc);
- struct keystone_irq_device *kirq = irq_desc_get_handler_data(desc);
+ struct keystone_irq_device *kirq = keystone_irq;
+ unsigned long wa_lock_flags;
unsigned long pending;
int src, virq;
dev_dbg(kirq->dev, "start irq %d\n", irq);
- chained_irq_enter(irq_desc_get_chip(desc), desc);
-
pending = keystone_irq_readl(kirq);
keystone_irq_writel(kirq, pending);
@@ -111,13 +110,15 @@
if (!virq)
dev_warn(kirq->dev, "sporious irq detected hwirq %d, virq %d\n",
src, virq);
+ raw_spin_lock_irqsave(&kirq->wa_lock, wa_lock_flags);
generic_handle_irq(virq);
+ raw_spin_unlock_irqrestore(&kirq->wa_lock,
+ wa_lock_flags);
}
}
- chained_irq_exit(irq_desc_get_chip(desc), desc);
-
dev_dbg(kirq->dev, "end irq %d\n", irq);
+ return IRQ_HANDLED;
}
static int keystone_irq_map(struct irq_domain *h, unsigned int virq,
@@ -182,9 +183,16 @@
return -ENODEV;
}
+ raw_spin_lock_init(&kirq->wa_lock);
+
platform_set_drvdata(pdev, kirq);
- irq_set_chained_handler_and_data(kirq->irq, keystone_irq_handler, kirq);
+ ret = request_irq(kirq->irq, keystone_irq_handler,
+ 0, dev_name(dev), kirq);
+ if (ret) {
+ irq_domain_remove(kirq->irqd);
+ return ret;
+ }
/* clear all source bits */
keystone_irq_writel(kirq, ~0x0);
@@ -199,6 +207,8 @@
struct keystone_irq_device *kirq = platform_get_drvdata(pdev);
int hwirq;
+ free_irq(kirq->irq, kirq);
+
for (hwirq = 0; hwirq < KEYSTONE_N_IRQ; hwirq++)
irq_dispose_mapping(irq_find_mapping(kirq->irqd, hwirq));
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index 9e17ef2..6f1dbd5 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -915,8 +915,11 @@
gic_len = resource_size(&res);
}
- if (mips_cm_present())
+ if (mips_cm_present()) {
write_gcr_gic_base(gic_base | CM_GCR_GIC_BASE_GICEN_MSK);
+ /* Ensure GIC region is enabled before trying to access it */
+ __sync();
+ }
gic_present = true;
__gic_init(gic_base, gic_len, cpu_vec, 0, node);
diff --git a/drivers/irqchip/irq-mxs.c b/drivers/irqchip/irq-mxs.c
index 1730470..05fa9f7 100644
--- a/drivers/irqchip/irq-mxs.c
+++ b/drivers/irqchip/irq-mxs.c
@@ -131,12 +131,16 @@
.irq_ack = icoll_ack_irq,
.irq_mask = icoll_mask_irq,
.irq_unmask = icoll_unmask_irq,
+ .flags = IRQCHIP_MASK_ON_SUSPEND |
+ IRQCHIP_SKIP_SET_WAKE,
};
static struct irq_chip asm9260_icoll_chip = {
.irq_ack = icoll_ack_irq,
.irq_mask = asm9260_mask_irq,
.irq_unmask = asm9260_unmask_irq,
+ .flags = IRQCHIP_MASK_ON_SUSPEND |
+ IRQCHIP_SKIP_SET_WAKE,
};
asmlinkage void __exception_irq_entry icoll_handle_irq(struct pt_regs *regs)
diff --git a/drivers/irqchip/irq-xtensa-mx.c b/drivers/irqchip/irq-xtensa-mx.c
index bb3ac5f..72a391e 100644
--- a/drivers/irqchip/irq-xtensa-mx.c
+++ b/drivers/irqchip/irq-xtensa-mx.c
@@ -142,7 +142,7 @@
int __init xtensa_mx_init_legacy(struct device_node *interrupt_parent)
{
struct irq_domain *root_domain =
- irq_domain_add_legacy(NULL, NR_IRQS, 0, 0,
+ irq_domain_add_legacy(NULL, NR_IRQS - 1, 1, 0,
&xtensa_mx_irq_domain_ops,
&xtensa_mx_irq_chip);
irq_set_default_host(root_domain);
diff --git a/drivers/irqchip/irq-xtensa-pic.c b/drivers/irqchip/irq-xtensa-pic.c
index 472ae17..f728755 100644
--- a/drivers/irqchip/irq-xtensa-pic.c
+++ b/drivers/irqchip/irq-xtensa-pic.c
@@ -89,7 +89,7 @@
int __init xtensa_pic_init_legacy(struct device_node *interrupt_parent)
{
struct irq_domain *root_domain =
- irq_domain_add_legacy(NULL, NR_IRQS, 0, 0,
+ irq_domain_add_legacy(NULL, NR_IRQS - 1, 1, 0,
&xtensa_irq_domain_ops, &xtensa_irq_chip);
irq_set_default_host(root_domain);
return 0;
diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
index aecec6d..7f1c625 100644
--- a/drivers/isdn/gigaset/bas-gigaset.c
+++ b/drivers/isdn/gigaset/bas-gigaset.c
@@ -2317,6 +2317,9 @@
return -ENODEV;
}
+ if (hostif->desc.bNumEndpoints < 1)
+ return -ENODEV;
+
dev_info(&udev->dev,
"%s: Device matched (Vendor: 0x%x, Product: 0x%x)\n",
__func__, le16_to_cpu(udev->descriptor.idVendor),
diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
index 9b856e1..e4c43a1 100644
--- a/drivers/isdn/i4l/isdn_common.c
+++ b/drivers/isdn/i4l/isdn_common.c
@@ -1379,6 +1379,7 @@
if (arg) {
if (copy_from_user(bname, argp, sizeof(bname) - 1))
return -EFAULT;
+ bname[sizeof(bname)-1] = 0;
} else
return -EINVAL;
ret = mutex_lock_interruptible(&dev->mtx);
diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
index aa5dd56..dbad5c4 100644
--- a/drivers/isdn/i4l/isdn_net.c
+++ b/drivers/isdn/i4l/isdn_net.c
@@ -2611,10 +2611,9 @@
char newname[10];
if (p) {
- /* Slave-Name MUST not be empty */
- if (!strlen(p + 1))
+ /* Slave-Name MUST not be empty or overflow 'newname' */
+ if (strscpy(newname, p + 1, sizeof(newname)) <= 0)
return NULL;
- strcpy(newname, p + 1);
*p = 0;
/* Master must already exist */
if (!(n = isdn_net_findif(parm)))
diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c
index 9c1e8ad..bf3fbd0 100644
--- a/drivers/isdn/i4l/isdn_ppp.c
+++ b/drivers/isdn/i4l/isdn_ppp.c
@@ -2364,7 +2364,7 @@
id);
return NULL;
} else {
- rs = kzalloc(sizeof(struct ippp_ccp_reset_state), GFP_KERNEL);
+ rs = kzalloc(sizeof(struct ippp_ccp_reset_state), GFP_ATOMIC);
if (!rs)
return NULL;
rs->state = CCPResetIdle;
diff --git a/drivers/leds/leds-ktd2692.c b/drivers/leds/leds-ktd2692.c
index feca07b..1eb9fb3 100644
--- a/drivers/leds/leds-ktd2692.c
+++ b/drivers/leds/leds-ktd2692.c
@@ -296,15 +296,15 @@
return -ENXIO;
led->ctrl_gpio = devm_gpiod_get(dev, "ctrl", GPIOD_ASIS);
- if (IS_ERR(led->ctrl_gpio)) {
- ret = PTR_ERR(led->ctrl_gpio);
+ ret = PTR_ERR_OR_ZERO(led->ctrl_gpio);
+ if (ret) {
dev_err(dev, "cannot get ctrl-gpios %d\n", ret);
return ret;
}
led->aux_gpio = devm_gpiod_get(dev, "aux", GPIOD_ASIS);
- if (IS_ERR(led->aux_gpio)) {
- ret = PTR_ERR(led->aux_gpio);
+ ret = PTR_ERR_OR_ZERO(led->aux_gpio);
+ if (ret) {
dev_err(dev, "cannot get aux-gpios %d\n", ret);
return ret;
}
diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
index 6a4811f..9cf826d 100644
--- a/drivers/mailbox/mailbox.c
+++ b/drivers/mailbox/mailbox.c
@@ -104,11 +104,14 @@
/* Submit next message */
msg_submit(chan);
+ if (!mssg)
+ return;
+
/* Notify the client */
- if (mssg && chan->cl->tx_done)
+ if (chan->cl->tx_done)
chan->cl->tx_done(chan->cl, mssg, r);
- if (chan->cl->tx_block)
+ if (r != -ETIME && chan->cl->tx_block)
complete(&chan->tx_complete);
}
@@ -261,7 +264,7 @@
msg_submit(chan);
- if (chan->cl->tx_block && chan->active_req) {
+ if (chan->cl->tx_block) {
unsigned long wait;
int ret;
@@ -272,8 +275,8 @@
ret = wait_for_completion_timeout(&chan->tx_complete, wait);
if (ret == 0) {
- t = -EIO;
- tx_tick(chan, -EIO);
+ t = -ETIME;
+ tx_tick(chan, t);
}
}
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 933b52a..7945f91 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -392,6 +392,7 @@
config DM_RAID
tristate "RAID 1/4/5/6/10 target"
depends on BLK_DEV_DM
+ select MD_RAID0
select MD_RAID1
select MD_RAID10
select MD_RAID456
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index b38c6d0..7df8e28 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -222,7 +222,7 @@
* Buffers are freed after this timeout
*/
static unsigned dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;
-static unsigned dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES;
+static unsigned long dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES;
static unsigned long dm_bufio_peak_allocated;
static unsigned long dm_bufio_allocated_kmem_cache;
@@ -915,10 +915,11 @@
{
unsigned long buffers;
- if (ACCESS_ONCE(dm_bufio_cache_size) != dm_bufio_cache_size_latch) {
- mutex_lock(&dm_bufio_clients_lock);
- __cache_size_refresh();
- mutex_unlock(&dm_bufio_clients_lock);
+ if (unlikely(ACCESS_ONCE(dm_bufio_cache_size) != dm_bufio_cache_size_latch)) {
+ if (mutex_trylock(&dm_bufio_clients_lock)) {
+ __cache_size_refresh();
+ mutex_unlock(&dm_bufio_clients_lock);
+ }
}
buffers = dm_bufio_cache_size_per_client >>
@@ -1514,10 +1515,10 @@
return true;
}
-static unsigned get_retain_buffers(struct dm_bufio_client *c)
+static unsigned long get_retain_buffers(struct dm_bufio_client *c)
{
- unsigned retain_bytes = ACCESS_ONCE(dm_bufio_retain_bytes);
- return retain_bytes / c->block_size;
+ unsigned long retain_bytes = ACCESS_ONCE(dm_bufio_retain_bytes);
+ return retain_bytes >> (c->sectors_per_block_bits + SECTOR_SHIFT);
}
static unsigned long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
@@ -1527,7 +1528,7 @@
struct dm_buffer *b, *tmp;
unsigned long freed = 0;
unsigned long count = nr_to_scan;
- unsigned retain_target = get_retain_buffers(c);
+ unsigned long retain_target = get_retain_buffers(c);
for (l = 0; l < LIST_SIZE; l++) {
list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) {
@@ -1744,11 +1745,19 @@
static void __evict_old_buffers(struct dm_bufio_client *c, unsigned long age_hz)
{
struct dm_buffer *b, *tmp;
- unsigned retain_target = get_retain_buffers(c);
- unsigned count;
+ unsigned long retain_target = get_retain_buffers(c);
+ unsigned long count;
+ LIST_HEAD(write_list);
dm_bufio_lock(c);
+ __check_watermark(c, &write_list);
+ if (unlikely(!list_empty(&write_list))) {
+ dm_bufio_unlock(c);
+ __flush_write_list(&write_list);
+ dm_bufio_lock(c);
+ }
+
count = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];
list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_CLEAN], lru_list) {
if (count <= retain_target)
@@ -1773,6 +1782,8 @@
mutex_lock(&dm_bufio_clients_lock);
+ __cache_size_refresh();
+
list_for_each_entry(c, &dm_bufio_all_clients, client_list)
__evict_old_buffers(c, max_age_hz);
@@ -1896,7 +1907,7 @@
module_param_named(max_age_seconds, dm_bufio_max_age, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds");
-module_param_named(retain_bytes, dm_bufio_retain_bytes, uint, S_IRUGO | S_IWUSR);
+module_param_named(retain_bytes, dm_bufio_retain_bytes, ulong, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(retain_bytes, "Try to keep at least this many bytes cached in memory");
module_param_named(peak_allocated_bytes, dm_bufio_peak_allocated, ulong, S_IRUGO | S_IWUSR);
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index 3970cda..d3c55d7 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -1326,17 +1326,19 @@
int dm_cache_commit(struct dm_cache_metadata *cmd, bool clean_shutdown)
{
- int r;
+ int r = -EINVAL;
flags_mutator mutator = (clean_shutdown ? set_clean_shutdown :
clear_clean_shutdown);
WRITE_LOCK(cmd);
+ if (cmd->fail_io)
+ goto out;
+
r = __commit_transaction(cmd, mutator);
if (r)
goto out;
r = __begin_transaction(cmd);
-
out:
WRITE_UNLOCK(cmd);
return r;
@@ -1348,7 +1350,8 @@
int r = -EINVAL;
READ_LOCK(cmd);
- r = dm_sm_get_nr_free(cmd->metadata_sm, result);
+ if (!cmd->fail_io)
+ r = dm_sm_get_nr_free(cmd->metadata_sm, result);
READ_UNLOCK(cmd);
return r;
@@ -1360,7 +1363,8 @@
int r = -EINVAL;
READ_LOCK(cmd);
- r = dm_sm_get_nr_blocks(cmd->metadata_sm, result);
+ if (!cmd->fail_io)
+ r = dm_sm_get_nr_blocks(cmd->metadata_sm, result);
READ_UNLOCK(cmd);
return r;
diff --git a/drivers/md/dm-era-target.c b/drivers/md/dm-era-target.c
index 665bf32..32e76c5 100644
--- a/drivers/md/dm-era-target.c
+++ b/drivers/md/dm-era-target.c
@@ -961,18 +961,18 @@
}
}
- r = save_sm_root(md);
- if (r) {
- DMERR("%s: save_sm_root failed", __func__);
- return r;
- }
-
r = dm_tm_pre_commit(md->tm);
if (r) {
DMERR("%s: pre commit failed", __func__);
return r;
}
+ r = save_sm_root(md);
+ if (r) {
+ DMERR("%s: save_sm_root failed", __func__);
+ return r;
+ }
+
r = superblock_lock(md, &sblock);
if (r) {
DMERR("%s: superblock lock failed", __func__);
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 911ada6..3b67afd 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -485,11 +485,11 @@
if (r < 0)
return r;
- r = save_sm_roots(pmd);
+ r = dm_tm_pre_commit(pmd->tm);
if (r < 0)
return r;
- r = dm_tm_pre_commit(pmd->tm);
+ r = save_sm_roots(pmd);
if (r < 0)
return r;
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index e9b34de..5d42d8f 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1481,26 +1481,29 @@
struct dm_offload *o = container_of(cb, struct dm_offload, cb);
struct bio_list list;
struct bio *bio;
+ int i;
INIT_LIST_HEAD(&o->cb.list);
if (unlikely(!current->bio_list))
return;
- list = *current->bio_list;
- bio_list_init(current->bio_list);
+ for (i = 0; i < 2; i++) {
+ list = current->bio_list[i];
+ bio_list_init(¤t->bio_list[i]);
- while ((bio = bio_list_pop(&list))) {
- struct bio_set *bs = bio->bi_pool;
- if (unlikely(!bs) || bs == fs_bio_set) {
- bio_list_add(current->bio_list, bio);
- continue;
+ while ((bio = bio_list_pop(&list))) {
+ struct bio_set *bs = bio->bi_pool;
+ if (unlikely(!bs) || bs == fs_bio_set) {
+ bio_list_add(¤t->bio_list[i], bio);
+ continue;
+ }
+
+ spin_lock(&bs->rescue_lock);
+ bio_list_add(&bs->rescue_list, bio);
+ queue_work(bs->rescue_workqueue, &bs->rescue_work);
+ spin_unlock(&bs->rescue_lock);
}
-
- spin_lock(&bs->rescue_lock);
- bio_list_add(&bs->rescue_list, bio);
- queue_work(bs->rescue_workqueue, &bs->rescue_work);
- spin_unlock(&bs->rescue_lock);
}
}
diff --git a/drivers/md/md.c b/drivers/md/md.c
index eff554a..0a856cb 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1866,7 +1866,7 @@
}
sb = page_address(rdev->sb_page);
sb->data_size = cpu_to_le64(num_sectors);
- sb->super_offset = rdev->sb_start;
+ sb->super_offset = cpu_to_le64(rdev->sb_start);
sb->sb_csum = calc_sb_1_csum(sb);
md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
rdev->sb_page);
@@ -2273,7 +2273,7 @@
/* Check if any mddev parameters have changed */
if ((mddev->dev_sectors != le64_to_cpu(sb->size)) ||
(mddev->reshape_position != le64_to_cpu(sb->reshape_position)) ||
- (mddev->layout != le64_to_cpu(sb->layout)) ||
+ (mddev->layout != le32_to_cpu(sb->layout)) ||
(mddev->raid_disks != le32_to_cpu(sb->raid_disks)) ||
(mddev->chunk_sectors != le32_to_cpu(sb->chunksize)))
return true;
diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
index b1ced58..a1a6820 100644
--- a/drivers/md/persistent-data/dm-btree.c
+++ b/drivers/md/persistent-data/dm-btree.c
@@ -887,8 +887,12 @@
else
*result_key = le64_to_cpu(ro_node(s)->keys[0]);
- if (next_block || flags & INTERNAL_NODE)
- block = value64(ro_node(s), i);
+ if (next_block || flags & INTERNAL_NODE) {
+ if (find_highest)
+ block = value64(ro_node(s), i);
+ else
+ block = value64(ro_node(s), 0);
+ }
} while (flags & INTERNAL_NODE);
diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c
index ebb280a..32adf6b 100644
--- a/drivers/md/persistent-data/dm-space-map-disk.c
+++ b/drivers/md/persistent-data/dm-space-map-disk.c
@@ -142,10 +142,23 @@
static int sm_disk_dec_block(struct dm_space_map *sm, dm_block_t b)
{
+ int r;
+ uint32_t old_count;
enum allocation_event ev;
struct sm_disk *smd = container_of(sm, struct sm_disk, sm);
- return sm_ll_dec(&smd->ll, b, &ev);
+ r = sm_ll_dec(&smd->ll, b, &ev);
+ if (!r && (ev == SM_FREE)) {
+ /*
+ * It's only free if it's also free in the last
+ * transaction.
+ */
+ r = sm_ll_lookup(&smd->old_ll, b, &old_count);
+ if (!r && !old_count)
+ smd->nr_allocated_this_transaction--;
+ }
+
+ return r;
}
static int sm_disk_new_block(struct dm_space_map *sm, dm_block_t *b)
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 515554c..f24a9e1 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -570,7 +570,7 @@
if (best_dist_disk < 0) {
if (is_badblock(rdev, this_sector, sectors,
&first_bad, &bad_sectors)) {
- if (first_bad < this_sector)
+ if (first_bad <= this_sector)
/* Cannot use this */
continue;
best_good_sectors = first_bad - this_sector;
@@ -877,7 +877,8 @@
((conf->start_next_window <
conf->next_resync + RESYNC_SECTORS) &&
current->bio_list &&
- !bio_list_empty(current->bio_list))),
+ (!bio_list_empty(¤t->bio_list[0]) ||
+ !bio_list_empty(¤t->bio_list[1])))),
conf->resync_lock);
conf->nr_waiting--;
}
@@ -1087,7 +1088,7 @@
*/
DEFINE_WAIT(w);
for (;;) {
- flush_signals(current);
+ sigset_t full, old;
prepare_to_wait(&conf->wait_barrier,
&w, TASK_INTERRUPTIBLE);
if (bio_end_sector(bio) <= mddev->suspend_lo ||
@@ -1096,7 +1097,10 @@
!md_cluster_ops->area_resyncing(mddev, WRITE,
bio->bi_iter.bi_sector, bio_end_sector(bio))))
break;
+ sigfillset(&full);
+ sigprocmask(SIG_BLOCK, &full, &old);
schedule();
+ sigprocmask(SIG_SETMASK, &old, NULL);
}
finish_wait(&conf->wait_barrier, &w);
}
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index ebb0dd6..e5ee4e9 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -946,7 +946,8 @@
!conf->barrier ||
(conf->nr_pending &&
current->bio_list &&
- !bio_list_empty(current->bio_list)),
+ (!bio_list_empty(¤t->bio_list[0]) ||
+ !bio_list_empty(¤t->bio_list[1]))),
conf->resync_lock);
conf->nr_waiting--;
}
@@ -1072,6 +1073,8 @@
int max_sectors;
int sectors;
+ md_write_start(mddev, bio);
+
/*
* Register the new request and wait if the reconstruction
* thread has put up a bar for new requests.
@@ -1455,8 +1458,6 @@
return;
}
- md_write_start(mddev, bio);
-
do {
/*
@@ -1477,7 +1478,25 @@
split = bio;
}
+ /*
+ * If a bio is split, the first part of bio will pass
+ * barrier but the bio is queued in current->bio_list (see
+ * generic_make_request). If there is a raise_barrier() called
+ * here, the second part of bio can't pass barrier. But since
+ * the first part bio isn't dispatched to underlying disks
+ * yet, the barrier is never released, hence raise_barrier will
+ * always wait. We have a deadlock.
+ * Note, this only happens in read path. For write path, the
+ * first part of bio is dispatched in a schedule() call
+ * (because of blk plug) or offloaded to raid10d.
+ * Quitting from the function immediately can change the bio
+ * order queued in bio_list and avoid the deadlock.
+ */
__make_request(mddev, split);
+ if (split != bio && bio_data_dir(bio) == READ) {
+ generic_make_request(bio);
+ break;
+ }
} while (split != bio);
/* In case raid10d snuck in to freeze_array */
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 7af9769..8f60520c 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2232,6 +2232,10 @@
err = -ENOMEM;
mutex_unlock(&conf->cache_size_mutex);
+
+ conf->slab_cache = sc;
+ conf->active_name = 1-conf->active_name;
+
/* Step 4, return new stripes to service */
while(!list_empty(&newstripes)) {
nsh = list_entry(newstripes.next, struct stripe_head, lru);
@@ -2249,8 +2253,6 @@
}
/* critical section pass, GFP_NOIO no longer needed */
- conf->slab_cache = sc;
- conf->active_name = 1-conf->active_name;
if (!err)
conf->pool_size = newsize;
return err;
@@ -5277,12 +5279,15 @@
* userspace, we want an interruptible
* wait.
*/
- flush_signals(current);
prepare_to_wait(&conf->wait_for_overlap,
&w, TASK_INTERRUPTIBLE);
if (logical_sector >= mddev->suspend_lo &&
logical_sector < mddev->suspend_hi) {
+ sigset_t full, old;
+ sigfillset(&full);
+ sigprocmask(SIG_BLOCK, &full, &old);
schedule();
+ sigprocmask(SIG_SETMASK, &old, NULL);
do_prepare = true;
}
goto retry;
@@ -5816,6 +5821,8 @@
pr_debug("%d stripes handled\n", handled);
spin_unlock_irq(&conf->device_lock);
+
+ async_tx_issue_pending_all();
blk_finish_plug(&plug);
pr_debug("--- raid5worker inactive\n");
@@ -7526,12 +7533,10 @@
{
if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
- struct md_rdev *rdev;
spin_lock_irq(&conf->device_lock);
conf->previous_raid_disks = conf->raid_disks;
- rdev_for_each(rdev, conf->mddev)
- rdev->data_offset = rdev->new_data_offset;
+ md_finish_reshape(conf->mddev);
smp_wmb();
conf->reshape_progress = MaxSector;
conf->mddev->reshape_position = MaxSector;
diff --git a/drivers/media/dvb-frontends/cxd2841er.c b/drivers/media/dvb-frontends/cxd2841er.c
index fdffb2f..107853b0 100644
--- a/drivers/media/dvb-frontends/cxd2841er.c
+++ b/drivers/media/dvb-frontends/cxd2841er.c
@@ -2678,7 +2678,9 @@
FE_CAN_MUTE_TS |
FE_CAN_2G_MODULATION,
.frequency_min = 42000000,
- .frequency_max = 1002000000
+ .frequency_max = 1002000000,
+ .symbol_rate_min = 870000,
+ .symbol_rate_max = 11700000
},
.init = cxd2841er_init_tc,
.sleep = cxd2841er_sleep_tc,
diff --git a/drivers/media/i2c/s5c73m3/s5c73m3-ctrls.c b/drivers/media/i2c/s5c73m3/s5c73m3-ctrls.c
index 8001cde..503135a 100644
--- a/drivers/media/i2c/s5c73m3/s5c73m3-ctrls.c
+++ b/drivers/media/i2c/s5c73m3/s5c73m3-ctrls.c
@@ -211,7 +211,7 @@
}
if ((ctrl->val ^ ctrl->cur.val) & V4L2_LOCK_FOCUS)
- ret = s5c73m3_af_run(state, ~af_lock);
+ ret = s5c73m3_af_run(state, !af_lock);
return ret;
}
diff --git a/drivers/media/pci/cx88/cx88-cards.c b/drivers/media/pci/cx88/cx88-cards.c
index 8f2556e..61611d1 100644
--- a/drivers/media/pci/cx88/cx88-cards.c
+++ b/drivers/media/pci/cx88/cx88-cards.c
@@ -3691,7 +3691,14 @@
core->nr = nr;
sprintf(core->name, "cx88[%d]", core->nr);
- core->tvnorm = V4L2_STD_NTSC_M;
+ /*
+ * Note: Setting the initial standard here would cause the first call to
+ * cx88_set_tvnorm() to return without programming any registers. Leave
+ * it blank at this point and it will get set later in
+ * cx8800_initdev()
+ */
+ core->tvnorm = 0;
+
core->width = 320;
core->height = 240;
core->field = V4L2_FIELD_INTERLACED;
diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
index aef9acf..abbf5b05b 100644
--- a/drivers/media/pci/cx88/cx88-video.c
+++ b/drivers/media/pci/cx88/cx88-video.c
@@ -1429,7 +1429,7 @@
/* initial device configuration */
mutex_lock(&core->lock);
- cx88_set_tvnorm(core, core->tvnorm);
+ cx88_set_tvnorm(core, V4L2_STD_NTSC_M);
v4l2_ctrl_handler_setup(&core->video_hdl);
v4l2_ctrl_handler_setup(&core->audio_hdl);
cx88_video_mux(core, 0);
diff --git a/drivers/media/pci/saa7134/saa7134-i2c.c b/drivers/media/pci/saa7134/saa7134-i2c.c
index 8ef6399..bc95752 100644
--- a/drivers/media/pci/saa7134/saa7134-i2c.c
+++ b/drivers/media/pci/saa7134/saa7134-i2c.c
@@ -355,12 +355,43 @@
/* ----------------------------------------------------------- */
+/* On Medion 7134 reading EEPROM needs DVB-T demod i2c gate open */
+static void saa7134_i2c_eeprom_md7134_gate(struct saa7134_dev *dev)
+{
+ u8 subaddr = 0x7, dmdregval;
+ u8 data[2];
+ int ret;
+ struct i2c_msg i2cgatemsg_r[] = { {.addr = 0x08, .flags = 0,
+ .buf = &subaddr, .len = 1},
+ {.addr = 0x08,
+ .flags = I2C_M_RD,
+ .buf = &dmdregval, .len = 1}
+ };
+ struct i2c_msg i2cgatemsg_w[] = { {.addr = 0x08, .flags = 0,
+ .buf = data, .len = 2} };
+
+ ret = i2c_transfer(&dev->i2c_adap, i2cgatemsg_r, 2);
+ if ((ret == 2) && (dmdregval & 0x2)) {
+ pr_debug("%s: DVB-T demod i2c gate was left closed\n",
+ dev->name);
+
+ data[0] = subaddr;
+ data[1] = (dmdregval & ~0x2);
+ if (i2c_transfer(&dev->i2c_adap, i2cgatemsg_w, 1) != 1)
+ pr_err("%s: EEPROM i2c gate open failure\n",
+ dev->name);
+ }
+}
+
static int
saa7134_i2c_eeprom(struct saa7134_dev *dev, unsigned char *eedata, int len)
{
unsigned char buf;
int i,err;
+ if (dev->board == SAA7134_BOARD_MD7134)
+ saa7134_i2c_eeprom_md7134_gate(dev);
+
dev->i2c_client.addr = 0xa0 >> 1;
buf = 0;
if (1 != (err = i2c_master_send(&dev->i2c_client,&buf,1))) {
diff --git a/drivers/media/pci/saa7164/saa7164-bus.c b/drivers/media/pci/saa7164/saa7164-bus.c
index a18fe5d..b4857cd 100644
--- a/drivers/media/pci/saa7164/saa7164-bus.c
+++ b/drivers/media/pci/saa7164/saa7164-bus.c
@@ -393,11 +393,11 @@
msg_tmp.size = le16_to_cpu((__force __le16)msg_tmp.size);
msg_tmp.command = le32_to_cpu((__force __le32)msg_tmp.command);
msg_tmp.controlselector = le16_to_cpu((__force __le16)msg_tmp.controlselector);
+ memcpy(msg, &msg_tmp, sizeof(*msg));
/* No need to update the read positions, because this was a peek */
/* If the caller specifically want to peek, return */
if (peekonly) {
- memcpy(msg, &msg_tmp, sizeof(*msg));
goto peekout;
}
@@ -442,21 +442,15 @@
space_rem = bus->m_dwSizeGetRing - curr_grp;
if (space_rem < sizeof(*msg)) {
- /* msg wraps around the ring */
- memcpy_fromio(msg, bus->m_pdwGetRing + curr_grp, space_rem);
- memcpy_fromio((u8 *)msg + space_rem, bus->m_pdwGetRing,
- sizeof(*msg) - space_rem);
if (buf)
memcpy_fromio(buf, bus->m_pdwGetRing + sizeof(*msg) -
space_rem, buf_size);
} else if (space_rem == sizeof(*msg)) {
- memcpy_fromio(msg, bus->m_pdwGetRing + curr_grp, sizeof(*msg));
if (buf)
memcpy_fromio(buf, bus->m_pdwGetRing, buf_size);
} else {
/* Additional data wraps around the ring */
- memcpy_fromio(msg, bus->m_pdwGetRing + curr_grp, sizeof(*msg));
if (buf) {
memcpy_fromio(buf, bus->m_pdwGetRing + curr_grp +
sizeof(*msg), space_rem - sizeof(*msg));
@@ -469,15 +463,10 @@
} else {
/* No wrapping */
- memcpy_fromio(msg, bus->m_pdwGetRing + curr_grp, sizeof(*msg));
if (buf)
memcpy_fromio(buf, bus->m_pdwGetRing + curr_grp + sizeof(*msg),
buf_size);
}
- /* Convert from little endian to CPU */
- msg->size = le16_to_cpu((__force __le16)msg->size);
- msg->command = le32_to_cpu((__force __le32)msg->command);
- msg->controlselector = le16_to_cpu((__force __le16)msg->controlselector);
/* Update the read positions, adjusting the ring */
saa7164_writel(bus->m_dwGetReadPos, new_grp);
diff --git a/drivers/media/platform/davinci/vpfe_capture.c b/drivers/media/platform/davinci/vpfe_capture.c
index 7767e07..1f656a3 100644
--- a/drivers/media/platform/davinci/vpfe_capture.c
+++ b/drivers/media/platform/davinci/vpfe_capture.c
@@ -1709,27 +1709,9 @@
switch (cmd) {
case VPFE_CMD_S_CCDC_RAW_PARAMS:
+ ret = -EINVAL;
v4l2_warn(&vpfe_dev->v4l2_dev,
- "VPFE_CMD_S_CCDC_RAW_PARAMS: experimental ioctl\n");
- if (ccdc_dev->hw_ops.set_params) {
- ret = ccdc_dev->hw_ops.set_params(param);
- if (ret) {
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
- "Error setting parameters in CCDC\n");
- goto unlock_out;
- }
- ret = vpfe_get_ccdc_image_format(vpfe_dev,
- &vpfe_dev->fmt);
- if (ret < 0) {
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
- "Invalid image format at CCDC\n");
- goto unlock_out;
- }
- } else {
- ret = -EINVAL;
- v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev,
- "VPFE_CMD_S_CCDC_RAW_PARAMS not supported\n");
- }
+ "VPFE_CMD_S_CCDC_RAW_PARAMS not supported\n");
break;
default:
ret = -ENOTTY;
diff --git a/drivers/media/platform/msm/camera_v2/fd/msm_fd_dev.c b/drivers/media/platform/msm/camera_v2/fd/msm_fd_dev.c
index d881b4a..e813db2 100644
--- a/drivers/media/platform/msm/camera_v2/fd/msm_fd_dev.c
+++ b/drivers/media/platform/msm/camera_v2/fd/msm_fd_dev.c
@@ -1376,7 +1376,9 @@
}
fd->hw_revision = msm_fd_hw_get_revision(fd);
+ /* Reset HW and don't wait for complete in probe */
msm_fd_hw_put(fd);
+ fd->init = true;
ret = msm_fd_hw_request_irq(pdev, fd, msm_fd_wq_handler);
if (ret < 0) {
diff --git a/drivers/media/platform/msm/camera_v2/fd/msm_fd_dev.h b/drivers/media/platform/msm/camera_v2/fd/msm_fd_dev.h
index 2b81e5b..5b7a242 100644
--- a/drivers/media/platform/msm/camera_v2/fd/msm_fd_dev.h
+++ b/drivers/media/platform/msm/camera_v2/fd/msm_fd_dev.h
@@ -267,6 +267,7 @@
struct completion hw_halt_completion;
int recovery_mode;
uint32_t clk_rate_idx;
+ bool init;
};
#endif /* __MSM_FD_DEV_H__ */
diff --git a/drivers/media/platform/msm/camera_v2/fd/msm_fd_hw.c b/drivers/media/platform/msm/camera_v2/fd/msm_fd_hw.c
index ea0db4d..0797a2f 100644
--- a/drivers/media/platform/msm/camera_v2/fd/msm_fd_hw.c
+++ b/drivers/media/platform/msm/camera_v2/fd/msm_fd_hw.c
@@ -488,10 +488,13 @@
msm_fd_hw_write_reg(fd, MSM_FD_IOMEM_MISC, MSM_FD_HW_STOP, 1);
- time = wait_for_completion_timeout(&fd->hw_halt_completion,
- msecs_to_jiffies(MSM_FD_HALT_TIMEOUT_MS));
- if (!time)
- dev_err(fd->dev, "Face detection halt timeout\n");
+ if (likely(fd->init)) {
+ time = wait_for_completion_timeout(
+ &fd->hw_halt_completion,
+ msecs_to_jiffies(MSM_FD_HALT_TIMEOUT_MS));
+ if (!time)
+ dev_err(fd->dev, "Face detection halt timeout\n");
+ }
/* Reset sequence after halt */
msm_fd_hw_write_reg(fd, MSM_FD_IOMEM_MISC, MSM_FD_MISC_SW_RESET,
diff --git a/drivers/media/platform/msm/camera_v2/msm.c b/drivers/media/platform/msm/camera_v2/msm.c
index 29b099b..ebf285c 100644
--- a/drivers/media/platform/msm/camera_v2/msm.c
+++ b/drivers/media/platform/msm/camera_v2/msm.c
@@ -218,20 +218,20 @@
static void msm_pm_qos_add_request(void)
{
- pr_info("%s: add request", __func__);
+ pr_info("%s: add request\n", __func__);
pm_qos_add_request(&msm_v4l2_pm_qos_request, PM_QOS_CPU_DMA_LATENCY,
PM_QOS_DEFAULT_VALUE);
}
static void msm_pm_qos_remove_request(void)
{
- pr_info("%s: remove request", __func__);
+ pr_info("%s: remove request\n", __func__);
pm_qos_remove_request(&msm_v4l2_pm_qos_request);
}
void msm_pm_qos_update_request(int val)
{
- pr_info("%s: update request %d", __func__, val);
+ pr_info("%s: update request %d\n", __func__, val);
pm_qos_update_request(&msm_v4l2_pm_qos_request, val);
}
diff --git a/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c b/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
index 749dd4a..e3fe0f7 100644
--- a/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
+++ b/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
@@ -449,10 +449,12 @@
buff->map_info.buff_info = *buffer_info;
buff->map_info.buf_fd = buffer_info->fd;
+#if defined(CONFIG_TRACING) && defined(DEBUG)
trace_printk("fd %d index %d native_buff %d ssid %d %d\n",
buffer_info->fd, buffer_info->index,
buffer_info->native_buff, buff_queue->session_id,
buff_queue->stream_id);
+#endif
if (buff_queue->security_mode == SECURE_MODE)
rc = cam_smmu_get_stage2_phy_addr(cpp_dev->iommu_hdl,
@@ -484,10 +486,12 @@
{
int ret = -1;
+#if defined(CONFIG_TRACING) && defined(DEBUG)
trace_printk("fd %d index %d native_buf %d ssid %d %d\n",
buff->map_info.buf_fd, buff->map_info.buff_info.index,
buff->map_info.buff_info.native_buff, buff_queue->session_id,
buff_queue->stream_id);
+#endif
if (buff_queue->security_mode == SECURE_MODE)
ret = cam_smmu_put_stage2_phy_addr(cpp_dev->iommu_hdl,
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
index fd5c22e..7f11429 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
@@ -49,29 +49,36 @@
/* Macro for constructing the REGDMA command */
#define SDE_REGDMA_WRITE(p, off, data) \
do { \
- *p++ = REGDMA_OP_REGWRITE | \
- ((off) & REGDMA_ADDR_OFFSET_MASK); \
- *p++ = (data); \
+ writel_relaxed(REGDMA_OP_REGWRITE | \
+ ((off) & REGDMA_ADDR_OFFSET_MASK), (p)); \
+ (p)++; \
+ writel_relaxed((data), (p)); \
+ (p)++; \
} while (0)
#define SDE_REGDMA_MODIFY(p, off, mask, data) \
do { \
- *p++ = REGDMA_OP_REGMODIFY | \
- ((off) & REGDMA_ADDR_OFFSET_MASK); \
- *p++ = (mask); \
- *p++ = (data); \
+ writel_relaxed(REGDMA_OP_REGMODIFY | \
+ ((off) & REGDMA_ADDR_OFFSET_MASK), (p)); \
+ (p)++; \
+ writel_relaxed((mask), (p)); \
+ (p)++; \
+ writel_relaxed((data), (p)); \
+ (p)++; \
} while (0)
#define SDE_REGDMA_BLKWRITE_INC(p, off, len) \
do { \
- *p++ = REGDMA_OP_BLKWRITE_INC | \
- ((off) & REGDMA_ADDR_OFFSET_MASK); \
- *p++ = (len); \
+ writel_relaxed(REGDMA_OP_BLKWRITE_INC | \
+ ((off) & REGDMA_ADDR_OFFSET_MASK), (p)); \
+ (p)++; \
+ writel_relaxed((len), (p)); \
+ (p)++; \
} while (0)
#define SDE_REGDMA_BLKWRITE_DATA(p, data) \
do { \
- *(p) = (data); \
+ writel_relaxed((data), (p)); \
(p)++; \
} while (0)
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.c b/drivers/media/platform/s5p-jpeg/jpeg-core.c
index 4a608cb..9c6fc09 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-core.c
+++ b/drivers/media/platform/s5p-jpeg/jpeg-core.c
@@ -1098,10 +1098,10 @@
struct s5p_jpeg_ctx *ctx)
{
int c, components = 0, notfound, n_dht = 0, n_dqt = 0;
- unsigned int height, width, word, subsampling = 0, sos = 0, sof = 0,
- sof_len = 0;
- unsigned int dht[S5P_JPEG_MAX_MARKER], dht_len[S5P_JPEG_MAX_MARKER],
- dqt[S5P_JPEG_MAX_MARKER], dqt_len[S5P_JPEG_MAX_MARKER];
+ unsigned int height = 0, width = 0, word, subsampling = 0;
+ unsigned int sos = 0, sof = 0, sof_len = 0;
+ unsigned int dht[S5P_JPEG_MAX_MARKER], dht_len[S5P_JPEG_MAX_MARKER];
+ unsigned int dqt[S5P_JPEG_MAX_MARKER], dqt_len[S5P_JPEG_MAX_MARKER];
long length;
struct s5p_jpeg_buffer jpeg_buffer;
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
index c8946f9..7727789 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
@@ -173,6 +173,7 @@
}
s5p_mfc_clock_on();
ret = s5p_mfc_init_hw(dev);
+ s5p_mfc_clock_off();
if (ret)
mfc_err("Failed to reinit FW\n");
}
diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
index 65f80b8..eb9e7fe 100644
--- a/drivers/media/rc/imon.c
+++ b/drivers/media/rc/imon.c
@@ -1629,7 +1629,7 @@
if (kc == KEY_KEYBOARD && !ictx->release_code) {
ictx->last_keycode = kc;
if (!nomouse) {
- ictx->pad_mouse = ~(ictx->pad_mouse) & 0x1;
+ ictx->pad_mouse = !ictx->pad_mouse;
dev_dbg(dev, "toggling to %s mode\n",
ictx->pad_mouse ? "mouse" : "keyboard");
spin_unlock_irqrestore(&ictx->kc_lock, flags);
diff --git a/drivers/media/rc/ir-lirc-codec.c b/drivers/media/rc/ir-lirc-codec.c
index a32659f..efc21b1 100644
--- a/drivers/media/rc/ir-lirc-codec.c
+++ b/drivers/media/rc/ir-lirc-codec.c
@@ -254,7 +254,7 @@
return 0;
case LIRC_GET_REC_RESOLUTION:
- val = dev->rx_resolution;
+ val = dev->rx_resolution / 1000;
break;
case LIRC_SET_WIDEBAND_RECEIVER:
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
index 2cdb740..f838d9c 100644
--- a/drivers/media/rc/mceusb.c
+++ b/drivers/media/rc/mceusb.c
@@ -1321,8 +1321,8 @@
}
}
}
- if (ep_in == NULL) {
- dev_dbg(&intf->dev, "inbound and/or endpoint not found");
+ if (!ep_in || !ep_out) {
+ dev_dbg(&intf->dev, "required endpoints not found\n");
return -ENODEV;
}
diff --git a/drivers/media/tuners/tuner-xc2028.c b/drivers/media/tuners/tuner-xc2028.c
index 317ef63..8d96a22 100644
--- a/drivers/media/tuners/tuner-xc2028.c
+++ b/drivers/media/tuners/tuner-xc2028.c
@@ -281,6 +281,14 @@
int i;
tuner_dbg("%s called\n", __func__);
+ /* free allocated f/w string */
+ if (priv->fname != firmware_name)
+ kfree(priv->fname);
+ priv->fname = NULL;
+
+ priv->state = XC2028_NO_FIRMWARE;
+ memset(&priv->cur_fw, 0, sizeof(priv->cur_fw));
+
if (!priv->firm)
return;
@@ -291,9 +299,6 @@
priv->firm = NULL;
priv->firm_size = 0;
- priv->state = XC2028_NO_FIRMWARE;
-
- memset(&priv->cur_fw, 0, sizeof(priv->cur_fw));
}
static int load_all_firmwares(struct dvb_frontend *fe,
@@ -884,9 +889,8 @@
return 0;
fail:
- priv->state = XC2028_NO_FIRMWARE;
+ free_firmware(priv);
- memset(&priv->cur_fw, 0, sizeof(priv->cur_fw));
if (retry_count < 8) {
msleep(50);
retry_count++;
@@ -1332,11 +1336,8 @@
mutex_lock(&xc2028_list_mutex);
/* only perform final cleanup if this is the last instance */
- if (hybrid_tuner_report_instance_count(priv) == 1) {
+ if (hybrid_tuner_report_instance_count(priv) == 1)
free_firmware(priv);
- kfree(priv->ctrl.fname);
- priv->ctrl.fname = NULL;
- }
if (priv)
hybrid_tuner_release_state(priv);
@@ -1399,19 +1400,8 @@
/*
* Copy the config data.
- * For the firmware name, keep a local copy of the string,
- * in order to avoid troubles during device release.
*/
- kfree(priv->ctrl.fname);
- priv->ctrl.fname = NULL;
memcpy(&priv->ctrl, p, sizeof(priv->ctrl));
- if (p->fname) {
- priv->ctrl.fname = kstrdup(p->fname, GFP_KERNEL);
- if (priv->ctrl.fname == NULL) {
- rc = -ENOMEM;
- goto unlock;
- }
- }
/*
* If firmware name changed, frees firmware. As free_firmware will
@@ -1426,10 +1416,15 @@
if (priv->state == XC2028_NO_FIRMWARE) {
if (!firmware_name[0])
- priv->fname = priv->ctrl.fname;
+ priv->fname = kstrdup(p->fname, GFP_KERNEL);
else
priv->fname = firmware_name;
+ if (!priv->fname) {
+ rc = -ENOMEM;
+ goto unlock;
+ }
+
rc = request_firmware_nowait(THIS_MODULE, 1,
priv->fname,
priv->i2c_props.adap->dev.parent,
diff --git a/drivers/media/usb/cx231xx/cx231xx-audio.c b/drivers/media/usb/cx231xx/cx231xx-audio.c
index de4ae5e..10d8a08e 100644
--- a/drivers/media/usb/cx231xx/cx231xx-audio.c
+++ b/drivers/media/usb/cx231xx/cx231xx-audio.c
@@ -671,10 +671,8 @@
spin_lock_init(&adev->slock);
err = snd_pcm_new(card, "Cx231xx Audio", 0, 0, 1, &pcm);
- if (err < 0) {
- snd_card_free(card);
- return err;
- }
+ if (err < 0)
+ goto err_free_card;
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE,
&snd_cx231xx_pcm_capture);
@@ -688,10 +686,9 @@
INIT_WORK(&dev->wq_trigger, audio_trigger);
err = snd_card_register(card);
- if (err < 0) {
- snd_card_free(card);
- return err;
- }
+ if (err < 0)
+ goto err_free_card;
+
adev->sndcard = card;
adev->udev = dev->udev;
@@ -701,6 +698,11 @@
hs_config_info[0].interface_info.
audio_index + 1];
+ if (uif->altsetting[0].desc.bNumEndpoints < isoc_pipe + 1) {
+ err = -ENODEV;
+ goto err_free_card;
+ }
+
adev->end_point_addr =
uif->altsetting[0].endpoint[isoc_pipe].desc.
bEndpointAddress;
@@ -710,13 +712,20 @@
"audio EndPoint Addr 0x%x, Alternate settings: %i\n",
adev->end_point_addr, adev->num_alt);
adev->alt_max_pkt_size = kmalloc(32 * adev->num_alt, GFP_KERNEL);
-
- if (adev->alt_max_pkt_size == NULL)
- return -ENOMEM;
+ if (!adev->alt_max_pkt_size) {
+ err = -ENOMEM;
+ goto err_free_card;
+ }
for (i = 0; i < adev->num_alt; i++) {
- u16 tmp =
- le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe].desc.
+ u16 tmp;
+
+ if (uif->altsetting[i].desc.bNumEndpoints < isoc_pipe + 1) {
+ err = -ENODEV;
+ goto err_free_pkt_size;
+ }
+
+ tmp = le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe].desc.
wMaxPacketSize);
adev->alt_max_pkt_size[i] =
(tmp & 0x07ff) * (((tmp & 0x1800) >> 11) + 1);
@@ -726,6 +735,13 @@
}
return 0;
+
+err_free_pkt_size:
+ kfree(adev->alt_max_pkt_size);
+err_free_card:
+ snd_card_free(card);
+
+ return err;
}
static int cx231xx_audio_fini(struct cx231xx *dev)
diff --git a/drivers/media/usb/cx231xx/cx231xx-cards.c b/drivers/media/usb/cx231xx/cx231xx-cards.c
index 8389c16..2c5f76d 100644
--- a/drivers/media/usb/cx231xx/cx231xx-cards.c
+++ b/drivers/media/usb/cx231xx/cx231xx-cards.c
@@ -1447,6 +1447,9 @@
uif = udev->actconfig->interface[idx];
+ if (uif->altsetting[0].desc.bNumEndpoints < isoc_pipe + 1)
+ return -ENODEV;
+
dev->video_mode.end_point_addr = uif->altsetting[0].endpoint[isoc_pipe].desc.bEndpointAddress;
dev->video_mode.num_alt = uif->num_altsetting;
@@ -1460,7 +1463,12 @@
return -ENOMEM;
for (i = 0; i < dev->video_mode.num_alt; i++) {
- u16 tmp = le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe].desc.wMaxPacketSize);
+ u16 tmp;
+
+ if (uif->altsetting[i].desc.bNumEndpoints < isoc_pipe + 1)
+ return -ENODEV;
+
+ tmp = le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe].desc.wMaxPacketSize);
dev->video_mode.alt_max_pkt_size[i] = (tmp & 0x07ff) * (((tmp & 0x1800) >> 11) + 1);
dev_dbg(dev->dev,
"Alternate setting %i, max size= %i\n", i,
@@ -1477,6 +1485,9 @@
}
uif = udev->actconfig->interface[idx];
+ if (uif->altsetting[0].desc.bNumEndpoints < isoc_pipe + 1)
+ return -ENODEV;
+
dev->vbi_mode.end_point_addr =
uif->altsetting[0].endpoint[isoc_pipe].desc.
bEndpointAddress;
@@ -1493,8 +1504,12 @@
return -ENOMEM;
for (i = 0; i < dev->vbi_mode.num_alt; i++) {
- u16 tmp =
- le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe].
+ u16 tmp;
+
+ if (uif->altsetting[i].desc.bNumEndpoints < isoc_pipe + 1)
+ return -ENODEV;
+
+ tmp = le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe].
desc.wMaxPacketSize);
dev->vbi_mode.alt_max_pkt_size[i] =
(tmp & 0x07ff) * (((tmp & 0x1800) >> 11) + 1);
@@ -1514,6 +1529,9 @@
}
uif = udev->actconfig->interface[idx];
+ if (uif->altsetting[0].desc.bNumEndpoints < isoc_pipe + 1)
+ return -ENODEV;
+
dev->sliced_cc_mode.end_point_addr =
uif->altsetting[0].endpoint[isoc_pipe].desc.
bEndpointAddress;
@@ -1528,7 +1546,12 @@
return -ENOMEM;
for (i = 0; i < dev->sliced_cc_mode.num_alt; i++) {
- u16 tmp = le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe].
+ u16 tmp;
+
+ if (uif->altsetting[i].desc.bNumEndpoints < isoc_pipe + 1)
+ return -ENODEV;
+
+ tmp = le16_to_cpu(uif->altsetting[i].endpoint[isoc_pipe].
desc.wMaxPacketSize);
dev->sliced_cc_mode.alt_max_pkt_size[i] =
(tmp & 0x07ff) * (((tmp & 0x1800) >> 11) + 1);
@@ -1693,6 +1716,11 @@
}
uif = udev->actconfig->interface[idx];
+ if (uif->altsetting[0].desc.bNumEndpoints < isoc_pipe + 1) {
+ retval = -ENODEV;
+ goto err_video_alt;
+ }
+
dev->ts1_mode.end_point_addr =
uif->altsetting[0].endpoint[isoc_pipe].
desc.bEndpointAddress;
@@ -1710,7 +1738,14 @@
}
for (i = 0; i < dev->ts1_mode.num_alt; i++) {
- u16 tmp = le16_to_cpu(uif->altsetting[i].
+ u16 tmp;
+
+ if (uif->altsetting[i].desc.bNumEndpoints < isoc_pipe + 1) {
+ retval = -ENODEV;
+ goto err_video_alt;
+ }
+
+ tmp = le16_to_cpu(uif->altsetting[i].
endpoint[isoc_pipe].desc.
wMaxPacketSize);
dev->ts1_mode.alt_max_pkt_size[i] =
diff --git a/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c b/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
index f5df9eaba0..9757f35 100644
--- a/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
+++ b/drivers/media/usb/dvb-usb-v2/dvb_usb_core.c
@@ -1010,8 +1010,8 @@
void dvb_usbv2_disconnect(struct usb_interface *intf)
{
struct dvb_usb_device *d = usb_get_intfdata(intf);
- const char *name = d->name;
- struct device dev = d->udev->dev;
+ const char *devname = kstrdup(dev_name(&d->udev->dev), GFP_KERNEL);
+ const char *drvname = d->name;
dev_dbg(&d->udev->dev, "%s: bInterfaceNumber=%d\n", __func__,
intf->cur_altsetting->desc.bInterfaceNumber);
@@ -1021,8 +1021,9 @@
dvb_usbv2_exit(d);
- dev_info(&dev, "%s: '%s' successfully deinitialized and disconnected\n",
- KBUILD_MODNAME, name);
+ pr_info("%s: '%s:%s' successfully deinitialized and disconnected\n",
+ KBUILD_MODNAME, drvname, devname);
+ kfree(devname);
}
EXPORT_SYMBOL(dvb_usbv2_disconnect);
diff --git a/drivers/media/usb/dvb-usb/dib0700_core.c b/drivers/media/usb/dvb-usb/dib0700_core.c
index ab58f0b..d1b4b72 100644
--- a/drivers/media/usb/dvb-usb/dib0700_core.c
+++ b/drivers/media/usb/dvb-usb/dib0700_core.c
@@ -783,6 +783,9 @@
/* Starting in firmware 1.20, the RC info is provided on a bulk pipe */
+ if (intf->altsetting[0].desc.bNumEndpoints < rc_ep + 1)
+ return -ENODEV;
+
purb = usb_alloc_urb(0, GFP_KERNEL);
if (purb == NULL) {
err("rc usb alloc urb failed");
diff --git a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
index 733a7ff..caad3b5 100644
--- a/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
+++ b/drivers/media/usb/dvb-usb/dvb-usb-firmware.c
@@ -35,42 +35,51 @@
int usb_cypress_load_firmware(struct usb_device *udev, const struct firmware *fw, int type)
{
- struct hexline hx;
- u8 reset;
- int ret,pos=0;
+ struct hexline *hx;
+ u8 *buf;
+ int ret, pos = 0;
+ u16 cpu_cs_register = cypress[type].cpu_cs_register;
+
+ buf = kmalloc(sizeof(*hx), GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ hx = (struct hexline *)buf;
/* stop the CPU */
- reset = 1;
- if ((ret = usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1)) != 1)
+ buf[0] = 1;
+ if (usb_cypress_writemem(udev, cpu_cs_register, buf, 1) != 1)
err("could not stop the USB controller CPU.");
- while ((ret = dvb_usb_get_hexline(fw,&hx,&pos)) > 0) {
- deb_fw("writing to address 0x%04x (buffer: 0x%02x %02x)\n",hx.addr,hx.len,hx.chk);
- ret = usb_cypress_writemem(udev,hx.addr,hx.data,hx.len);
+ while ((ret = dvb_usb_get_hexline(fw, hx, &pos)) > 0) {
+ deb_fw("writing to address 0x%04x (buffer: 0x%02x %02x)\n", hx->addr, hx->len, hx->chk);
+ ret = usb_cypress_writemem(udev, hx->addr, hx->data, hx->len);
- if (ret != hx.len) {
+ if (ret != hx->len) {
err("error while transferring firmware "
"(transferred size: %d, block size: %d)",
- ret,hx.len);
+ ret, hx->len);
ret = -EINVAL;
break;
}
}
if (ret < 0) {
err("firmware download failed at %d with %d",pos,ret);
+ kfree(buf);
return ret;
}
if (ret == 0) {
/* restart the CPU */
- reset = 0;
- if (ret || usb_cypress_writemem(udev,cypress[type].cpu_cs_register,&reset,1) != 1) {
+ buf[0] = 0;
+ if (usb_cypress_writemem(udev, cpu_cs_register, buf, 1) != 1) {
err("could not restart the USB controller CPU.");
ret = -EINVAL;
}
} else
ret = -EIO;
+ kfree(buf);
+
return ret;
}
EXPORT_SYMBOL(usb_cypress_load_firmware);
diff --git a/drivers/media/usb/dvb-usb/ttusb2.c b/drivers/media/usb/dvb-usb/ttusb2.c
index f107173..dd93c2c 100644
--- a/drivers/media/usb/dvb-usb/ttusb2.c
+++ b/drivers/media/usb/dvb-usb/ttusb2.c
@@ -78,6 +78,9 @@
u8 *s, *r = NULL;
int ret = 0;
+ if (4 + rlen > 64)
+ return -EIO;
+
s = kzalloc(wlen+4, GFP_KERNEL);
if (!s)
return -ENOMEM;
@@ -381,6 +384,22 @@
write_read = i+1 < num && (msg[i+1].flags & I2C_M_RD);
read = msg[i].flags & I2C_M_RD;
+ if (3 + msg[i].len > sizeof(obuf)) {
+ err("i2c wr len=%d too high", msg[i].len);
+ break;
+ }
+ if (write_read) {
+ if (3 + msg[i+1].len > sizeof(ibuf)) {
+ err("i2c rd len=%d too high", msg[i+1].len);
+ break;
+ }
+ } else if (read) {
+ if (3 + msg[i].len > sizeof(ibuf)) {
+ err("i2c rd len=%d too high", msg[i].len);
+ break;
+ }
+ }
+
obuf[0] = (msg[i].addr << 1) | (write_read | read);
if (read)
obuf[1] = 0;
diff --git a/drivers/media/usb/gspca/konica.c b/drivers/media/usb/gspca/konica.c
index 0712b1b..0f6d57f 100644
--- a/drivers/media/usb/gspca/konica.c
+++ b/drivers/media/usb/gspca/konica.c
@@ -188,6 +188,9 @@
return -EIO;
}
+ if (alt->desc.bNumEndpoints < 2)
+ return -ENODEV;
+
packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize);
n = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].priv;
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-eeprom.c b/drivers/media/usb/pvrusb2/pvrusb2-eeprom.c
index e1907cd..7613d1f 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-eeprom.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-eeprom.c
@@ -123,15 +123,10 @@
memset(&tvdata,0,sizeof(tvdata));
eeprom = pvr2_eeprom_fetch(hdw);
- if (!eeprom) return -EINVAL;
+ if (!eeprom)
+ return -EINVAL;
- {
- struct i2c_client fake_client;
- /* Newer version expects a useless client interface */
- fake_client.addr = hdw->eeprom_addr;
- fake_client.adapter = &hdw->i2c_adap;
- tveeprom_hauppauge_analog(&fake_client,&tvdata,eeprom);
- }
+ tveeprom_hauppauge_analog(NULL, &tvdata, eeprom);
trace_eeprom("eeprom assumed v4l tveeprom module");
trace_eeprom("eeprom direct call results:");
diff --git a/drivers/media/usb/usbvision/usbvision-video.c b/drivers/media/usb/usbvision/usbvision-video.c
index d1dc1a1..91d709e 100644
--- a/drivers/media/usb/usbvision/usbvision-video.c
+++ b/drivers/media/usb/usbvision/usbvision-video.c
@@ -1523,7 +1523,14 @@
}
for (i = 0; i < usbvision->num_alt; i++) {
- u16 tmp = le16_to_cpu(uif->altsetting[i].endpoint[1].desc.
+ u16 tmp;
+
+ if (uif->altsetting[i].desc.bNumEndpoints < 2) {
+ ret = -ENODEV;
+ goto err_pkt;
+ }
+
+ tmp = le16_to_cpu(uif->altsetting[i].endpoint[1].desc.
wMaxPacketSize);
usbvision->alt_max_pkt_size[i] =
(tmp & 0x07ff) * (((tmp & 0x1800) >> 11) + 1);
diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
index 5cefca9..885f689 100644
--- a/drivers/media/usb/uvc/uvc_driver.c
+++ b/drivers/media/usb/uvc/uvc_driver.c
@@ -1595,6 +1595,114 @@
return buffer;
}
+static struct uvc_video_chain *uvc_alloc_chain(struct uvc_device *dev)
+{
+ struct uvc_video_chain *chain;
+
+ chain = kzalloc(sizeof(*chain), GFP_KERNEL);
+ if (chain == NULL)
+ return NULL;
+
+ INIT_LIST_HEAD(&chain->entities);
+ mutex_init(&chain->ctrl_mutex);
+ chain->dev = dev;
+ v4l2_prio_init(&chain->prio);
+
+ return chain;
+}
+
+/*
+ * Fallback heuristic for devices that don't connect units and terminals in a
+ * valid chain.
+ *
+ * Some devices have invalid baSourceID references, causing uvc_scan_chain()
+ * to fail, but if we just take the entities we can find and put them together
+ * in the most sensible chain we can think of, turns out they do work anyway.
+ * Note: This heuristic assumes there is a single chain.
+ *
+ * At the time of writing, devices known to have such a broken chain are
+ * - Acer Integrated Camera (5986:055a)
+ * - Realtek rtl157a7 (0bda:57a7)
+ */
+static int uvc_scan_fallback(struct uvc_device *dev)
+{
+ struct uvc_video_chain *chain;
+ struct uvc_entity *iterm = NULL;
+ struct uvc_entity *oterm = NULL;
+ struct uvc_entity *entity;
+ struct uvc_entity *prev;
+
+ /*
+ * Start by locating the input and output terminals. We only support
+ * devices with exactly one of each for now.
+ */
+ list_for_each_entry(entity, &dev->entities, list) {
+ if (UVC_ENTITY_IS_ITERM(entity)) {
+ if (iterm)
+ return -EINVAL;
+ iterm = entity;
+ }
+
+ if (UVC_ENTITY_IS_OTERM(entity)) {
+ if (oterm)
+ return -EINVAL;
+ oterm = entity;
+ }
+ }
+
+ if (iterm == NULL || oterm == NULL)
+ return -EINVAL;
+
+ /* Allocate the chain and fill it. */
+ chain = uvc_alloc_chain(dev);
+ if (chain == NULL)
+ return -ENOMEM;
+
+ if (uvc_scan_chain_entity(chain, oterm) < 0)
+ goto error;
+
+ prev = oterm;
+
+ /*
+ * Add all Processing and Extension Units with two pads. The order
+ * doesn't matter much, use reverse list traversal to connect units in
+ * UVC descriptor order as we build the chain from output to input. This
+ * leads to units appearing in the order meant by the manufacturer for
+ * the cameras known to require this heuristic.
+ */
+ list_for_each_entry_reverse(entity, &dev->entities, list) {
+ if (entity->type != UVC_VC_PROCESSING_UNIT &&
+ entity->type != UVC_VC_EXTENSION_UNIT)
+ continue;
+
+ if (entity->num_pads != 2)
+ continue;
+
+ if (uvc_scan_chain_entity(chain, entity) < 0)
+ goto error;
+
+ prev->baSourceID[0] = entity->id;
+ prev = entity;
+ }
+
+ if (uvc_scan_chain_entity(chain, iterm) < 0)
+ goto error;
+
+ prev->baSourceID[0] = iterm->id;
+
+ list_add_tail(&chain->list, &dev->chains);
+
+ uvc_trace(UVC_TRACE_PROBE,
+ "Found a video chain by fallback heuristic (%s).\n",
+ uvc_print_chain(chain));
+
+ return 0;
+
+error:
+ kfree(chain);
+ return -EINVAL;
+}
+
/*
* Scan the device for video chains and register video devices.
*
@@ -1617,15 +1725,10 @@
if (term->chain.next || term->chain.prev)
continue;
- chain = kzalloc(sizeof(*chain), GFP_KERNEL);
+ chain = uvc_alloc_chain(dev);
if (chain == NULL)
return -ENOMEM;
- INIT_LIST_HEAD(&chain->entities);
- mutex_init(&chain->ctrl_mutex);
- chain->dev = dev;
- v4l2_prio_init(&chain->prio);
-
term->flags |= UVC_ENTITY_FLAG_DEFAULT;
if (uvc_scan_chain(chain, term) < 0) {
@@ -1639,6 +1742,9 @@
list_add_tail(&chain->list, &dev->chains);
}
+ if (list_empty(&dev->chains))
+ uvc_scan_fallback(dev);
+
if (list_empty(&dev->chains)) {
uvc_printk(KERN_INFO, "No valid video chain found.\n");
return -1;
diff --git a/drivers/media/usb/zr364xx/zr364xx.c b/drivers/media/usb/zr364xx/zr364xx.c
index 7433ba5..fd6a3b3 100644
--- a/drivers/media/usb/zr364xx/zr364xx.c
+++ b/drivers/media/usb/zr364xx/zr364xx.c
@@ -604,6 +604,14 @@
ptr = pdest = frm->lpvbits;
if (frm->ulState == ZR364XX_READ_IDLE) {
+ if (purb->actual_length < 128) {
+ /* header incomplete */
+ dev_info(&cam->udev->dev,
+ "%s: buffer (%d bytes) too small to hold jpeg header. Discarding.\n",
+ __func__, purb->actual_length);
+ return -EINVAL;
+ }
+
frm->ulState = ZR364XX_READ_FRAME;
frm->cur_size = 0;
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index 47f3768..3dc9ed2e0 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -793,7 +793,7 @@
*/
void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no)
{
- if (plane_no > vb->num_planes || !vb->planes[plane_no].mem_priv)
+ if (plane_no >= vb->num_planes || !vb->planes[plane_no].mem_priv)
return NULL;
return call_ptr_memop(vb, vaddr, vb->planes[plane_no].mem_priv);
diff --git a/drivers/mfd/omap-usb-tll.c b/drivers/mfd/omap-usb-tll.c
index c30290f..fe51e97 100644
--- a/drivers/mfd/omap-usb-tll.c
+++ b/drivers/mfd/omap-usb-tll.c
@@ -375,8 +375,8 @@
* and use SDR Mode
*/
reg &= ~(OMAP_TLL_CHANNEL_CONF_UTMIAUTOIDLE
- | OMAP_TLL_CHANNEL_CONF_ULPINOBITSTUFF
| OMAP_TLL_CHANNEL_CONF_ULPIDDRMODE);
+ reg |= OMAP_TLL_CHANNEL_CONF_ULPINOBITSTUFF;
} else if (pdata->port_mode[i] ==
OMAP_EHCI_PORT_MODE_HSIC) {
/*
diff --git a/drivers/mfd/wcd934x-regmap.c b/drivers/mfd/wcd934x-regmap.c
index e8ba149..8fa3433 100644
--- a/drivers/mfd/wcd934x-regmap.c
+++ b/drivers/mfd/wcd934x-regmap.c
@@ -1932,13 +1932,11 @@
case WCD934X_ANA_BIAS:
case WCD934X_ANA_BUCK_CTL:
case WCD934X_ANA_RCO:
- case WCD934X_CDC_CLK_RST_CTRL_MCLK_CONTROL:
case WCD934X_CODEC_RPM_CLK_GATE:
case WCD934X_BIAS_VBG_FINE_ADJ:
case WCD934X_CODEC_CPR_SVS_CX_VDD:
case WCD934X_CODEC_CPR_SVS2_CX_VDD:
case WCD934X_CDC_TOP_TOP_CFG1:
- case WCD934X_CDC_CLK_RST_CTRL_FS_CNT_CONTROL:
return true;
}
diff --git a/drivers/mfd/wcd9xxx-core.c b/drivers/mfd/wcd9xxx-core.c
index 65195f5..7224bd6 100644
--- a/drivers/mfd/wcd9xxx-core.c
+++ b/drivers/mfd/wcd9xxx-core.c
@@ -603,7 +603,7 @@
}
-#ifdef CONFIG_DEBUG_FS
+#if defined(CONFIG_DEBUG_FS) && defined(WCD9XXX_CORE_DEBUG)
struct wcd9xxx *debugCodec;
static struct dentry *debugfs_wcd9xxx_dent;
@@ -1392,7 +1392,7 @@
__func__, ret);
goto err_slim_add;
}
-#ifdef CONFIG_DEBUG_FS
+#if defined(CONFIG_DEBUG_FS) && defined(WCD9XXX_CORE_DEBUG)
debugCodec = wcd9xxx;
debugfs_wcd9xxx_dent = debugfs_create_dir
@@ -1437,7 +1437,7 @@
struct wcd9xxx *wcd9xxx;
struct wcd9xxx_pdata *pdata = pdev->dev.platform_data;
-#ifdef CONFIG_DEBUG_FS
+#if defined(CONFIG_DEBUG_FS) && defined(WCD9XXX_CORE_DEBUG)
debugfs_remove_recursive(debugfs_wcd9xxx_dent);
#endif
wcd9xxx = slim_get_devicedata(pdev);
diff --git a/drivers/misc/c2port/c2port-duramar2150.c b/drivers/misc/c2port/c2port-duramar2150.c
index 5484301..3dc61ea 100644
--- a/drivers/misc/c2port/c2port-duramar2150.c
+++ b/drivers/misc/c2port/c2port-duramar2150.c
@@ -129,8 +129,8 @@
duramar2150_c2port_dev = c2port_device_register("uc",
&duramar2150_c2port_ops, NULL);
- if (!duramar2150_c2port_dev) {
- ret = -ENODEV;
+ if (IS_ERR(duramar2150_c2port_dev)) {
+ ret = PTR_ERR(duramar2150_c2port_dev);
goto free_region;
}
diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c
index 783337d..10a0293 100644
--- a/drivers/misc/cxl/file.c
+++ b/drivers/misc/cxl/file.c
@@ -158,11 +158,8 @@
/* Do this outside the status_mutex to avoid a circular dependency with
* the locking in cxl_mmap_fault() */
- if (copy_from_user(&work, uwork,
- sizeof(struct cxl_ioctl_start_work))) {
- rc = -EFAULT;
- goto out;
- }
+ if (copy_from_user(&work, uwork, sizeof(work)))
+ return -EFAULT;
mutex_lock(&ctx->status_mutex);
if (ctx->status != OPENED) {
diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c
index 65fed71..cc91f7b 100644
--- a/drivers/misc/enclosure.c
+++ b/drivers/misc/enclosure.c
@@ -375,6 +375,7 @@
struct device *dev)
{
struct enclosure_component *cdev;
+ int err;
if (!edev || component >= edev->components)
return -EINVAL;
@@ -384,12 +385,17 @@
if (cdev->dev == dev)
return -EEXIST;
- if (cdev->dev)
+ if (cdev->dev) {
enclosure_remove_links(cdev);
-
- put_device(cdev->dev);
+ put_device(cdev->dev);
+ }
cdev->dev = get_device(dev);
- return enclosure_add_links(cdev);
+ err = enclosure_add_links(cdev);
+ if (err) {
+ put_device(cdev->dev);
+ cdev->dev = NULL;
+ }
+ return err;
}
EXPORT_SYMBOL_GPL(enclosure_add_device);
diff --git a/drivers/misc/google-easel-comm-dma.c b/drivers/misc/google-easel-comm-dma.c
index c8fc749..8941080 100644
--- a/drivers/misc/google-easel-comm-dma.c
+++ b/drivers/misc/google-easel-comm-dma.c
@@ -522,6 +522,13 @@
if (msg_metadata->dma_xfer.xfer_type == EASELCOMM_DMA_XFER_ABORT)
return dma_dir == EASELCOMM_DMA_DIR_TO_CLIENT ? -EIO : 0;
+ /* Sanity check that list is properly terminated */
+ if (easelcomm_hw_verify_scatterlist(&msg_metadata->dma_xfer)) {
+ dev_err(easelcomm_miscdev.this_device,
+ "sg fails verification\n");
+ goto abort;
+ }
+
/* Single-Block or Multi-Block DMA? */
if (msg_metadata->dma_xfer.xfer_type == EASELCOMM_DMA_XFER_SBLK) {
/* Perform the SBLK DMA transfer */
@@ -559,9 +566,10 @@
int ret = 0;
dev_dbg(easelcomm_miscdev.this_device,
- "RECVDMA msg %u:r%llu buf_type=%d dma_buf_fd=%d\n",
+ "RECVDMA msg %u:r%llu buf_type=%d buf_size=%u dma_buf_fd=%d\n",
service->service_id, buf_desc->message_id,
- buf_desc->buf_type, buf_desc->dma_buf_fd);
+ buf_desc->buf_type, buf_desc->buf_size,
+ buf_desc->dma_buf_fd);
msg_metadata =
easelcomm_find_remote_message(service, buf_desc->message_id);
@@ -648,9 +656,10 @@
int ret = 0;
dev_dbg(easelcomm_miscdev.this_device,
- "SENDDMA msg %u:l%llu buf_type=%d dma_buf_fd=%d\n",
+ "SENDDMA msg %u:l%llu buf_type=%d buf_size=%u dma_buf_fd=%d\n",
service->service_id, buf_desc->message_id,
- buf_desc->buf_type, buf_desc->dma_buf_fd);
+ buf_desc->buf_type, buf_desc->buf_size,
+ buf_desc->dma_buf_fd);
msg_metadata =
easelcomm_find_local_message(service, buf_desc->message_id);
diff --git a/drivers/misc/google-easel-comm-hw.c b/drivers/misc/google-easel-comm-hw.c
index 2fd5a53..9c8744f7 100644
--- a/drivers/misc/google-easel-comm-hw.c
+++ b/drivers/misc/google-easel-comm-hw.c
@@ -96,6 +96,11 @@
struct mnh_outb_region outbound;
int ret;
+ /* Command channel size shall not exceed the translation region size */
+ BUILD_BUG_ON_MSG(
+ EASELCOMM_CMD_CHANNEL_SIZE > HW_MNH_PCIE_OUTBOUND_SIZE,
+ "Easelcomm command channel size exceeds translation region");
+
ret = mnh_get_rb_base(&easel_cmdchan_dma_addr);
if (WARN_ON(ret))
return ret;
@@ -483,6 +488,12 @@
}
EXPORT_SYMBOL(easelcomm_hw_build_scatterlist);
+int easelcomm_hw_verify_scatterlist(struct easelcomm_dma_xfer_info *xfer)
+{
+ return mnh_sg_verify(xfer->sg_local, xfer->sg_local_size,
+ xfer->sg_local_localdata);
+}
+
/*
* Return the number of scatter-gather entries in the MNH SG list. Used to
* determine whether both sides require only 1 block and can use single-block
@@ -490,7 +501,10 @@
*/
int easelcomm_hw_scatterlist_block_count(uint32_t scatterlist_size)
{
- return scatterlist_size / sizeof(struct mnh_sg_entry);
+ if (scatterlist_size == 0)
+ return 0;
+ scatterlist_size /= sizeof(struct mnh_sg_entry);
+ return scatterlist_size - 1; /* subtract the terminator entry */
}
EXPORT_SYMBOL(easelcomm_hw_scatterlist_block_count);
diff --git a/drivers/misc/google-easel-comm-private.h b/drivers/misc/google-easel-comm-private.h
index f852a05..1c5c8c3 100644
--- a/drivers/misc/google-easel-comm-private.h
+++ b/drivers/misc/google-easel-comm-private.h
@@ -273,6 +273,10 @@
extern uint64_t easelcomm_hw_scatterlist_sblk_addr(void *sglist);
/* destroy (and unmap pages pinned by) a scatter-gather list */
extern void easelcomm_hw_destroy_scatterlist(void *sglocaldata);
+/* sanity check scatterlists */
+extern int easelcomm_hw_verify_scatterlist(
+ struct easelcomm_dma_xfer_info *xfer);
+
/*
* Easel builds multi-block DMA Linked List from local and remote SG lists.
*
diff --git a/drivers/misc/google-easel-comm.c b/drivers/misc/google-easel-comm.c
index b19ec76..ce50569 100644
--- a/drivers/misc/google-easel-comm.c
+++ b/drivers/misc/google-easel-comm.c
@@ -76,6 +76,10 @@
struct easelcomm_service *service;
};
+/* max delay in msec waiting for remote to wrap command channel */
+#define CMDCHAN_WRAP_DONE_TIMEOUT_MS 500 /* TODO: set to 2000 (b/65052892) */
+/* additional retry times for asking remote to wrap command channel */
+#define CMDCHAN_SEND_WRAP_RETRY_TIMES 3 /* TODO: remove (b/65052892) */
/* max delay in msec waiting for remote to ack link shutdown */
#define LINK_SHUTDOWN_ACK_TIMEOUT 500
/* max delay in msec waiting for remote to return flush done */
@@ -906,7 +910,7 @@
channel_buf_hdr->producer_seqnbr_next) {
struct easelcomm_cmd_header *cmdhdr =
(struct easelcomm_cmd_header *)channel->readp;
- uint32_t saved_cmd_len = READ_ONCE(cmdhdr->command_arg_len);
+ uint32_t saved_cmd_len;
dev_dbg(easelcomm_miscdev.this_device, "cmdchan consumer loop prodseq=%llu consseq=%llu off=%lx\n",
channel_buf_hdr->producer_seqnbr_next,
@@ -933,6 +937,8 @@
continue;
}
+ saved_cmd_len = READ_ONCE(cmdhdr->command_arg_len);
+
dev_dbg(easelcomm_miscdev.this_device,
"cmdchan recv cmd seq=%llu svc=%u cmd=%u len=%u off=%lx\n",
cmdhdr->sequence_nbr, cmdhdr->service_id,
@@ -1088,6 +1094,96 @@
}
/*
+ * Wrap around remote's buffer. If producer does not receive a signal that
+ * consumer has wrapped around within predefined time, it will retry sending
+ * wrap command if enabled.
+ *
+ * Note: this retry mechanism is done by writing the wrap marker to the top
+ * of remote's cmd channel, which could, in the worst case, overwrite the
+ * unconsumed command at the top of cmd channel, causing all unconsumed
+ * commands behind it to be discarded.
+ *
+ * Called with remote channel mutex held.
+ * Caller is responsible for releasing the mutex.
+ *
+ * @channel: the pointer to remote channel
+ *
+ * @Return: 0 - sent wrap and remote completed wrapping
+ * negative - on error
+ */
+static int easelcomm_cmd_channel_send_wrap(
+ struct easelcomm_cmd_channel_remote *channel)
+{
+ int ret = 0;
+ int attempted = 0;
+ long remaining;
+ uint64_t wrap_marker = CMD_BUFFER_WRAP_MARKER;
+
+ /* TODO: remove retry code once b/65052892 is fixed */
+ do {
+ if (attempted > 0) {
+ dev_warn(easelcomm_miscdev.this_device,
+ "%s: retrying after %d attempts",
+ __func__, attempted);
+ }
+ attempted++;
+
+ /* Write the "buffer wrapped" marker in place of sequence # */
+ dev_dbg(easelcomm_miscdev.this_device, "cmdchan producer wrap at off=%llx seq=%llu\n",
+ channel->write_offset, channel->write_seqnbr);
+ ret = easelcomm_hw_remote_write(
+ &wrap_marker, sizeof(wrap_marker),
+ channel->write_offset);
+ if (ret)
+ goto exit;
+
+ /*
+ * Bump the producer seqnbr for the wrap marker (so consumer
+	 * knows it's there) and send IRQ to remote to consume the new
+ * data.
+ *
+ * Consumer will process entries up to and including the wrap
+ * marker and then send a "wrapped" interrupt that wakes up
+ * the wrap_ready completion.
+ */
+ ret = easelcomm_cmd_channel_bump_producer_seqnbr(channel);
+ if (ret)
+ goto exit;
+ ret = easelcomm_hw_send_data_interrupt();
+ if (ret)
+ goto exit;
+
+ channel->write_offset =
+ (sizeof(struct easelcomm_cmd_channel_header) + 7)
+ & ~0x7;
+ /* Wait for remote to catch up. IRQ from remote wakes this. */
+ remaining = wait_for_completion_interruptible_timeout(
+ &channel->wrap_ready,
+ msecs_to_jiffies(CMDCHAN_WRAP_DONE_TIMEOUT_MS));
+ if (remaining > 0) {
+ ret = 0;
+ break; /* remote did catch up; no need to retry */
+ }
+ if (remaining < 0) {
+ /* waiting was interrupted */
+ dev_err(easelcomm_miscdev.this_device,
+ "remote channel did not catch up: reason interrupted off=%llx seq=%llu\n",
+ channel->write_offset, channel->write_seqnbr);
+ ret = remaining;
+ goto exit;
+ }
+ /* remaining jiffies is 0; timed out */
+ dev_err(easelcomm_miscdev.this_device,
+ "remote channel did not catch up: reason timeout off=%llx seq=%llu\n",
+ channel->write_offset, channel->write_seqnbr);
+ ret = -ETIMEDOUT;
+ } while (attempted <= CMDCHAN_SEND_WRAP_RETRY_TIMES);
+
+exit:
+ return ret;
+}
+
+/*
* Start the process of writing a new command to the remote command channel.
* Wait for channel init'ed and grab the mutex. Wraparound the buffer if
* needed. Write the command header.
@@ -1122,38 +1218,7 @@
if (channel->write_offset + cmdbuf_size >
EASELCOMM_CMD_CHANNEL_SIZE - 8 -
sizeof(struct easelcomm_cmd_channel_header)) {
- uint64_t wrap_marker = CMD_BUFFER_WRAP_MARKER;
-
- /* Write the "buffer wrapped" marker in place of sequence # */
- dev_dbg(easelcomm_miscdev.this_device, "cmdchan producer wrap at off=%llx seq=%llu\n",
- channel->write_offset, channel->write_seqnbr);
- ret = easelcomm_hw_remote_write(
- &wrap_marker, sizeof(wrap_marker),
- channel->write_offset);
- if (ret)
- goto error;
-
- /*
- * Bump the producer seqnbr for the wrap marker (so consumer
- * knows its there) and send IRQ to remote to consume the new
- * data.
- *
- * Consumer will process entries up to and including the wrap
- * marker and then send a "wrapped" interrupt that wakes up
- * the wrap_ready completion.
- */
- ret = easelcomm_cmd_channel_bump_producer_seqnbr(channel);
- if (ret)
- goto error;
- ret = easelcomm_hw_send_data_interrupt();
- if (ret)
- goto error;
-
- channel->write_offset =
- (sizeof(struct easelcomm_cmd_channel_header) + 7)
- & ~0x7;
- /* Wait for remote to catch up. IRQ from remote wakes this. */
- ret = wait_for_completion_interruptible(&channel->wrap_ready);
+ ret = easelcomm_cmd_channel_send_wrap(channel);
if (ret)
goto error;
}
@@ -1288,6 +1353,12 @@
goto out_freedesc;
}
if (msg_desc->message_size > EASELCOMM_MAX_MESSAGE_SIZE) {
+ dev_err(easelcomm_miscdev.this_device,
+ "%s: svc:%u msg size %d is larger than max size %d\n",
+ __func__,
+ service->service_id,
+ msg_desc->message_size,
+ EASELCOMM_MAX_MESSAGE_SIZE);
ret = -EINVAL;
goto out_freedesc;
}
@@ -1442,7 +1513,7 @@
struct easelcomm_kmsg_desc orig_msg_desc;
struct easelcomm_message_metadata *orig_msg_metadata;
struct easelcomm_message_metadata *msg_metadata = NULL;
- int ret;
+ int ret = 0;
bool has_timeout = false;
long remaining = 0;
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
index a266138..d277419 100644
--- a/drivers/misc/mei/hw-me-regs.h
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -125,6 +125,11 @@
#define MEI_DEV_ID_KBP 0xA2BA /* Kaby Point */
#define MEI_DEV_ID_KBP_2 0xA2BB /* Kaby Point 2 */
+#define MEI_DEV_ID_LBG 0xA1BA /* Lewisburg (SPT) */
+
+#define MEI_DEV_ID_BXT_M 0x1A9A /* Broxton M */
+#define MEI_DEV_ID_APL_I 0x5A9A /* Apollo Lake I */
+
/*
* MEI HW Section
*/
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index 01e2038..adab5bb 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -86,10 +86,14 @@
{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_2, mei_me_pch8_cfg)},
{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H, mei_me_pch8_sps_cfg)},
{MEI_PCI_DEVICE(MEI_DEV_ID_SPT_H_2, mei_me_pch8_sps_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_LBG, mei_me_pch8_cfg)},
{MEI_PCI_DEVICE(MEI_DEV_ID_KBP, mei_me_pch8_cfg)},
{MEI_PCI_DEVICE(MEI_DEV_ID_KBP_2, mei_me_pch8_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, mei_me_pch8_cfg)},
+ {MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, mei_me_pch8_cfg)},
+
/* required last entry */
{0, }
};
diff --git a/drivers/misc/mnh/Kconfig b/drivers/misc/mnh/Kconfig
index 4fe53ae4..311f6d1 100644
--- a/drivers/misc/mnh/Kconfig
+++ b/drivers/misc/mnh/Kconfig
@@ -34,3 +34,24 @@
default "n"
---help---
Include support for multiple MSIs in MNH PCIe Host driver
+
+config MNH_SIG
+ tristate "MNH signing engine"
+ depends on MNH_SM_HOST
+ default "n"
+ ---help---
+ Adds support for authentication of MNH images
+
+config MNH_SIG_FORCE_OTA
+ tristate "MNH signature enforcement - OTA"
+ depends on MNH_SIG
+ default "n"
+ ---help---
+ Enforce firmware signing for images delivered through an OTA
+
+config MNH_SIG_FORCE_PS
+ tristate "MNH signature enforcement - Play Store"
+ depends on MNH_SIG
+ default "y"
+ ---help---
+ Enforce firmware signing for images delivered through the Play Store
diff --git a/drivers/misc/mnh/Makefile b/drivers/misc/mnh/Makefile
index 4c95a90..29d20958 100644
--- a/drivers/misc/mnh/Makefile
+++ b/drivers/misc/mnh/Makefile
@@ -5,3 +5,4 @@
obj-$(CONFIG_MNH_PCIE_HOST) += mnh-pcie.o
obj-$(CONFIG_MNH_SM_HOST) += mnh-sm.o mnh-sm-ion.o mnh-pwr.o mnh-mipi.o mnh-ddr.o mnh-clk.o
obj-$(CONFIG_MNH_THERMAL_HOST) += mnh-thermal.o mnh-efuse.o
+obj-$(CONFIG_MNH_SIG) += mnh-crypto.o mnh-crypto-sysfs.o
diff --git a/drivers/misc/mnh/hw-mnh-regs.h b/drivers/misc/mnh/hw-mnh-regs.h
index 28ec432..74733f2 100644
--- a/drivers/misc/mnh/hw-mnh-regs.h
+++ b/drivers/misc/mnh/hw-mnh-regs.h
@@ -17,7 +17,6 @@
#ifndef __HW_MNH_PCIE_REGS_H_
#define __HW_MNH_PCIE_REGS_H_
-
#define HW_MNH_PCIE_CONFIG_ADDR_START 0x00200000
#define HW_MNH_PCIE_CONFIG_ADDR_END 0x00200FFF
#define HW_MNH_PCIE_BAR_2_ADDR_START 0x00000000
@@ -27,7 +26,9 @@
#define HW_MNH_PCIE_CLUSTER_ADDR_START 0x040C0000
#define HW_MNH_PCIE_CLUSTER_ADDR_END 0x040C0FFF
#define HW_MNH_PCIE_OUTBOUND_BASE 0x80000000
-#define HW_MNH_PCIE_OUTBOUND_END 0xFFFFFFFF
+#define HW_MNH_PCIE_OUTBOUND_SIZE 0x00019000 /* 100kB */
+#define HW_MNH_PCIE_OUTBOUND_END \
+ (HW_MNH_PCIE_OUTBOUND_BASE + HW_MNH_PCIE_OUTBOUND_SIZE - 1)
#define HW_MNH_PCIE_BAR2_R1_ADDR_START 0x00000000 /* Start of ROM */
#define HW_MNH_PCIE_BAR2_R1_ADDR_END 0x0011FFFF /* End of SRAM */
@@ -62,6 +63,7 @@
#define IATU_REGION_CTRL_2 0x908
#define IATU_ENABLE 0x80000000
+#define IATU_DMA_BYPASS 0x08000000
#define IATU_OUTBOUND 0x0
#define IATU_INBOUND 0x80000000
#define IATU_BAR_MODE 0xC0000000
diff --git a/drivers/misc/mnh/mnh-crypto-sysfs.c b/drivers/misc/mnh/mnh-crypto-sysfs.c
new file mode 100644
index 0000000..2d31da0
--- /dev/null
+++ b/drivers/misc/mnh/mnh-crypto-sysfs.c
@@ -0,0 +1,144 @@
+/*
+ * MNH Firmware for firmware authentication
+ *
+ * Copyright 2017 Google Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include "mnh-crypto.h"
+
+#define MISC_DEVICE_NAME "mnh_crypto"
+#define KEY_ID "Easel: 2d9cb8fb66a52266cb3b00b3e3db335fadf908e4"
+
+static ssize_t verify_img_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count);
+
+static ssize_t sig_key_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count);
+
+static ssize_t getkey_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf);
+
+static void mnh_crypto_decode_error(struct device *dev, int err);
+
+/* Device attributes */
+DEVICE_ATTR_RO(getkey);
+DEVICE_ATTR_WO(verify_img);
+DEVICE_ATTR_WO(sig_key);
+
+static struct attribute *mnh_crypto_attrs[] = {
+ &dev_attr_getkey.attr,
+ &dev_attr_verify_img.attr,
+ &dev_attr_sig_key.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(mnh_crypto);
+
+static struct miscdevice mnh_crypto_miscdevice = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = MISC_DEVICE_NAME,
+ .groups = mnh_crypto_groups,
+};
+
+/* echo -n "easel/Image">verify_img */
+static ssize_t verify_img_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int err;
+
+ err = mnh_crypto_verify_fw(dev, buf);
+ if (!err) {
+ dev_info(dev, "%s: Signature verfied OK\n", __func__);
+ return count;
+ }
+
+ mnh_crypto_decode_error(dev, err);
+ return err;
+}
+
+/* echo -n "Easel: 2d9cb8fb66a52266cb3b00b3e3db335fadf908e4">sig_key */
+static ssize_t sig_key_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int err;
+
+ err = mnh_crypto_check_key(dev, buf);
+ if (err > 0) {
+ dev_info(dev, "%s: Key found, ID:%x\n", __func__, err);
+ return count;
+ }
+
+ mnh_crypto_decode_error(dev, err);
+ return err;
+}
+
+/* test function */
+static ssize_t getkey_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ int err;
+ const char *keydesc = KEY_ID;
+
+ dev_dbg(dev, "Entering getkey_show...\n");
+ err = mnh_crypto_check_key(dev, keydesc);
+ if (err > 0)
+ dev_info(dev, "%s: Key found, ID:%x\n", __func__, err);
+ else
+ mnh_crypto_decode_error(dev, err);
+
+ return err;
+}
+
+/* create sysfs group */
+int mnh_crypto_config_sysfs(void)
+{
+ int err;
+ struct device *dev;
+
+ err = misc_register(&mnh_crypto_miscdevice);
+ if (err) {
+ pr_err("%s: failed to create misc device (%d)\n",
+ __func__, err);
+ return err;
+ }
+ dev = mnh_crypto_miscdevice.this_device;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mnh_crypto_config_sysfs);
+
+static void mnh_crypto_decode_error(struct device *dev, int err)
+{
+ /* Decode return code information */
+ if (err == -ENOKEY)
+ dev_err(dev, "%s: Required key not available\n", __func__);
+ else if (err == -EKEYEXPIRED)
+ dev_err(dev, "%s: Key has expired\n", __func__);
+ else if (err == -EKEYREVOKED)
+ dev_err(dev, "%s: Key has been revoked\n", __func__);
+ else if (err == -EKEYREJECTED)
+ dev_err(dev, "%s: Key was rejected by service\n", __func__);
+ else if (err == -ENOMEM)
+ dev_err(dev, "%s: Out of memory\n", __func__);
+ else if (err == -ECANCELED)
+ dev_err(dev, "%s: Operation canceled\n", __func__);
+ else
+ dev_err(dev, "%s: Unknown error code %d\n", __func__, err);
+}
diff --git a/drivers/misc/mnh/mnh-crypto.c b/drivers/misc/mnh/mnh-crypto.c
new file mode 100644
index 0000000..91c4290
--- /dev/null
+++ b/drivers/misc/mnh/mnh-crypto.c
@@ -0,0 +1,274 @@
+/*
+ * MNH Firmware for firmware authentication
+ *
+ * Copyright 2017 Google Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <crypto/pkcs7.h>
+#include <crypto/public_key.h>
+#include <keys/asymmetric-type.h>
+#include <keys/system_keyring.h>
+#include <linux/device.h>
+#include <linux/firmware.h>
+#include <linux/kernel.h>
+#include <linux/key.h>
+#include <linux/slab.h>
+#include "mnh-crypto.h"
+#include "mnh-sm.h"
+
+#define ESL_SIG_STRING "~Module signature appended~\n"
+
+/**
+ * Verify signature of a firmware blob in the filesystem
+ * @dev: Device node
+ * @path: path of the firmware file to be verified
+ * RETURNS:
+ * 0 success
+ * -EBADMSG PKCS#7 signature with non-detached data
+ * -ECANCELED invalid PKCS#7 message or signature block is missing
+ * -EKEYREJECTED key had a usage restriction, or signature failed to match
+ * -ENOPKG crypto module could not be found
+ * -ENOKEY PKCS#7 signature not signed with a trusted key
+ */
+int mnh_crypto_verify_fw(struct device *dev, const char *path)
+{
+ struct cert_info *info;
+ const struct firmware *img;
+ int err = 0;
+
+ /* load firmware from filesystem */
+ dev_dbg(dev, "%s loading firmware: %s\n", __func__, path);
+ err = request_firmware(&img, path, dev);
+ if (err) {
+ dev_err(dev, "%s request firmware failed - %d\n",
+ __func__, err);
+ return err;
+ }
+ dev_dbg(dev, "%s firmware loaded\n", __func__);
+
+ info = kzalloc(sizeof(struct cert_info),
+ GFP_KERNEL);
+ if (info == NULL) {
+ release_firmware(img);
+ return -ENOMEM;
+ }
+
+ info->img = img->data;
+ info->img_len = img->size;
+ err = mnh_crypto_verify(dev, info);
+
+ kfree(info);
+ release_firmware(img);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(mnh_crypto_verify_fw);
+
+/**
+ * Check proper framing of image, signature, header and end-marker
+ * and perform signature check.
+ * @dev: Device node
+ * @info: data structure including buffer address and size
+ * RETURNS:
+ * 0 success
+ * -EBADMSG PKCS#7 signature with non-detached data
+ * -ECANCELED invalid PKCS#7 message or signature block is missing
+ * -EKEYREJECTED key had a usage restriction, or signature failed to match
+ * -ENOPKG crypto module could not be found
+ * -ENOKEY PKCS#7 signature not signed with a trusted key
+ */
+int mnh_crypto_verify(struct device *dev, struct cert_info *info)
+{
+ int err = -ENOKEY;
+ const unsigned long marker_len = sizeof(ESL_SIG_STRING) - 1;
+ const void *img = info->img;
+
+ dev_dbg(dev, "%s starting signature check\n", __func__);
+ /* check if signature string is present at the end of the image */
+ dev_dbg(dev, "%s - buffer addr: %pP | buffer size: %lu\n", __func__,
+ info->img, info->img_len);
+ if (memcmp(img + info->img_len - marker_len,
+ ESL_SIG_STRING, marker_len) == 0) {
+ dev_dbg(dev, "%s ESL_SIG_STRING found, proceeding\n",
+ __func__);
+ info->img_len -= marker_len;
+ /* verify the signature against the available keys */
+ err = mnh_crypto_verify_sig(dev, info);
+ info->cert_size += marker_len;
+ if (!err) {
+ info->cert = FW_IMAGE_CERT_OK;
+ dev_dbg(dev, "%s Image signature verified OK\n",
+ __func__);
+ } else {
+ dev_err(dev, "%s Image signature error: %d\n",
+ __func__, err);
+ info->cert = FW_IMAGE_TAINTED;
+ }
+ } else {
+ info->cert = FW_IMAGE_NO_CERT;
+ dev_err(dev, "%s ESL_SIG_STRING not found, aborting\n",
+ __func__);
+ err = -ECANCELED;
+ }
+ return err;
+}
+EXPORT_SYMBOL_GPL(mnh_crypto_verify);
+
+/**
+ * Retrieves the serial number of a key in the keychain
+ * @dev: device node
+ * @keydesc: key descriptor
+ * Positive return values indicate success in which case the return value
+ * is the serial of the specified key.
+ */
+key_serial_t mnh_crypto_check_key(struct device *dev, const char *keydesc)
+{
+ key_ref_t key_ref;
+ struct key *key;
+
+ key_ref = make_key_ref(system_trusted_keyring, 1);
+ if (IS_ERR(key_ref)) {
+ dev_err(dev,
+ "mnh %s Request for system_trusted_keyring err %ld\n",
+ __func__, PTR_ERR(key_ref));
+ return -ENOKEY;
+ }
+ dev_dbg(dev, "mnh %s received system_trusted_keyring\n", __func__);
+ key = key_ref_to_ptr(key_ref);
+ dev_dbg(dev, "mnh %s key chain serial: %d\n",
+ __func__, key->serial);
+
+ key_ref = keyring_search(make_key_ref(system_trusted_keyring, 1),
+ &key_type_asymmetric, keydesc);
+ if (IS_ERR(key_ref)) {
+ dev_err(dev, "mnh %s Request for key '%s' err %ld\n",
+ __func__, keydesc, PTR_ERR(key_ref));
+ return -ENOKEY;
+ }
+
+ key = key_ref_to_ptr(key_ref);
+ dev_dbg(dev, "mnh %s key serial: %d\n", __func__, key->serial);
+ return key->serial;
+}
+EXPORT_SYMBOL_GPL(mnh_crypto_check_key);
+
+/*
+ * mnh_crypto_verify_sig checks the validity of the image's signature type
+ * and validates the signature of the image against the available public
+ * keys in the system keychain. Function populates info->cert & ->cert_size.
+ * RETURNS:
+ * 0 success
+ * -EBADMSG PKCS#7 signature with non-detached data
+ * -ECANCELED invalid PKCS#7 message
+ * -EKEYREJECTED key had a usage restriction, or signature failed to match
+ * -ENOPKG crypto module could not be found
+ * -ENOKEY PKCS#7 signature not signed with a trusted key
+ */
+int mnh_crypto_verify_sig(struct device *dev, struct cert_info *info)
+{
+ struct esl_img_signature esl_sig;
+ size_t sig_len;
+ size_t imglen = info->img_len;
+ const void *esl_img = info->img;
+
+ dev_dbg(dev, "mnh %s image size %zu\n", __func__, imglen);
+ if (imglen <= sizeof(esl_sig))
+ return -ECANCELED;
+ /* this structure is appended at the end of the file */
+ memcpy(&esl_sig, esl_img + (imglen - sizeof(esl_sig)),
+ sizeof(esl_sig));
+ /* strip out the esl_img_signature structure at the end */
+ imglen -= sizeof(esl_sig);
+ /* length of the actual signature block */
+ sig_len = be32_to_cpu(esl_sig.sig_len);
+ /* check signature type */
+ if (esl_sig.id_type != PKEY_ID_PKCS7) {
+ dev_err(dev, "mnh %s Image not signed with PKCS#7 message\n",
+ __func__);
+ return -ECANCELED;
+ }
+ /* strip out the signature, all that is left is the payload */
+ if (imglen <= sig_len)
+ return -ECANCELED;
+ imglen -= sig_len;
+ dev_dbg(dev, "mnh %s sizes: payload %zu | signature %zu | ident %zu\n",
+ __func__, imglen, sig_len, sizeof(esl_sig));
+
+ /* total size of signature and header information */
+ info->cert_size = info->img_len - imglen;
+
+ return mnh_crypto_verify_pkcs7_sig(dev, esl_img, imglen,
+ esl_img + imglen, sig_len);
+}
+
+/*
+ * Verify a PKCS#7-based signature on data.
+ * @data: The data to be verified.
+ * @len: Size of data.
+ * @raw_pkcs7: The PKCS#7 message that is the signature.
+ * @pkcs7_len: The size of @raw_pkcs7.
+ *
+ * RETURNS:
+ * -EBADMSG data already supplied in message
+ * -ENOKEY if no match for any of the signature chains in the message was
+ * found
+ * -EKEYREJECTED if a key was selected that had a usage restriction at
+ * odds with the specified usage
+ * -EKEYREJECTED if a key was selected that had a usage restriction at
+ * odds with the specified usage or a signature failed to match for
+ * which we found an appropriate X.509 certificate
+ * -EBADMSG if some part of the message was invalid
+ * -ENOPKG if none of the signature chains are verifiable because suitable
+ * crypto modules couldn't be found
+ */
+int mnh_crypto_verify_pkcs7_sig(struct device *dev, const void *data,
+ unsigned long len, const void *raw_pkcs7,
+ size_t pkcs7_len)
+{
+ struct pkcs7_message *pkcs7;
+ bool trusted = false;
+ int ret;
+
+ /* parse binary ASN.1 encoded message */
+ pkcs7 = pkcs7_parse_message(raw_pkcs7, pkcs7_len);
+ if (IS_ERR(pkcs7))
+ return PTR_ERR(pkcs7);
+ /* The data should be detached - so we need to supply it. */
+ if (pkcs7_supply_detached_data(pkcs7, data, len) < 0) {
+ dev_err(dev, "PKCS#7 signature with non-detached data\n");
+ ret = -EBADMSG;
+ goto error;
+ }
+ /*
+ * verify that the data digest matches the digest in the AuthAttrs
+ * and any signature in the message or one of the X.509 certificates
+ * it carries that matches another X.509 cert in the message can be
+ * verified
+ */
+ ret = pkcs7_verify(pkcs7, VERIFYING_MODULE_SIGNATURE);
+ if (ret < 0)
+ goto error;
+ ret = pkcs7_validate_trust(pkcs7, system_trusted_keyring, &trusted);
+ if (ret < 0)
+ goto error;
+ if (!trusted) {
+ dev_err(dev,
+ "PKCS#7 signature not signed with a trusted key\n");
+ ret = -ENOKEY;
+ }
+ pkcs7_free_message(pkcs7);
+ return ret;
+error:
+ pkcs7_free_message(pkcs7);
+ dev_err(dev, "%s: error code: %d\n", __func__, ret);
+ return ret;
+}
diff --git a/drivers/misc/mnh/mnh-crypto.h b/drivers/misc/mnh/mnh-crypto.h
new file mode 100644
index 0000000..8722f08
--- /dev/null
+++ b/drivers/misc/mnh/mnh-crypto.h
@@ -0,0 +1,64 @@
+/*
+ * MNH Firmware for firmware authentication
+ *
+ * Copyright 2017 Google Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#ifndef __MNH_CRYPTO
+#define __MNH_CRYPTO
+
+/*
+ * Signed firmware image blob as generated by signing tool
+ * +--------------+------------+-------------+--------------+
+ * | firmware | signature | esl_img_ | ESL_SIG |
+ * | image | | signature | _STRING |
+ * +--------------+------------+-------------+--------------+
+ * <- cert_info.img_len ->
+ * <- cert_info.cert_size ->
+ */
+struct esl_img_signature {
+ __u8 algo; /* Public key crypto algorithm */
+ __u8 hash; /* Digest algorithm */
+ __u8 id_type; /* Key identifier type */
+ __u8 signer_len; /* Length of signer's name */
+ __u8 key_id_len; /* Length of key identifier */
+ __u8 __pad[3]; /* */
+ __be32 sig_len; /* Length of the signature data */
+};
+
+enum cert_state {
+ FW_IMAGE_TAINTED = 0, /* initialization value */
+ FW_IMAGE_NO_CERT, /* firmware has no valid certificate */
+ FW_IMAGE_CERT_OK, /* firmware's signature verified correct */
+};
+
+struct cert_info {
+ const __u8 *img; /* pointer to the image file in memory */
+ unsigned long img_len; /* length of the image file */
+ enum cert_state cert; /* signature checked out */
+ size_t cert_size; /* length of the appended signature */
+};
+
+#if IS_ENABLED(CONFIG_MNH_SIG)
+int mnh_crypto_config_sysfs(void);
+#else
+static inline int mnh_crypto_config_sysfs(void) { return 0; }
+#endif
+int mnh_crypto_verify_fw(struct device *dev, const char *path);
+int mnh_crypto_verify(struct device *dev, struct cert_info *info);
+int mnh_crypto_check_key(struct device *dev, const char *keydesc);
+int mnh_crypto_verify_sig(struct device *dev, struct cert_info *info);
+int mnh_crypto_verify_pkcs7_sig(struct device *dev,
+ const void *data, unsigned long len,
+ const void *raw_pkcs7, size_t pkcs7_len);
+
+#endif /* __MNH_CRYPTO */
diff --git a/drivers/misc/mnh/mnh-ddr-33-100-400-600.h b/drivers/misc/mnh/mnh-ddr-33-100-400-600.h
new file mode 100644
index 0000000..264e2f5
--- /dev/null
+++ b/drivers/misc/mnh/mnh-ddr-33-100-400-600.h
@@ -0,0 +1,1920 @@
+/* regconfig tool: v1.4 */
+#ifndef _MNH_DDR_33_100_400_600_
+#define _MNH_DDR_33_100_400_600_
+#include "mnh-ddr.h"
+struct mnh_ddr_reg_config mnh_ddr_33_100_400_600 = {
+ {
+ 0x131b007d /* fsp 0 */,
+ 0x1311007d /* fsp 1 */,
+ 0x0422007d /* fsp 2 */,
+ 0x0311007d /* fsp 3 */
+ },
+ {
+ 0x00000b00 /* ctl 0 */,
+ 0x00000000 /* ctl 1 */,
+ 0x00000000 /* ctl 2 */,
+ 0x00000000 /* ctl 3 */,
+ 0x00000000 /* ctl 4 */,
+ 0x00000000 /* ctl 5 */,
+ 0x00003416 /* ctl 6 */,
+ 0x000208d6 /* ctl 7 */,
+ 0x00000005 /* ctl 8 */,
+ 0x00000086 /* ctl 9 */,
+ 0x00009c40 /* ctl 10 */,
+ 0x00061a80 /* ctl 11 */,
+ 0x00000005 /* ctl 12 */,
+ 0x00000190 /* ctl 13 */,
+ 0x00027100 /* ctl 14 */,
+ 0x00186a00 /* ctl 15 */,
+ 0x00000005 /* ctl 16 */,
+ 0x00000640 /* ctl 17 */,
+ 0x0003a9e1 /* ctl 18 */,
+ 0x0024a2c1 /* ctl 19 */,
+ 0x00000005 /* ctl 20 */,
+ 0x01000961 /* ctl 21 */,
+ 0x01010000 /* ctl 22 */,
+ 0x00030100 /* ctl 23 */,
+ 0x03020100 /* ctl 24 */,
+ 0x0000000e /* ctl 25 */,
+ 0x00000022 /* ctl 26 */,
+ 0x040c040c /* ctl 27 */,
+ 0x0c30081c /* ctl 28 */,
+ 0x07040804 /* ctl 29 */,
+ 0x03040903 /* ctl 30 */,
+ 0x09090d04 /* ctl 31 */,
+ 0x31080804 /* ctl 32 */,
+ 0x200f0922 /* ctl 33 */,
+ 0x0e33490d /* ctl 34 */,
+ 0x08203116 /* ctl 35 */,
+ 0x00000a0a /* ctl 36 */,
+ 0x04001248 /* ctl 37 */,
+ 0x0a0a0804 /* ctl 38 */,
+ 0x040036d8 /* ctl 39 */,
+ 0x0a0a0804 /* ctl 40 */,
+ 0x0600db60 /* ctl 41 */,
+ 0x0d0d0a06 /* ctl 42 */,
+ 0x0a014931 /* ctl 43 */,
+ 0x0203040a /* ctl 44 */,
+ 0x04040400 /* ctl 45 */,
+ 0x16100f05 /* ctl 46 */,
+ 0x000a0817 /* ctl 47 */,
+ 0x0a140011 /* ctl 48 */,
+ 0x0201010a /* ctl 49 */,
+ 0x00010002 /* ctl 50 */,
+ 0x2d1f0908 /* ctl 51 */,
+ 0x11050404 /* ctl 52 */,
+ 0x0101001a /* ctl 53 */,
+ 0x01000000 /* ctl 54 */,
+ 0x00000903 /* ctl 55 */,
+ 0x001a003f /* ctl 56 */,
+ 0x006800c1 /* ctl 57 */,
+ 0x009d030a /* ctl 58 */,
+ 0x00000490 /* ctl 59 */,
+ 0x00030003 /* ctl 60 */,
+ 0x000a0006 /* ctl 61 */,
+ 0x23180a0a /* ctl 62 */,
+ 0x0a000001 /* ctl 63 */,
+ 0x00010103 /* ctl 64 */,
+ 0x01030a00 /* ctl 65 */,
+ 0x0c000002 /* ctl 66 */,
+ 0x00030103 /* ctl 67 */,
+ 0x01031100 /* ctl 68 */,
+ 0x000a0003 /* ctl 69 */,
+ 0x001c0003 /* ctl 70 */,
+ 0x006e0006 /* ctl 71 */,
+ 0x00a6000a /* ctl 72 */,
+ 0x03050505 /* ctl 73 */,
+ 0x03010302 /* ctl 74 */,
+ 0x03050505 /* ctl 75 */,
+ 0x03010302 /* ctl 76 */,
+ 0x0c050605 /* ctl 77 */,
+ 0x03020602 /* ctl 78 */,
+ 0x13070a07 /* ctl 79 */,
+ 0x03030a02 /* ctl 80 */,
+ 0x03010000 /* ctl 81 */,
+ 0x00010000 /* ctl 82 */,
+ 0x00000000 /* ctl 83 */,
+ 0x00000000 /* ctl 84 */,
+ 0x00000000 /* ctl 85 */,
+ 0x01000000 /* ctl 86 */,
+ 0x80104002 /* ctl 87 */,
+ 0x00040003 /* ctl 88 */,
+ 0x00040005 /* ctl 89 */,
+ 0x00030000 /* ctl 90 */,
+ 0x00050004 /* ctl 91 */,
+ 0x00000004 /* ctl 92 */,
+ 0x00040003 /* ctl 93 */,
+ 0x00040005 /* ctl 94 */,
+ 0x00030000 /* ctl 95 */,
+ 0x00050004 /* ctl 96 */,
+ 0x00000004 /* ctl 97 */,
+ 0x01f803f0 /* ctl 98 */,
+ 0x0c100000 /* ctl 99 */,
+ 0x00000608 /* ctl 100 */,
+ 0x185030a0 /* ctl 101 */,
+ 0x49040000 /* ctl 102 */,
+ 0x00002482 /* ctl 103 */,
+ 0x00000000 /* ctl 104 */,
+ 0x00000000 /* ctl 105 */,
+ 0x00000000 /* ctl 106 */,
+ 0x00000000 /* ctl 107 */,
+ 0x00000000 /* ctl 108 */,
+ 0x00000000 /* ctl 109 */,
+ 0x03000000 /* ctl 110 */,
+ 0x06030303 /* ctl 111 */,
+ 0x00030a03 /* ctl 112 */,
+ 0x02030200 /* ctl 113 */,
+ 0x00070703 /* ctl 114 */,
+ 0x03020302 /* ctl 115 */,
+ 0x02000707 /* ctl 116 */,
+ 0x07030203 /* ctl 117 */,
+ 0x03020007 /* ctl 118 */,
+ 0x07070302 /* ctl 119 */,
+ 0x0000030f /* ctl 120 */,
+ 0x00070004 /* ctl 121 */,
+ 0x00000000 /* ctl 122 */,
+ 0x00000000 /* ctl 123 */,
+ 0x00000000 /* ctl 124 */,
+ 0x00000000 /* ctl 125 */,
+ 0x00000000 /* ctl 126 */,
+ 0x00000000 /* ctl 127 */,
+ 0x00000000 /* ctl 128 */,
+ 0x00000000 /* ctl 129 */,
+ 0x0400c001 /* ctl 130 */,
+ 0xc00400c0 /* ctl 131 */,
+ 0x00c00400 /* ctl 132 */,
+ 0x01000400 /* ctl 133 */,
+ 0x00000c00 /* ctl 134 */,
+ 0x00000000 /* ctl 135 */,
+ 0x00000001 /* ctl 136 */,
+ 0x00000002 /* ctl 137 */,
+ 0x00000003 /* ctl 138 */,
+ 0x0000100e /* ctl 139 */,
+ 0x00000000 /* ctl 140 */,
+ 0x00000000 /* ctl 141 */,
+ 0x00000000 /* ctl 142 */,
+ 0x00000000 /* ctl 143 */,
+ 0x000e0000 /* ctl 144 */,
+ 0x00110007 /* ctl 145 */,
+ 0x00110404 /* ctl 146 */,
+ 0x00140028 /* ctl 147 */,
+ 0x04040032 /* ctl 148 */,
+ 0x00a00032 /* ctl 149 */,
+ 0x00c80050 /* ctl 150 */,
+ 0x00c80606 /* ctl 151 */,
+ 0x007900f1 /* ctl 152 */,
+ 0x0a0a012d /* ctl 153 */,
+ 0x0000012d /* ctl 154 */,
+ 0x00000000 /* ctl 155 */,
+ 0x00000000 /* ctl 156 */,
+ 0x00000000 /* ctl 157 */,
+ 0x00060000 /* ctl 158 */,
+ 0x12260006 /* ctl 159 */,
+ 0x31002446 /* ctl 160 */,
+ 0x00313131 /* ctl 161 */,
+ 0x00000000 /* ctl 162 */,
+ 0x4d4d4d4d /* ctl 163 */,
+ 0x4d4d4dc0 /* ctl 164 */,
+ 0x0000004d /* ctl 165 */,
+ 0x00000000 /* ctl 166 */,
+ 0x06060606 /* ctl 167 */,
+ 0x01000000 /* ctl 168 */,
+ 0x00000001 /* ctl 169 */,
+ 0x00000000 /* ctl 170 */,
+ 0x01000000 /* ctl 171 */,
+ 0x00000001 /* ctl 172 */,
+ 0x00000000 /* ctl 173 */,
+ 0x00000000 /* ctl 174 */,
+ 0x00000000 /* ctl 175 */,
+ 0x00000000 /* ctl 176 */,
+ 0x00000000 /* ctl 177 */,
+ 0x00000000 /* ctl 178 */,
+ 0x00000000 /* ctl 179 */,
+ 0x00000000 /* ctl 180 */,
+ 0x00000000 /* ctl 181 */,
+ 0x00000000 /* ctl 182 */,
+ 0x11000000 /* ctl 183 */,
+ 0x000c1815 /* ctl 184 */,
+ 0x00000000 /* ctl 185 */,
+ 0x00000000 /* ctl 186 */,
+ 0x00000000 /* ctl 187 */,
+ 0x00000000 /* ctl 188 */,
+ 0x00000000 /* ctl 189 */,
+ 0x00000000 /* ctl 190 */,
+ 0x00000000 /* ctl 191 */,
+ 0x00000000 /* ctl 192 */,
+ 0x00000000 /* ctl 193 */,
+ 0x00000000 /* ctl 194 */,
+ 0x00000000 /* ctl 195 */,
+ 0x00000000 /* ctl 196 */,
+ 0x00000000 /* ctl 197 */,
+ 0x00000000 /* ctl 198 */,
+ 0x00000000 /* ctl 199 */,
+ 0x00000000 /* ctl 200 */,
+ 0x00000000 /* ctl 201 */,
+ 0x00000000 /* ctl 202 */,
+ 0x00020003 /* ctl 203 */,
+ 0x00400100 /* ctl 204 */,
+ 0x00080043 /* ctl 205 */,
+ 0x01000200 /* ctl 206 */,
+ 0x00c80040 /* ctl 207 */,
+ 0x00020008 /* ctl 208 */,
+ 0x00400100 /* ctl 209 */,
+ 0x00180320 /* ctl 210 */,
+ 0x01000200 /* ctl 211 */,
+ 0x04b10040 /* ctl 212 */,
+ 0x00000025 /* ctl 213 */,
+ 0x000a0004 /* ctl 214 */,
+ 0x003d0028 /* ctl 215 */,
+ 0x02010000 /* ctl 216 */,
+ 0xff0b0000 /* ctl 217 */,
+ 0x010101ff /* ctl 218 */,
+ 0x01010101 /* ctl 219 */,
+ 0x00010b01 /* ctl 220 */,
+ 0x01010000 /* ctl 221 */,
+ 0x00000004 /* ctl 222 */,
+ 0x01000001 /* ctl 223 */,
+ 0x02020202 /* ctl 224 */,
+ 0x00000001 /* ctl 225 */,
+ 0x00000000 /* ctl 226 */,
+ 0x00000000 /* ctl 227 */,
+ 0x00000000 /* ctl 228 */,
+ 0x00000000 /* ctl 229 */,
+ 0x00000000 /* ctl 230 */,
+ 0x00000000 /* ctl 231 */,
+ 0x00000000 /* ctl 232 */,
+ 0x00000000 /* ctl 233 */,
+ 0x00000000 /* ctl 234 */,
+ 0x00000000 /* ctl 235 */,
+ 0x00000000 /* ctl 236 */,
+ 0x00000000 /* ctl 237 */,
+ 0x00000000 /* ctl 238 */,
+ 0x00000000 /* ctl 239 */,
+ 0x00000000 /* ctl 240 */,
+ 0x00000000 /* ctl 241 */,
+ 0x00000000 /* ctl 242 */,
+ 0x00000000 /* ctl 243 */,
+ 0x00000000 /* ctl 244 */,
+ 0x00000000 /* ctl 245 */,
+ 0x00000000 /* ctl 246 */,
+ 0x00000000 /* ctl 247 */,
+ 0x00010001 /* ctl 248 */,
+ 0x01010001 /* ctl 249 */,
+ 0x00010101 /* ctl 250 */,
+ 0x08080300 /* ctl 251 */,
+ 0x08000a08 /* ctl 252 */,
+ 0x000b0b08 /* ctl 253 */,
+ 0x03010100 /* ctl 254 */,
+ 0x00000005 /* ctl 255 */,
+ 0x00000000 /* ctl 256 */,
+ 0x00010000 /* ctl 257 */,
+ 0x00280d00 /* ctl 258 */,
+ 0x00000001 /* ctl 259 */,
+ 0x00000001 /* ctl 260 */,
+ 0x00000000 /* ctl 261 */,
+ 0x00000000 /* ctl 262 */,
+ 0x00000000 /* ctl 263 */,
+ 0x00000000 /* ctl 264 */,
+ 0x00000000 /* ctl 265 */,
+ 0x00000000 /* ctl 266 */,
+ 0x00000000 /* ctl 267 */,
+ 0x00000000 /* ctl 268 */,
+ 0x00000000 /* ctl 269 */,
+ 0x00000000 /* ctl 270 */,
+ 0x01000000 /* ctl 271 */,
+ 0x00000001 /* ctl 272 */,
+ 0x00010100 /* ctl 273 */,
+ 0x00000000 /* ctl 274 */,
+ 0x00000000 /* ctl 275 */,
+ 0x00000000 /* ctl 276 */,
+ 0x00000000 /* ctl 277 */,
+ 0x00000000 /* ctl 278 */,
+ 0x00000000 /* ctl 279 */,
+ 0x00000000 /* ctl 280 */,
+ 0x00000000 /* ctl 281 */,
+ 0x00000000 /* ctl 282 */,
+ 0x00000000 /* ctl 283 */,
+ 0x00000000 /* ctl 284 */,
+ 0x00000000 /* ctl 285 */,
+ 0x00000000 /* ctl 286 */,
+ 0x00000000 /* ctl 287 */,
+ 0x00000000 /* ctl 288 */,
+ 0x00000000 /* ctl 289 */,
+ 0x00000000 /* ctl 290 */,
+ 0x00000000 /* ctl 291 */,
+ 0x00000000 /* ctl 292 */,
+ 0x00000000 /* ctl 293 */,
+ 0x00000000 /* ctl 294 */,
+ 0x000556aa /* ctl 295 */,
+ 0x000aaaaa /* ctl 296 */,
+ 0x000aa955 /* ctl 297 */,
+ 0x00055555 /* ctl 298 */,
+ 0x000b3133 /* ctl 299 */,
+ 0x0004cd33 /* ctl 300 */,
+ 0x0004cecc /* ctl 301 */,
+ 0x000b32cc /* ctl 302 */,
+ 0x00010300 /* ctl 303 */,
+ 0x00000100 /* ctl 304 */,
+ 0x00000000 /* ctl 305 */,
+ 0x00000000 /* ctl 306 */,
+ 0x00000000 /* ctl 307 */,
+ 0x00000000 /* ctl 308 */,
+ 0x00000000 /* ctl 309 */,
+ 0x00000000 /* ctl 310 */,
+ 0x00000000 /* ctl 311 */,
+ 0x00000000 /* ctl 312 */,
+ 0x00000000 /* ctl 313 */,
+ 0x00000000 /* ctl 314 */,
+ 0x00000101 /* ctl 315 */,
+ 0x00010100 /* ctl 316 */,
+ 0x00000001 /* ctl 317 */,
+ 0x00000000 /* ctl 318 */,
+ 0x0003ffff /* ctl 319 */,
+ 0x00000000 /* ctl 320 */,
+ 0x0003ffff /* ctl 321 */,
+ 0x00000000 /* ctl 322 */,
+ 0x0003ffff /* ctl 323 */,
+ 0x00000000 /* ctl 324 */,
+ 0x0003ffff /* ctl 325 */,
+ 0x00000000 /* ctl 326 */,
+ 0x0003ffff /* ctl 327 */,
+ 0x00000000 /* ctl 328 */,
+ 0x0003ffff /* ctl 329 */,
+ 0x00000000 /* ctl 330 */,
+ 0x0003ffff /* ctl 331 */,
+ 0x00000000 /* ctl 332 */,
+ 0x0003ffff /* ctl 333 */,
+ 0x00000000 /* ctl 334 */,
+ 0x0003ffff /* ctl 335 */,
+ 0x00000000 /* ctl 336 */,
+ 0x0003ffff /* ctl 337 */,
+ 0x00000000 /* ctl 338 */,
+ 0x0003ffff /* ctl 339 */,
+ 0x00000000 /* ctl 340 */,
+ 0x0003ffff /* ctl 341 */,
+ 0x00000000 /* ctl 342 */,
+ 0x0003ffff /* ctl 343 */,
+ 0x00000000 /* ctl 344 */,
+ 0x0003ffff /* ctl 345 */,
+ 0x00000000 /* ctl 346 */,
+ 0x0003ffff /* ctl 347 */,
+ 0x00000000 /* ctl 348 */,
+ 0x0003ffff /* ctl 349 */,
+ 0x00000000 /* ctl 350 */,
+ 0x0003ffff /* ctl 351 */,
+ 0x00000000 /* ctl 352 */,
+ 0x0003ffff /* ctl 353 */,
+ 0x00000000 /* ctl 354 */,
+ 0x0003ffff /* ctl 355 */,
+ 0x00000000 /* ctl 356 */,
+ 0x0003ffff /* ctl 357 */,
+ 0x00000000 /* ctl 358 */,
+ 0x0003ffff /* ctl 359 */,
+ 0x00000000 /* ctl 360 */,
+ 0x0003ffff /* ctl 361 */,
+ 0x00000000 /* ctl 362 */,
+ 0x0003ffff /* ctl 363 */,
+ 0x00000000 /* ctl 364 */,
+ 0x0003ffff /* ctl 365 */,
+ 0x00000000 /* ctl 366 */,
+ 0x0003ffff /* ctl 367 */,
+ 0x00000000 /* ctl 368 */,
+ 0x0003ffff /* ctl 369 */,
+ 0x00000000 /* ctl 370 */,
+ 0x0003ffff /* ctl 371 */,
+ 0x00000000 /* ctl 372 */,
+ 0x0003ffff /* ctl 373 */,
+ 0x00000000 /* ctl 374 */,
+ 0x0003ffff /* ctl 375 */,
+ 0x00000000 /* ctl 376 */,
+ 0x0003ffff /* ctl 377 */,
+ 0x00000000 /* ctl 378 */,
+ 0x0003ffff /* ctl 379 */,
+ 0x00000000 /* ctl 380 */,
+ 0x0303ffff /* ctl 381 */,
+ 0xffffffff /* ctl 382 */,
+ 0x00030000 /* ctl 383 */,
+ 0xffffffff /* ctl 384 */,
+ 0x00030000 /* ctl 385 */,
+ 0xffffffff /* ctl 386 */,
+ 0x00030000 /* ctl 387 */,
+ 0xffffffff /* ctl 388 */,
+ 0x00030000 /* ctl 389 */,
+ 0xffffffff /* ctl 390 */,
+ 0x00030000 /* ctl 391 */,
+ 0xffffffff /* ctl 392 */,
+ 0x00030000 /* ctl 393 */,
+ 0xffffffff /* ctl 394 */,
+ 0x00030000 /* ctl 395 */,
+ 0xffffffff /* ctl 396 */,
+ 0x00030000 /* ctl 397 */,
+ 0xffffffff /* ctl 398 */,
+ 0x00030000 /* ctl 399 */,
+ 0xffffffff /* ctl 400 */,
+ 0x00030000 /* ctl 401 */,
+ 0xffffffff /* ctl 402 */,
+ 0x00030000 /* ctl 403 */,
+ 0xffffffff /* ctl 404 */,
+ 0x00030000 /* ctl 405 */,
+ 0xffffffff /* ctl 406 */,
+ 0x00030000 /* ctl 407 */,
+ 0xffffffff /* ctl 408 */,
+ 0x00030000 /* ctl 409 */,
+ 0xffffffff /* ctl 410 */,
+ 0x00030000 /* ctl 411 */,
+ 0xffffffff /* ctl 412 */,
+ 0x00030000 /* ctl 413 */,
+ 0xffffffff /* ctl 414 */,
+ 0x00030000 /* ctl 415 */,
+ 0xffffffff /* ctl 416 */,
+ 0x00030000 /* ctl 417 */,
+ 0xffffffff /* ctl 418 */,
+ 0x00030000 /* ctl 419 */,
+ 0xffffffff /* ctl 420 */,
+ 0x00030000 /* ctl 421 */,
+ 0xffffffff /* ctl 422 */,
+ 0x00030000 /* ctl 423 */,
+ 0xffffffff /* ctl 424 */,
+ 0x00030000 /* ctl 425 */,
+ 0xffffffff /* ctl 426 */,
+ 0x00030000 /* ctl 427 */,
+ 0xffffffff /* ctl 428 */,
+ 0x00030000 /* ctl 429 */,
+ 0xffffffff /* ctl 430 */,
+ 0x00030000 /* ctl 431 */,
+ 0xffffffff /* ctl 432 */,
+ 0x00030000 /* ctl 433 */,
+ 0xffffffff /* ctl 434 */,
+ 0x00030000 /* ctl 435 */,
+ 0xffffffff /* ctl 436 */,
+ 0x00030000 /* ctl 437 */,
+ 0xffffffff /* ctl 438 */,
+ 0x00030000 /* ctl 439 */,
+ 0xffffffff /* ctl 440 */,
+ 0x00030000 /* ctl 441 */,
+ 0xffffffff /* ctl 442 */,
+ 0x00030000 /* ctl 443 */,
+ 0xffffffff /* ctl 444 */,
+ 0x00000000 /* ctl 445 */,
+ 0x02020200 /* ctl 446 */,
+ 0x00640002 /* ctl 447 */,
+ 0x01010101 /* ctl 448 */,
+ 0x00006401 /* ctl 449 */,
+ 0x00000000 /* ctl 450 */,
+ 0x15140000 /* ctl 451 */,
+ 0x0000221c /* ctl 452 */,
+ 0x0001f808 /* ctl 453 */,
+ 0x00000000 /* ctl 454 */,
+ 0x00000000 /* ctl 455 */,
+ 0x000001f8 /* ctl 456 */,
+ 0x000013b0 /* ctl 457 */,
+ 0x06080205 /* ctl 458 */,
+ 0x00000000 /* ctl 459 */,
+ 0x00000000 /* ctl 460 */,
+ 0x00000608 /* ctl 461 */,
+ 0x00003c50 /* ctl 462 */,
+ 0x18500205 /* ctl 463 */,
+ 0x00000000 /* ctl 464 */,
+ 0x00000000 /* ctl 465 */,
+ 0x00001850 /* ctl 466 */,
+ 0x0000f320 /* ctl 467 */,
+ 0x2482060c /* ctl 468 */,
+ 0x00000000 /* ctl 469 */,
+ 0x00000000 /* ctl 470 */,
+ 0x00002482 /* ctl 471 */,
+ 0x00016d14 /* ctl 472 */,
+ 0x02020a15 /* ctl 473 */,
+ 0x03020202 /* ctl 474 */,
+ 0x00001803 /* ctl 475 */,
+ 0x00000000 /* ctl 476 */,
+ 0x00000000 /* ctl 477 */,
+ 0x00001403 /* ctl 478 */,
+ 0x000007d0 /* ctl 479 */,
+ 0x00000000 /* ctl 480 */,
+ 0x00000000 /* ctl 481 */,
+ 0x00030000 /* ctl 482 */,
+ 0x00070019 /* ctl 483 */,
+ 0x0008001a /* ctl 484 */,
+ 0x000e0020 /* ctl 485 */,
+ 0x00120024 /* ctl 486 */,
+ 0x00000000 /* ctl 487 */,
+ 0x00000000 /* ctl 488 */,
+ 0x02000000 /* ctl 489 */,
+ 0x05020202 /* ctl 490 */,
+ 0x0d010501 /* ctl 491 */,
+ 0x05091705 /* ctl 492 */,
+ 0x01010001 /* ctl 493 */,
+ 0x01010101 /* ctl 494 */,
+ 0x01000101 /* ctl 495 */,
+ 0x01000100 /* ctl 496 */,
+ 0x00010001 /* ctl 497 */,
+ 0x00010002 /* ctl 498 */,
+ 0x00020100 /* ctl 499 */,
+ 0x00020002 /* ctl 500 */,
+ 0x00000000 /* ctl 501 */,
+ 0x00000400 /* ctl 502 */,
+ 0x000c0018 /* ctl 503 */,
+ 0x00300059 /* ctl 504 */,
+ 0x0049017e /* ctl 505 */,
+ 0x00400241 /* ctl 506 */,
+ 0x00120103 /* ctl 507 */,
+ 0x00000002 /* ctl 508 */,
+ 0x00000200 /* ctl 509 */,
+ 0x00000200 /* ctl 510 */,
+ 0x00000200 /* ctl 511 */,
+ 0x00000200 /* ctl 512 */,
+ 0x00000200 /* ctl 513 */,
+ 0x00000200 /* ctl 514 */,
+ 0x00000200 /* ctl 515 */,
+ 0x00000200 /* ctl 516 */,
+ 0x00000200 /* ctl 517 */,
+ 0x00000200 /* ctl 518 */,
+ 0x00000200 /* ctl 519 */,
+ 0x00000200 /* ctl 520 */,
+ 0x00000200 /* ctl 521 */,
+ 0x00000200 /* ctl 522 */,
+ 0x00000200 /* ctl 523 */,
+ 0x00000200 /* ctl 524 */,
+ 0x00000001 /* ctl 525 */,
+ 0x000f0000 /* ctl 526 */,
+ 0x000f000f /* ctl 527 */,
+ 0x000f000f /* ctl 528 */,
+ 0x000f000f /* ctl 529 */,
+ 0x000f000f /* ctl 530 */,
+ 0x000f000f /* ctl 531 */,
+ 0x000f000f /* ctl 532 */,
+ 0x000f000f /* ctl 533 */,
+ 0x000f000f /* ctl 534 */,
+ 0x000f000f /* ctl 535 */,
+ 0x000f000f /* ctl 536 */,
+ 0x000f000f /* ctl 537 */,
+ 0x000f000f /* ctl 538 */,
+ 0x000f000f /* ctl 539 */,
+ 0x000f000f /* ctl 540 */,
+ 0x000f000f /* ctl 541 */,
+ 0x000f000f /* ctl 542 */,
+ 0x000f000f /* ctl 543 */,
+ 0x000f000f /* ctl 544 */,
+ 0x000f000f /* ctl 545 */,
+ 0x000f000f /* ctl 546 */,
+ 0x000f000f /* ctl 547 */,
+ 0x000f000f /* ctl 548 */,
+ 0x000f000f /* ctl 549 */,
+ 0x000f000f /* ctl 550 */,
+ 0x000f000f /* ctl 551 */,
+ 0x000f000f /* ctl 552 */,
+ 0x000f000f /* ctl 553 */,
+ 0x000f000f /* ctl 554 */,
+ 0x000f000f /* ctl 555 */,
+ 0x000f000f /* ctl 556 */,
+ 0x000f000f /* ctl 557 */,
+ 0x0000000f /* ctl 558 */
+ },
+ {
+ 0x00000b00 /* pi 0 */,
+ 0x00000101 /* pi 1 */,
+ 0x00640000 /* pi 2 */,
+ 0x00049040 /* pi 3 */,
+ 0x000001f8 /* pi 4 */,
+ 0x0000c100 /* pi 5 */,
+ 0x00000608 /* pi 6 */,
+ 0x00030a00 /* pi 7 */,
+ 0x00001850 /* pi 8 */,
+ 0x00049040 /* pi 9 */,
+ 0x00002482 /* pi 10 */,
+ 0x000001f8 /* pi 11 */,
+ 0x00000200 /* pi 12 */,
+ 0x00000200 /* pi 13 */,
+ 0x00000200 /* pi 14 */,
+ 0x00000200 /* pi 15 */,
+ 0x00000608 /* pi 16 */,
+ 0x00000200 /* pi 17 */,
+ 0x00000200 /* pi 18 */,
+ 0x00000200 /* pi 19 */,
+ 0x00000200 /* pi 20 */,
+ 0x00001850 /* pi 21 */,
+ 0x00000200 /* pi 22 */,
+ 0x00000200 /* pi 23 */,
+ 0x00000200 /* pi 24 */,
+ 0x00000200 /* pi 25 */,
+ 0x00002482 /* pi 26 */,
+ 0x00000200 /* pi 27 */,
+ 0x00000200 /* pi 28 */,
+ 0x00000200 /* pi 29 */,
+ 0x00000200 /* pi 30 */,
+ 0x00010000 /* pi 31 */,
+ 0x0000000f /* pi 32 */,
+ 0x01000003 /* pi 33 */,
+ 0x00000000 /* pi 34 */,
+ 0x00000000 /* pi 35 */,
+ 0x00000000 /* pi 36 */,
+ 0x00000000 /* pi 37 */,
+ 0x00000000 /* pi 38 */,
+ 0x00000000 /* pi 39 */,
+ 0x00000000 /* pi 40 */,
+ 0x00000000 /* pi 41 */,
+ 0x00000000 /* pi 42 */,
+ 0x00000000 /* pi 43 */,
+ 0x00000000 /* pi 44 */,
+ 0x00000000 /* pi 45 */,
+ 0x00000000 /* pi 46 */,
+ 0x00000000 /* pi 47 */,
+ 0x00000000 /* pi 48 */,
+ 0x01000000 /* pi 49 */,
+ 0x29030001 /* pi 50 */,
+ 0x0847382a /* pi 51 */,
+ 0x00010002 /* pi 52 */,
+ 0x00000009 /* pi 53 */,
+ 0x000000fc /* pi 54 */,
+ 0x0000001a /* pi 55 */,
+ 0x00000304 /* pi 56 */,
+ 0x00000068 /* pi 57 */,
+ 0x00000c28 /* pi 58 */,
+ 0x0000009d /* pi 59 */,
+ 0x00001241 /* pi 60 */,
+ 0x00000000 /* pi 61 */,
+ 0x00000000 /* pi 62 */,
+ 0x00000000 /* pi 63 */,
+ 0x00000000 /* pi 64 */,
+ 0x00000000 /* pi 65 */,
+ 0x00000000 /* pi 66 */,
+ 0x00000000 /* pi 67 */,
+ 0x00000000 /* pi 68 */,
+ 0x00000000 /* pi 69 */,
+ 0x01000000 /* pi 70 */,
+ 0x04040401 /* pi 71 */,
+ 0x0a000004 /* pi 72 */,
+ 0x01010128 /* pi 73 */,
+ 0x00000001 /* pi 74 */,
+ 0x00000000 /* pi 75 */,
+ 0x00030003 /* pi 76 */,
+ 0x00000018 /* pi 77 */,
+ 0x00000000 /* pi 78 */,
+ 0x00000000 /* pi 79 */,
+ 0x00060002 /* pi 80 */,
+ 0x00010001 /* pi 81 */,
+ 0x01010001 /* pi 82 */,
+ 0x02010100 /* pi 83 */,
+ 0x03030002 /* pi 84 */,
+ 0x04040705 /* pi 85 */,
+ 0x0000120a /* pi 86 */,
+ 0x00000000 /* pi 87 */,
+ 0x00000000 /* pi 88 */,
+ 0x00000000 /* pi 89 */,
+ 0x001e0303 /* pi 90 */,
+ 0x000007d0 /* pi 91 */,
+ 0x01010300 /* pi 92 */,
+ 0x01010101 /* pi 93 */,
+ 0x00000101 /* pi 94 */,
+ 0x00000000 /* pi 95 */,
+ 0x00000000 /* pi 96 */,
+ 0x01000000 /* pi 97 */,
+ 0x00000101 /* pi 98 */,
+ 0x55555a5a /* pi 99 */,
+ 0x55555a5a /* pi 100 */,
+ 0x55555a5a /* pi 101 */,
+ 0x55555a5a /* pi 102 */,
+ 0x05050001 /* pi 103 */,
+ 0x0200150c /* pi 104 */,
+ 0x000a0602 /* pi 105 */,
+ 0x170d0505 /* pi 106 */,
+ 0x02020202 /* pi 107 */,
+ 0x00000000 /* pi 108 */,
+ 0x00000003 /* pi 109 */,
+ 0x00170300 /* pi 110 */,
+ 0x00070019 /* pi 111 */,
+ 0x0008001a /* pi 112 */,
+ 0x000e0020 /* pi 113 */,
+ 0x00120024 /* pi 114 */,
+ 0x00000000 /* pi 115 */,
+ 0x00000000 /* pi 116 */,
+ 0x01010100 /* pi 117 */,
+ 0x00000001 /* pi 118 */,
+ 0x010a140a /* pi 119 */,
+ 0x00010011 /* pi 120 */,
+ 0x00020032 /* pi 121 */,
+ 0x000200c8 /* pi 122 */,
+ 0x010a012d /* pi 123 */,
+ 0x00120100 /* pi 124 */,
+ 0x01000012 /* pi 125 */,
+ 0x00330033 /* pi 126 */,
+ 0x00c90100 /* pi 127 */,
+ 0x010000c9 /* pi 128 */,
+ 0x012e012e /* pi 129 */,
+ 0x02044d4d /* pi 130 */,
+ 0x06051001 /* pi 131 */,
+ 0x02080706 /* pi 132 */,
+ 0x00c00002 /* pi 133 */,
+ 0x00c01000 /* pi 134 */,
+ 0x00c01000 /* pi 135 */,
+ 0x00c01000 /* pi 136 */,
+ 0x04041000 /* pi 137 */,
+ 0x0a020100 /* pi 138 */,
+ 0x01001510 /* pi 139 */,
+ 0x00001901 /* pi 140 */,
+ 0x001d0019 /* pi 141 */,
+ 0x005f0026 /* pi 142 */,
+ 0x4d030000 /* pi 143 */,
+ 0x0102044d /* pi 144 */,
+ 0x34000000 /* pi 145 */,
+ 0x00000000 /* pi 146 */,
+ 0x00000000 /* pi 147 */,
+ 0x01010000 /* pi 148 */,
+ 0x00000101 /* pi 149 */,
+ 0x31000600 /* pi 150 */,
+ 0x064d4d00 /* pi 151 */,
+ 0x00310006 /* pi 152 */,
+ 0x26064d4d /* pi 153 */,
+ 0x4d003112 /* pi 154 */,
+ 0x2446064d /* pi 155 */,
+ 0x4d4d0031 /* pi 156 */,
+ 0x00060006 /* pi 157 */,
+ 0x4d4d0031 /* pi 158 */,
+ 0x31000600 /* pi 159 */,
+ 0x064d4d00 /* pi 160 */,
+ 0x00311226 /* pi 161 */,
+ 0x46064d4d /* pi 162 */,
+ 0x4d003124 /* pi 163 */,
+ 0x0000064d /* pi 164 */,
+ 0x00001101 /* pi 165 */,
+ 0x00c80032 /* pi 166 */,
+ 0x010a012d /* pi 167 */,
+ 0x080c020a /* pi 168 */,
+ 0x00020d11 /* pi 169 */,
+ 0x00020002 /* pi 170 */,
+ 0x00020002 /* pi 171 */,
+ 0x00000000 /* pi 172 */,
+ 0x00000000 /* pi 173 */,
+ 0x04000000 /* pi 174 */,
+ 0x00080100 /* pi 175 */,
+ 0x000001f8 /* pi 176 */,
+ 0x000013b0 /* pi 177 */,
+ 0x00000608 /* pi 178 */,
+ 0x00003c50 /* pi 179 */,
+ 0x00001850 /* pi 180 */,
+ 0x0000f320 /* pi 181 */,
+ 0x00002482 /* pi 182 */,
+ 0x00016d14 /* pi 183 */,
+ 0x00010000 /* pi 184 */,
+ 0x02000101 /* pi 185 */,
+ 0x01030001 /* pi 186 */,
+ 0x00010400 /* pi 187 */,
+ 0x06000105 /* pi 188 */,
+ 0x01070001 /* pi 189 */,
+ 0x00000000 /* pi 190 */,
+ 0x00000000 /* pi 191 */
+ },
+ {
+ 0x76543210 /* phy 0 */,
+ 0x0004f008 /* phy 1 */,
+ 0x00020133 /* phy 2 */,
+ 0x00000000 /* phy 3 */,
+ 0x00000000 /* phy 4 */,
+ 0x00010000 /* phy 5 */,
+ 0x01cece0e /* phy 6 */,
+ 0x02cece0e /* phy 7 */,
+ 0x00010000 /* phy 8 */,
+ 0x00000004 /* phy 9 */,
+ 0x00000001 /* phy 10 */,
+ 0x00000000 /* phy 11 */,
+ 0x00000000 /* phy 12 */,
+ 0x00000100 /* phy 13 */,
+ 0x001700c0 /* phy 14 */,
+ 0x020100cc /* phy 15 */,
+ 0x00030066 /* phy 16 */,
+ 0x00000000 /* phy 17 */,
+ 0x00000001 /* phy 18 */,
+ 0x00000000 /* phy 19 */,
+ 0x00000000 /* phy 20 */,
+ 0x00000000 /* phy 21 */,
+ 0x00000000 /* phy 22 */,
+ 0x00000000 /* phy 23 */,
+ 0x00000000 /* phy 24 */,
+ 0x04080000 /* phy 25 */,
+ 0x04080400 /* phy 26 */,
+ 0x10000000 /* phy 27 */,
+ 0x0c008007 /* phy 28 */,
+ 0x00000100 /* phy 29 */,
+ 0x00000100 /* phy 30 */,
+ 0x55555555 /* phy 31 */,
+ 0xaaaaaaaa /* phy 32 */,
+ 0x55555555 /* phy 33 */,
+ 0xaaaaaaaa /* phy 34 */,
+ 0x00015555 /* phy 35 */,
+ 0x00000000 /* phy 36 */,
+ 0x00000000 /* phy 37 */,
+ 0x00000000 /* phy 38 */,
+ 0x00000000 /* phy 39 */,
+ 0x00000000 /* phy 40 */,
+ 0x00000000 /* phy 41 */,
+ 0x00000000 /* phy 42 */,
+ 0x00000000 /* phy 43 */,
+ 0x00000000 /* phy 44 */,
+ 0x00000000 /* phy 45 */,
+ 0x00000000 /* phy 46 */,
+ 0x00000000 /* phy 47 */,
+ 0x00000000 /* phy 48 */,
+ 0x00000000 /* phy 49 */,
+ 0x00000000 /* phy 50 */,
+ 0x00000000 /* phy 51 */,
+ 0x00000000 /* phy 52 */,
+ 0x00000000 /* phy 53 */,
+ 0x00000000 /* phy 54 */,
+ 0x00000104 /* phy 55 */,
+ 0x00000020 /* phy 56 */,
+ 0x00000000 /* phy 57 */,
+ 0x00000000 /* phy 58 */,
+ 0x00000000 /* phy 59 */,
+ 0x00000000 /* phy 60 */,
+ 0x00000000 /* phy 61 */,
+ 0x00000000 /* phy 62 */,
+ 0x00000000 /* phy 63 */,
+ 0x02800280 /* phy 64 */,
+ 0x02800280 /* phy 65 */,
+ 0x02800280 /* phy 66 */,
+ 0x02800280 /* phy 67 */,
+ 0x00000280 /* phy 68 */,
+ 0x00000000 /* phy 69 */,
+ 0x00000000 /* phy 70 */,
+ 0x00000000 /* phy 71 */,
+ 0x00000000 /* phy 72 */,
+ 0x00000000 /* phy 73 */,
+ 0x00800080 /* phy 74 */,
+ 0x00800080 /* phy 75 */,
+ 0x00800080 /* phy 76 */,
+ 0x00800080 /* phy 77 */,
+ 0x00800080 /* phy 78 */,
+ 0x00800080 /* phy 79 */,
+ 0x00800080 /* phy 80 */,
+ 0x00800080 /* phy 81 */,
+ 0x00800080 /* phy 82 */,
+ 0x00010019 /* phy 83 */,
+ 0x000001d0 /* phy 84 */,
+ 0x00000000 /* phy 85 */,
+ 0x00000200 /* phy 86 */,
+ 0x00000004 /* phy 87 */,
+ 0x51816152 /* phy 88 */,
+ 0xc0c08161 /* phy 89 */,
+ 0x00010000 /* phy 90 */,
+ 0x02001000 /* phy 91 */,
+ 0x0c0432ff /* phy 92 */,
+ 0x000f0c18 /* phy 93 */,
+ 0x01000140 /* phy 94 */,
+ 0x00000c20 /* phy 95 */,
+ 0x00000000 /* phy 96 */,
+ 0x00000000 /* phy 97 */,
+ 0x00000000 /* phy 98 */,
+ 0x00000000 /* phy 99 */,
+ 0x00000000 /* phy 100 */,
+ 0x00000000 /* phy 101 */,
+ 0x00000000 /* phy 102 */,
+ 0x00000000 /* phy 103 */,
+ 0x00000000 /* phy 104 */,
+ 0x00000000 /* phy 105 */,
+ 0x00000000 /* phy 106 */,
+ 0x00000000 /* phy 107 */,
+ 0x00000000 /* phy 108 */,
+ 0x00000000 /* phy 109 */,
+ 0x00000000 /* phy 110 */,
+ 0x00000000 /* phy 111 */,
+ 0x00000000 /* phy 112 */,
+ 0x00000000 /* phy 113 */,
+ 0x00000000 /* phy 114 */,
+ 0x00000000 /* phy 115 */,
+ 0x00000000 /* phy 116 */,
+ 0x00000000 /* phy 117 */,
+ 0x00000000 /* phy 118 */,
+ 0x00000000 /* phy 119 */,
+ 0x00000000 /* phy 120 */,
+ 0x00000000 /* phy 121 */,
+ 0x00000000 /* phy 122 */,
+ 0x00000000 /* phy 123 */,
+ 0x00000000 /* phy 124 */,
+ 0x00000000 /* phy 125 */,
+ 0x00000000 /* phy 126 */,
+ 0x00000000 /* phy 127 */,
+ 0x76543210 /* phy 128 */,
+ 0x0004f008 /* phy 129 */,
+ 0x00020133 /* phy 130 */,
+ 0x00000000 /* phy 131 */,
+ 0x00000000 /* phy 132 */,
+ 0x00010000 /* phy 133 */,
+ 0x01cece0e /* phy 134 */,
+ 0x02cece0e /* phy 135 */,
+ 0x00010000 /* phy 136 */,
+ 0x00000004 /* phy 137 */,
+ 0x00000001 /* phy 138 */,
+ 0x00000000 /* phy 139 */,
+ 0x00000000 /* phy 140 */,
+ 0x00000100 /* phy 141 */,
+ 0x001700c0 /* phy 142 */,
+ 0x020100cc /* phy 143 */,
+ 0x00030066 /* phy 144 */,
+ 0x00000000 /* phy 145 */,
+ 0x00000001 /* phy 146 */,
+ 0x00000000 /* phy 147 */,
+ 0x00000000 /* phy 148 */,
+ 0x00000000 /* phy 149 */,
+ 0x00000000 /* phy 150 */,
+ 0x00000000 /* phy 151 */,
+ 0x00000000 /* phy 152 */,
+ 0x04080000 /* phy 153 */,
+ 0x04080400 /* phy 154 */,
+ 0x10000000 /* phy 155 */,
+ 0x0c008007 /* phy 156 */,
+ 0x00000100 /* phy 157 */,
+ 0x00000100 /* phy 158 */,
+ 0x55555555 /* phy 159 */,
+ 0xaaaaaaaa /* phy 160 */,
+ 0x55555555 /* phy 161 */,
+ 0xaaaaaaaa /* phy 162 */,
+ 0x00005555 /* phy 163 */,
+ 0x00000000 /* phy 164 */,
+ 0x00000000 /* phy 165 */,
+ 0x00000000 /* phy 166 */,
+ 0x00000000 /* phy 167 */,
+ 0x00000000 /* phy 168 */,
+ 0x00000000 /* phy 169 */,
+ 0x00000000 /* phy 170 */,
+ 0x00000000 /* phy 171 */,
+ 0x00000000 /* phy 172 */,
+ 0x00000000 /* phy 173 */,
+ 0x00000000 /* phy 174 */,
+ 0x00000000 /* phy 175 */,
+ 0x00000000 /* phy 176 */,
+ 0x00000000 /* phy 177 */,
+ 0x00000000 /* phy 178 */,
+ 0x00000000 /* phy 179 */,
+ 0x00000000 /* phy 180 */,
+ 0x00000000 /* phy 181 */,
+ 0x00000000 /* phy 182 */,
+ 0x00000104 /* phy 183 */,
+ 0x00000020 /* phy 184 */,
+ 0x00000000 /* phy 185 */,
+ 0x00000000 /* phy 186 */,
+ 0x00000000 /* phy 187 */,
+ 0x00000000 /* phy 188 */,
+ 0x00000000 /* phy 189 */,
+ 0x00000000 /* phy 190 */,
+ 0x00000000 /* phy 191 */,
+ 0x02800280 /* phy 192 */,
+ 0x02800280 /* phy 193 */,
+ 0x02800280 /* phy 194 */,
+ 0x02800280 /* phy 195 */,
+ 0x00000280 /* phy 196 */,
+ 0x00000000 /* phy 197 */,
+ 0x00000000 /* phy 198 */,
+ 0x00000000 /* phy 199 */,
+ 0x00000000 /* phy 200 */,
+ 0x00000000 /* phy 201 */,
+ 0x00800080 /* phy 202 */,
+ 0x00800080 /* phy 203 */,
+ 0x00800080 /* phy 204 */,
+ 0x00800080 /* phy 205 */,
+ 0x00800080 /* phy 206 */,
+ 0x00800080 /* phy 207 */,
+ 0x00800080 /* phy 208 */,
+ 0x00800080 /* phy 209 */,
+ 0x00800080 /* phy 210 */,
+ 0x00010019 /* phy 211 */,
+ 0x000001d0 /* phy 212 */,
+ 0x00000000 /* phy 213 */,
+ 0x00000200 /* phy 214 */,
+ 0x00000004 /* phy 215 */,
+ 0x51816152 /* phy 216 */,
+ 0xc0c08161 /* phy 217 */,
+ 0x00010000 /* phy 218 */,
+ 0x02001000 /* phy 219 */,
+ 0x0c0432ff /* phy 220 */,
+ 0x000f0c18 /* phy 221 */,
+ 0x01000140 /* phy 222 */,
+ 0x00000c20 /* phy 223 */,
+ 0x00000000 /* phy 224 */,
+ 0x00000000 /* phy 225 */,
+ 0x00000000 /* phy 226 */,
+ 0x00000000 /* phy 227 */,
+ 0x00000000 /* phy 228 */,
+ 0x00000000 /* phy 229 */,
+ 0x00000000 /* phy 230 */,
+ 0x00000000 /* phy 231 */,
+ 0x00000000 /* phy 232 */,
+ 0x00000000 /* phy 233 */,
+ 0x00000000 /* phy 234 */,
+ 0x00000000 /* phy 235 */,
+ 0x00000000 /* phy 236 */,
+ 0x00000000 /* phy 237 */,
+ 0x00000000 /* phy 238 */,
+ 0x00000000 /* phy 239 */,
+ 0x00000000 /* phy 240 */,
+ 0x00000000 /* phy 241 */,
+ 0x00000000 /* phy 242 */,
+ 0x00000000 /* phy 243 */,
+ 0x00000000 /* phy 244 */,
+ 0x00000000 /* phy 245 */,
+ 0x00000000 /* phy 246 */,
+ 0x00000000 /* phy 247 */,
+ 0x00000000 /* phy 248 */,
+ 0x00000000 /* phy 249 */,
+ 0x00000000 /* phy 250 */,
+ 0x00000000 /* phy 251 */,
+ 0x00000000 /* phy 252 */,
+ 0x00000000 /* phy 253 */,
+ 0x00000000 /* phy 254 */,
+ 0x00000000 /* phy 255 */,
+ 0x76543210 /* phy 256 */,
+ 0x0004f008 /* phy 257 */,
+ 0x00020133 /* phy 258 */,
+ 0x00000000 /* phy 259 */,
+ 0x00000000 /* phy 260 */,
+ 0x00010000 /* phy 261 */,
+ 0x01cece0e /* phy 262 */,
+ 0x02cece0e /* phy 263 */,
+ 0x00010000 /* phy 264 */,
+ 0x00000004 /* phy 265 */,
+ 0x00000001 /* phy 266 */,
+ 0x00000000 /* phy 267 */,
+ 0x00000000 /* phy 268 */,
+ 0x00000100 /* phy 269 */,
+ 0x001700c0 /* phy 270 */,
+ 0x020100cc /* phy 271 */,
+ 0x00030066 /* phy 272 */,
+ 0x00000000 /* phy 273 */,
+ 0x00000001 /* phy 274 */,
+ 0x00000000 /* phy 275 */,
+ 0x00000000 /* phy 276 */,
+ 0x00000000 /* phy 277 */,
+ 0x00000000 /* phy 278 */,
+ 0x00000000 /* phy 279 */,
+ 0x00000000 /* phy 280 */,
+ 0x04080000 /* phy 281 */,
+ 0x04080400 /* phy 282 */,
+ 0x10000000 /* phy 283 */,
+ 0x0c008007 /* phy 284 */,
+ 0x00000100 /* phy 285 */,
+ 0x00000100 /* phy 286 */,
+ 0x55555555 /* phy 287 */,
+ 0xaaaaaaaa /* phy 288 */,
+ 0x55555555 /* phy 289 */,
+ 0xaaaaaaaa /* phy 290 */,
+ 0x00015555 /* phy 291 */,
+ 0x00000000 /* phy 292 */,
+ 0x00000000 /* phy 293 */,
+ 0x00000000 /* phy 294 */,
+ 0x00000000 /* phy 295 */,
+ 0x00000000 /* phy 296 */,
+ 0x00000000 /* phy 297 */,
+ 0x00000000 /* phy 298 */,
+ 0x00000000 /* phy 299 */,
+ 0x00000000 /* phy 300 */,
+ 0x00000000 /* phy 301 */,
+ 0x00000000 /* phy 302 */,
+ 0x00000000 /* phy 303 */,
+ 0x00000000 /* phy 304 */,
+ 0x00000000 /* phy 305 */,
+ 0x00000000 /* phy 306 */,
+ 0x00000000 /* phy 307 */,
+ 0x00000000 /* phy 308 */,
+ 0x00000000 /* phy 309 */,
+ 0x00000000 /* phy 310 */,
+ 0x00000104 /* phy 311 */,
+ 0x00000020 /* phy 312 */,
+ 0x00000000 /* phy 313 */,
+ 0x00000000 /* phy 314 */,
+ 0x00000000 /* phy 315 */,
+ 0x00000000 /* phy 316 */,
+ 0x00000000 /* phy 317 */,
+ 0x00000000 /* phy 318 */,
+ 0x00000000 /* phy 319 */,
+ 0x02800280 /* phy 320 */,
+ 0x02800280 /* phy 321 */,
+ 0x02800280 /* phy 322 */,
+ 0x02800280 /* phy 323 */,
+ 0x00000280 /* phy 324 */,
+ 0x00000000 /* phy 325 */,
+ 0x00000000 /* phy 326 */,
+ 0x00000000 /* phy 327 */,
+ 0x00000000 /* phy 328 */,
+ 0x00000000 /* phy 329 */,
+ 0x00800080 /* phy 330 */,
+ 0x00800080 /* phy 331 */,
+ 0x00800080 /* phy 332 */,
+ 0x00800080 /* phy 333 */,
+ 0x00800080 /* phy 334 */,
+ 0x00800080 /* phy 335 */,
+ 0x00800080 /* phy 336 */,
+ 0x00800080 /* phy 337 */,
+ 0x00800080 /* phy 338 */,
+ 0x00010019 /* phy 339 */,
+ 0x000001d0 /* phy 340 */,
+ 0x00000000 /* phy 341 */,
+ 0x00000200 /* phy 342 */,
+ 0x00000004 /* phy 343 */,
+ 0x51816152 /* phy 344 */,
+ 0xc0c08161 /* phy 345 */,
+ 0x00010000 /* phy 346 */,
+ 0x02001000 /* phy 347 */,
+ 0x0c0432ff /* phy 348 */,
+ 0x000f0c18 /* phy 349 */,
+ 0x01000140 /* phy 350 */,
+ 0x00000c20 /* phy 351 */,
+ 0x00000000 /* phy 352 */,
+ 0x00000000 /* phy 353 */,
+ 0x00000000 /* phy 354 */,
+ 0x00000000 /* phy 355 */,
+ 0x00000000 /* phy 356 */,
+ 0x00000000 /* phy 357 */,
+ 0x00000000 /* phy 358 */,
+ 0x00000000 /* phy 359 */,
+ 0x00000000 /* phy 360 */,
+ 0x00000000 /* phy 361 */,
+ 0x00000000 /* phy 362 */,
+ 0x00000000 /* phy 363 */,
+ 0x00000000 /* phy 364 */,
+ 0x00000000 /* phy 365 */,
+ 0x00000000 /* phy 366 */,
+ 0x00000000 /* phy 367 */,
+ 0x00000000 /* phy 368 */,
+ 0x00000000 /* phy 369 */,
+ 0x00000000 /* phy 370 */,
+ 0x00000000 /* phy 371 */,
+ 0x00000000 /* phy 372 */,
+ 0x00000000 /* phy 373 */,
+ 0x00000000 /* phy 374 */,
+ 0x00000000 /* phy 375 */,
+ 0x00000000 /* phy 376 */,
+ 0x00000000 /* phy 377 */,
+ 0x00000000 /* phy 378 */,
+ 0x00000000 /* phy 379 */,
+ 0x00000000 /* phy 380 */,
+ 0x00000000 /* phy 381 */,
+ 0x00000000 /* phy 382 */,
+ 0x00000000 /* phy 383 */,
+ 0x76543210 /* phy 384 */,
+ 0x0004f008 /* phy 385 */,
+ 0x00020133 /* phy 386 */,
+ 0x00000000 /* phy 387 */,
+ 0x00000000 /* phy 388 */,
+ 0x00010000 /* phy 389 */,
+ 0x01cece0e /* phy 390 */,
+ 0x02cece0e /* phy 391 */,
+ 0x00010000 /* phy 392 */,
+ 0x00000004 /* phy 393 */,
+ 0x00000001 /* phy 394 */,
+ 0x00000000 /* phy 395 */,
+ 0x00000000 /* phy 396 */,
+ 0x00000100 /* phy 397 */,
+ 0x001700c0 /* phy 398 */,
+ 0x020100cc /* phy 399 */,
+ 0x00030066 /* phy 400 */,
+ 0x00000000 /* phy 401 */,
+ 0x00000001 /* phy 402 */,
+ 0x00000000 /* phy 403 */,
+ 0x00000000 /* phy 404 */,
+ 0x00000000 /* phy 405 */,
+ 0x00000000 /* phy 406 */,
+ 0x00000000 /* phy 407 */,
+ 0x00000000 /* phy 408 */,
+ 0x04080000 /* phy 409 */,
+ 0x04080400 /* phy 410 */,
+ 0x10000000 /* phy 411 */,
+ 0x0c008007 /* phy 412 */,
+ 0x00000100 /* phy 413 */,
+ 0x00000100 /* phy 414 */,
+ 0x55555555 /* phy 415 */,
+ 0xaaaaaaaa /* phy 416 */,
+ 0x55555555 /* phy 417 */,
+ 0xaaaaaaaa /* phy 418 */,
+ 0x00005555 /* phy 419 */,
+ 0x00000000 /* phy 420 */,
+ 0x00000000 /* phy 421 */,
+ 0x00000000 /* phy 422 */,
+ 0x00000000 /* phy 423 */,
+ 0x00000000 /* phy 424 */,
+ 0x00000000 /* phy 425 */,
+ 0x00000000 /* phy 426 */,
+ 0x00000000 /* phy 427 */,
+ 0x00000000 /* phy 428 */,
+ 0x00000000 /* phy 429 */,
+ 0x00000000 /* phy 430 */,
+ 0x00000000 /* phy 431 */,
+ 0x00000000 /* phy 432 */,
+ 0x00000000 /* phy 433 */,
+ 0x00000000 /* phy 434 */,
+ 0x00000000 /* phy 435 */,
+ 0x00000000 /* phy 436 */,
+ 0x00000000 /* phy 437 */,
+ 0x00000000 /* phy 438 */,
+ 0x00000104 /* phy 439 */,
+ 0x00000020 /* phy 440 */,
+ 0x00000000 /* phy 441 */,
+ 0x00000000 /* phy 442 */,
+ 0x00000000 /* phy 443 */,
+ 0x00000000 /* phy 444 */,
+ 0x00000000 /* phy 445 */,
+ 0x00000000 /* phy 446 */,
+ 0x00000000 /* phy 447 */,
+ 0x02800280 /* phy 448 */,
+ 0x02800280 /* phy 449 */,
+ 0x02800280 /* phy 450 */,
+ 0x02800280 /* phy 451 */,
+ 0x00000280 /* phy 452 */,
+ 0x00000000 /* phy 453 */,
+ 0x00000000 /* phy 454 */,
+ 0x00000000 /* phy 455 */,
+ 0x00000000 /* phy 456 */,
+ 0x00000000 /* phy 457 */,
+ 0x00800080 /* phy 458 */,
+ 0x00800080 /* phy 459 */,
+ 0x00800080 /* phy 460 */,
+ 0x00800080 /* phy 461 */,
+ 0x00800080 /* phy 462 */,
+ 0x00800080 /* phy 463 */,
+ 0x00800080 /* phy 464 */,
+ 0x00800080 /* phy 465 */,
+ 0x00800080 /* phy 466 */,
+ 0x00010019 /* phy 467 */,
+ 0x000001d0 /* phy 468 */,
+ 0x00000000 /* phy 469 */,
+ 0x00000200 /* phy 470 */,
+ 0x00000004 /* phy 471 */,
+ 0x51816152 /* phy 472 */,
+ 0xc0c08161 /* phy 473 */,
+ 0x00010000 /* phy 474 */,
+ 0x02001000 /* phy 475 */,
+ 0x0c0432ff /* phy 476 */,
+ 0x000f0c18 /* phy 477 */,
+ 0x01000140 /* phy 478 */,
+ 0x00000c20 /* phy 479 */,
+ 0x00000000 /* phy 480 */,
+ 0x00000000 /* phy 481 */,
+ 0x00000000 /* phy 482 */,
+ 0x00000000 /* phy 483 */,
+ 0x00000000 /* phy 484 */,
+ 0x00000000 /* phy 485 */,
+ 0x00000000 /* phy 486 */,
+ 0x00000000 /* phy 487 */,
+ 0x00000000 /* phy 488 */,
+ 0x00000000 /* phy 489 */,
+ 0x00000000 /* phy 490 */,
+ 0x00000000 /* phy 491 */,
+ 0x00000000 /* phy 492 */,
+ 0x00000000 /* phy 493 */,
+ 0x00000000 /* phy 494 */,
+ 0x00000000 /* phy 495 */,
+ 0x00000000 /* phy 496 */,
+ 0x00000000 /* phy 497 */,
+ 0x00000000 /* phy 498 */,
+ 0x00000000 /* phy 499 */,
+ 0x00000000 /* phy 500 */,
+ 0x00000000 /* phy 501 */,
+ 0x00000000 /* phy 502 */,
+ 0x00000000 /* phy 503 */,
+ 0x00000000 /* phy 504 */,
+ 0x00000000 /* phy 505 */,
+ 0x00000000 /* phy 506 */,
+ 0x00000000 /* phy 507 */,
+ 0x00000000 /* phy 508 */,
+ 0x00000000 /* phy 509 */,
+ 0x00000000 /* phy 510 */,
+ 0x00000000 /* phy 511 */,
+ 0x00000000 /* phy 512 */,
+ 0x00000000 /* phy 513 */,
+ 0x00000000 /* phy 514 */,
+ 0x00000000 /* phy 515 */,
+ 0x00000000 /* phy 516 */,
+ 0x00000000 /* phy 517 */,
+ 0x00000000 /* phy 518 */,
+ 0x00010000 /* phy 519 */,
+ 0x00000200 /* phy 520 */,
+ 0x00000000 /* phy 521 */,
+ 0x00000000 /* phy 522 */,
+ 0x00000000 /* phy 523 */,
+ 0x00400320 /* phy 524 */,
+ 0x00000040 /* phy 525 */,
+ 0x00000098 /* phy 526 */,
+ 0x03000098 /* phy 527 */,
+ 0x00000200 /* phy 528 */,
+ 0x00000000 /* phy 529 */,
+ 0x00000000 /* phy 530 */,
+ 0x00000000 /* phy 531 */,
+ 0x0000002a /* phy 532 */,
+ 0x00000015 /* phy 533 */,
+ 0x00000015 /* phy 534 */,
+ 0x0000002a /* phy 535 */,
+ 0x00000033 /* phy 536 */,
+ 0x0000000c /* phy 537 */,
+ 0x0000000c /* phy 538 */,
+ 0x00000033 /* phy 539 */,
+ 0x00000010 /* phy 540 */,
+ 0x00030000 /* phy 541 */,
+ 0x00000003 /* phy 542 */,
+ 0x000300ce /* phy 543 */,
+ 0x03000300 /* phy 544 */,
+ 0x03000300 /* phy 545 */,
+ 0x00000300 /* phy 546 */,
+ 0xff020010 /* phy 547 */,
+ 0x00000332 /* phy 548 */,
+ 0x00000000 /* phy 549 */,
+ 0x00000000 /* phy 550 */,
+ 0x00000000 /* phy 551 */,
+ 0x00000000 /* phy 552 */,
+ 0x00000000 /* phy 553 */,
+ 0x00000000 /* phy 554 */,
+ 0x00000000 /* phy 555 */,
+ 0x00000000 /* phy 556 */,
+ 0x00000000 /* phy 557 */,
+ 0x00000000 /* phy 558 */,
+ 0x00000000 /* phy 559 */,
+ 0x00000000 /* phy 560 */,
+ 0x00000000 /* phy 561 */,
+ 0x00000000 /* phy 562 */,
+ 0x00000000 /* phy 563 */,
+ 0x00000000 /* phy 564 */,
+ 0x00000000 /* phy 565 */,
+ 0x00000000 /* phy 566 */,
+ 0x00000000 /* phy 567 */,
+ 0x00000000 /* phy 568 */,
+ 0x00000000 /* phy 569 */,
+ 0x00000000 /* phy 570 */,
+ 0x00000000 /* phy 571 */,
+ 0x00000000 /* phy 572 */,
+ 0x00000000 /* phy 573 */,
+ 0x00000000 /* phy 574 */,
+ 0x00000000 /* phy 575 */,
+ 0x00000000 /* phy 576 */,
+ 0x00000000 /* phy 577 */,
+ 0x00000000 /* phy 578 */,
+ 0x00000000 /* phy 579 */,
+ 0x00000000 /* phy 580 */,
+ 0x00000000 /* phy 581 */,
+ 0x00000000 /* phy 582 */,
+ 0x00000000 /* phy 583 */,
+ 0x00000000 /* phy 584 */,
+ 0x00000000 /* phy 585 */,
+ 0x00000000 /* phy 586 */,
+ 0x00000000 /* phy 587 */,
+ 0x00000000 /* phy 588 */,
+ 0x00000000 /* phy 589 */,
+ 0x00000000 /* phy 590 */,
+ 0x00000000 /* phy 591 */,
+ 0x00000000 /* phy 592 */,
+ 0x00000000 /* phy 593 */,
+ 0x00000000 /* phy 594 */,
+ 0x00000000 /* phy 595 */,
+ 0x00000000 /* phy 596 */,
+ 0x00000000 /* phy 597 */,
+ 0x00000000 /* phy 598 */,
+ 0x00000000 /* phy 599 */,
+ 0x00000000 /* phy 600 */,
+ 0x00000000 /* phy 601 */,
+ 0x00000000 /* phy 602 */,
+ 0x00000000 /* phy 603 */,
+ 0x00000000 /* phy 604 */,
+ 0x00000000 /* phy 605 */,
+ 0x00000000 /* phy 606 */,
+ 0x00000000 /* phy 607 */,
+ 0x00000000 /* phy 608 */,
+ 0x00000000 /* phy 609 */,
+ 0x00000000 /* phy 610 */,
+ 0x00000000 /* phy 611 */,
+ 0x00000000 /* phy 612 */,
+ 0x00000000 /* phy 613 */,
+ 0x00000000 /* phy 614 */,
+ 0x00000000 /* phy 615 */,
+ 0x00000000 /* phy 616 */,
+ 0x00000000 /* phy 617 */,
+ 0x00000000 /* phy 618 */,
+ 0x00000000 /* phy 619 */,
+ 0x00000000 /* phy 620 */,
+ 0x00000000 /* phy 621 */,
+ 0x00000000 /* phy 622 */,
+ 0x00000000 /* phy 623 */,
+ 0x00000000 /* phy 624 */,
+ 0x00000000 /* phy 625 */,
+ 0x00000000 /* phy 626 */,
+ 0x00000000 /* phy 627 */,
+ 0x00000000 /* phy 628 */,
+ 0x00000000 /* phy 629 */,
+ 0x00000000 /* phy 630 */,
+ 0x00000000 /* phy 631 */,
+ 0x00000000 /* phy 632 */,
+ 0x00000000 /* phy 633 */,
+ 0x00000000 /* phy 634 */,
+ 0x00000000 /* phy 635 */,
+ 0x00000000 /* phy 636 */,
+ 0x00000000 /* phy 637 */,
+ 0x00000000 /* phy 638 */,
+ 0x00000000 /* phy 639 */,
+ 0x00000000 /* phy 640 */,
+ 0x00000000 /* phy 641 */,
+ 0x00000000 /* phy 642 */,
+ 0x00000000 /* phy 643 */,
+ 0x00000000 /* phy 644 */,
+ 0x00000000 /* phy 645 */,
+ 0x00000000 /* phy 646 */,
+ 0x00010000 /* phy 647 */,
+ 0x00000200 /* phy 648 */,
+ 0x00000000 /* phy 649 */,
+ 0x00000000 /* phy 650 */,
+ 0x00000000 /* phy 651 */,
+ 0x00400320 /* phy 652 */,
+ 0x00000040 /* phy 653 */,
+ 0x0000dcba /* phy 654 */,
+ 0x0300dcba /* phy 655 */,
+ 0x00000200 /* phy 656 */,
+ 0x00000000 /* phy 657 */,
+ 0x00000000 /* phy 658 */,
+ 0x00000000 /* phy 659 */,
+ 0x0000002a /* phy 660 */,
+ 0x00000015 /* phy 661 */,
+ 0x00000015 /* phy 662 */,
+ 0x0000002a /* phy 663 */,
+ 0x00000033 /* phy 664 */,
+ 0x0000000c /* phy 665 */,
+ 0x0000000c /* phy 666 */,
+ 0x00000033 /* phy 667 */,
+ 0x00005432 /* phy 668 */,
+ 0x000f0000 /* phy 669 */,
+ 0x0000000f /* phy 670 */,
+ 0x000300ce /* phy 671 */,
+ 0x03000300 /* phy 672 */,
+ 0x03000300 /* phy 673 */,
+ 0x00000300 /* phy 674 */,
+ 0xff020010 /* phy 675 */,
+ 0x00000332 /* phy 676 */,
+ 0x00000000 /* phy 677 */,
+ 0x00000000 /* phy 678 */,
+ 0x00000000 /* phy 679 */,
+ 0x00000000 /* phy 680 */,
+ 0x00000000 /* phy 681 */,
+ 0x00000000 /* phy 682 */,
+ 0x00000000 /* phy 683 */,
+ 0x00000000 /* phy 684 */,
+ 0x00000000 /* phy 685 */,
+ 0x00000000 /* phy 686 */,
+ 0x00000000 /* phy 687 */,
+ 0x00000000 /* phy 688 */,
+ 0x00000000 /* phy 689 */,
+ 0x00000000 /* phy 690 */,
+ 0x00000000 /* phy 691 */,
+ 0x00000000 /* phy 692 */,
+ 0x00000000 /* phy 693 */,
+ 0x00000000 /* phy 694 */,
+ 0x00000000 /* phy 695 */,
+ 0x00000000 /* phy 696 */,
+ 0x00000000 /* phy 697 */,
+ 0x00000000 /* phy 698 */,
+ 0x00000000 /* phy 699 */,
+ 0x00000000 /* phy 700 */,
+ 0x00000000 /* phy 701 */,
+ 0x00000000 /* phy 702 */,
+ 0x00000000 /* phy 703 */,
+ 0x00000000 /* phy 704 */,
+ 0x00000000 /* phy 705 */,
+ 0x00000000 /* phy 706 */,
+ 0x00000000 /* phy 707 */,
+ 0x00000000 /* phy 708 */,
+ 0x00000000 /* phy 709 */,
+ 0x00000000 /* phy 710 */,
+ 0x00000000 /* phy 711 */,
+ 0x00000000 /* phy 712 */,
+ 0x00000000 /* phy 713 */,
+ 0x00000000 /* phy 714 */,
+ 0x00000000 /* phy 715 */,
+ 0x00000000 /* phy 716 */,
+ 0x00000000 /* phy 717 */,
+ 0x00000000 /* phy 718 */,
+ 0x00000000 /* phy 719 */,
+ 0x00000000 /* phy 720 */,
+ 0x00000000 /* phy 721 */,
+ 0x00000000 /* phy 722 */,
+ 0x00000000 /* phy 723 */,
+ 0x00000000 /* phy 724 */,
+ 0x00000000 /* phy 725 */,
+ 0x00000000 /* phy 726 */,
+ 0x00000000 /* phy 727 */,
+ 0x00000000 /* phy 728 */,
+ 0x00000000 /* phy 729 */,
+ 0x00000000 /* phy 730 */,
+ 0x00000000 /* phy 731 */,
+ 0x00000000 /* phy 732 */,
+ 0x00000000 /* phy 733 */,
+ 0x00000000 /* phy 734 */,
+ 0x00000000 /* phy 735 */,
+ 0x00000000 /* phy 736 */,
+ 0x00000000 /* phy 737 */,
+ 0x00000000 /* phy 738 */,
+ 0x00000000 /* phy 739 */,
+ 0x00000000 /* phy 740 */,
+ 0x00000000 /* phy 741 */,
+ 0x00000000 /* phy 742 */,
+ 0x00000000 /* phy 743 */,
+ 0x00000000 /* phy 744 */,
+ 0x00000000 /* phy 745 */,
+ 0x00000000 /* phy 746 */,
+ 0x00000000 /* phy 747 */,
+ 0x00000000 /* phy 748 */,
+ 0x00000000 /* phy 749 */,
+ 0x00000000 /* phy 750 */,
+ 0x00000000 /* phy 751 */,
+ 0x00000000 /* phy 752 */,
+ 0x00000000 /* phy 753 */,
+ 0x00000000 /* phy 754 */,
+ 0x00000000 /* phy 755 */,
+ 0x00000000 /* phy 756 */,
+ 0x00000000 /* phy 757 */,
+ 0x00000000 /* phy 758 */,
+ 0x00000000 /* phy 759 */,
+ 0x00000000 /* phy 760 */,
+ 0x00000000 /* phy 761 */,
+ 0x00000000 /* phy 762 */,
+ 0x00000000 /* phy 763 */,
+ 0x00000000 /* phy 764 */,
+ 0x00000000 /* phy 765 */,
+ 0x00000000 /* phy 766 */,
+ 0x00000000 /* phy 767 */,
+ 0x00000000 /* phy 768 */,
+ 0x00000000 /* phy 769 */,
+ 0x00000000 /* phy 770 */,
+ 0x00000000 /* phy 771 */,
+ 0x00000000 /* phy 772 */,
+ 0x00000000 /* phy 773 */,
+ 0x00000000 /* phy 774 */,
+ 0x00010000 /* phy 775 */,
+ 0x00000200 /* phy 776 */,
+ 0x00000000 /* phy 777 */,
+ 0x00000000 /* phy 778 */,
+ 0x00000000 /* phy 779 */,
+ 0x00400320 /* phy 780 */,
+ 0x00000040 /* phy 781 */,
+ 0x00000098 /* phy 782 */,
+ 0x03000098 /* phy 783 */,
+ 0x00000200 /* phy 784 */,
+ 0x00000000 /* phy 785 */,
+ 0x00000000 /* phy 786 */,
+ 0x00000000 /* phy 787 */,
+ 0x0000002a /* phy 788 */,
+ 0x00000015 /* phy 789 */,
+ 0x00000015 /* phy 790 */,
+ 0x0000002a /* phy 791 */,
+ 0x00000033 /* phy 792 */,
+ 0x0000000c /* phy 793 */,
+ 0x0000000c /* phy 794 */,
+ 0x00000033 /* phy 795 */,
+ 0x00000010 /* phy 796 */,
+ 0x00030000 /* phy 797 */,
+ 0x00000003 /* phy 798 */,
+ 0x000300ce /* phy 799 */,
+ 0x03000300 /* phy 800 */,
+ 0x03000300 /* phy 801 */,
+ 0x00000300 /* phy 802 */,
+ 0xff020010 /* phy 803 */,
+ 0x00000332 /* phy 804 */,
+ 0x00000000 /* phy 805 */,
+ 0x00000000 /* phy 806 */,
+ 0x00000000 /* phy 807 */,
+ 0x00000000 /* phy 808 */,
+ 0x00000000 /* phy 809 */,
+ 0x00000000 /* phy 810 */,
+ 0x00000000 /* phy 811 */,
+ 0x00000000 /* phy 812 */,
+ 0x00000000 /* phy 813 */,
+ 0x00000000 /* phy 814 */,
+ 0x00000000 /* phy 815 */,
+ 0x00000000 /* phy 816 */,
+ 0x00000000 /* phy 817 */,
+ 0x00000000 /* phy 818 */,
+ 0x00000000 /* phy 819 */,
+ 0x00000000 /* phy 820 */,
+ 0x00000000 /* phy 821 */,
+ 0x00000000 /* phy 822 */,
+ 0x00000000 /* phy 823 */,
+ 0x00000000 /* phy 824 */,
+ 0x00000000 /* phy 825 */,
+ 0x00000000 /* phy 826 */,
+ 0x00000000 /* phy 827 */,
+ 0x00000000 /* phy 828 */,
+ 0x00000000 /* phy 829 */,
+ 0x00000000 /* phy 830 */,
+ 0x00000000 /* phy 831 */,
+ 0x00000000 /* phy 832 */,
+ 0x00000000 /* phy 833 */,
+ 0x00000000 /* phy 834 */,
+ 0x00000000 /* phy 835 */,
+ 0x00000000 /* phy 836 */,
+ 0x00000000 /* phy 837 */,
+ 0x00000000 /* phy 838 */,
+ 0x00000000 /* phy 839 */,
+ 0x00000000 /* phy 840 */,
+ 0x00000000 /* phy 841 */,
+ 0x00000000 /* phy 842 */,
+ 0x00000000 /* phy 843 */,
+ 0x00000000 /* phy 844 */,
+ 0x00000000 /* phy 845 */,
+ 0x00000000 /* phy 846 */,
+ 0x00000000 /* phy 847 */,
+ 0x00000000 /* phy 848 */,
+ 0x00000000 /* phy 849 */,
+ 0x00000000 /* phy 850 */,
+ 0x00000000 /* phy 851 */,
+ 0x00000000 /* phy 852 */,
+ 0x00000000 /* phy 853 */,
+ 0x00000000 /* phy 854 */,
+ 0x00000000 /* phy 855 */,
+ 0x00000000 /* phy 856 */,
+ 0x00000000 /* phy 857 */,
+ 0x00000000 /* phy 858 */,
+ 0x00000000 /* phy 859 */,
+ 0x00000000 /* phy 860 */,
+ 0x00000000 /* phy 861 */,
+ 0x00000000 /* phy 862 */,
+ 0x00000000 /* phy 863 */,
+ 0x00000000 /* phy 864 */,
+ 0x00000000 /* phy 865 */,
+ 0x00000000 /* phy 866 */,
+ 0x00000000 /* phy 867 */,
+ 0x00000000 /* phy 868 */,
+ 0x00000000 /* phy 869 */,
+ 0x00000000 /* phy 870 */,
+ 0x00000000 /* phy 871 */,
+ 0x00000000 /* phy 872 */,
+ 0x00000000 /* phy 873 */,
+ 0x00000000 /* phy 874 */,
+ 0x00000000 /* phy 875 */,
+ 0x00000000 /* phy 876 */,
+ 0x00000000 /* phy 877 */,
+ 0x00000000 /* phy 878 */,
+ 0x00000000 /* phy 879 */,
+ 0x00000000 /* phy 880 */,
+ 0x00000000 /* phy 881 */,
+ 0x00000000 /* phy 882 */,
+ 0x00000000 /* phy 883 */,
+ 0x00000000 /* phy 884 */,
+ 0x00000000 /* phy 885 */,
+ 0x00000000 /* phy 886 */,
+ 0x00000000 /* phy 887 */,
+ 0x00000000 /* phy 888 */,
+ 0x00000000 /* phy 889 */,
+ 0x00000000 /* phy 890 */,
+ 0x00000000 /* phy 891 */,
+ 0x00000000 /* phy 892 */,
+ 0x00000000 /* phy 893 */,
+ 0x00000000 /* phy 894 */,
+ 0x00000000 /* phy 895 */,
+ 0x00000000 /* phy 896 */,
+ 0x00000000 /* phy 897 */,
+ 0x00000000 /* phy 898 */,
+ 0x00000000 /* phy 899 */,
+ 0x00000000 /* phy 900 */,
+ 0x00000000 /* phy 901 */,
+ 0x00000000 /* phy 902 */,
+ 0x00010000 /* phy 903 */,
+ 0x00000200 /* phy 904 */,
+ 0x00000000 /* phy 905 */,
+ 0x00000000 /* phy 906 */,
+ 0x00000000 /* phy 907 */,
+ 0x00400320 /* phy 908 */,
+ 0x00000040 /* phy 909 */,
+ 0x0000dcba /* phy 910 */,
+ 0x0300dcba /* phy 911 */,
+ 0x00000200 /* phy 912 */,
+ 0x00000000 /* phy 913 */,
+ 0x00000000 /* phy 914 */,
+ 0x00000000 /* phy 915 */,
+ 0x0000002a /* phy 916 */,
+ 0x00000015 /* phy 917 */,
+ 0x00000015 /* phy 918 */,
+ 0x0000002a /* phy 919 */,
+ 0x00000033 /* phy 920 */,
+ 0x0000000c /* phy 921 */,
+ 0x0000000c /* phy 922 */,
+ 0x00000033 /* phy 923 */,
+ 0x00005432 /* phy 924 */,
+ 0x000f0000 /* phy 925 */,
+ 0x0000000f /* phy 926 */,
+ 0x000300ce /* phy 927 */,
+ 0x03000300 /* phy 928 */,
+ 0x03000300 /* phy 929 */,
+ 0x00000300 /* phy 930 */,
+ 0xff020010 /* phy 931 */,
+ 0x00000332 /* phy 932 */,
+ 0x00000000 /* phy 933 */,
+ 0x00000000 /* phy 934 */,
+ 0x00000000 /* phy 935 */,
+ 0x00000000 /* phy 936 */,
+ 0x00000000 /* phy 937 */,
+ 0x00000000 /* phy 938 */,
+ 0x00000000 /* phy 939 */,
+ 0x00000000 /* phy 940 */,
+ 0x00000000 /* phy 941 */,
+ 0x00000000 /* phy 942 */,
+ 0x00000000 /* phy 943 */,
+ 0x00000000 /* phy 944 */,
+ 0x00000000 /* phy 945 */,
+ 0x00000000 /* phy 946 */,
+ 0x00000000 /* phy 947 */,
+ 0x00000000 /* phy 948 */,
+ 0x00000000 /* phy 949 */,
+ 0x00000000 /* phy 950 */,
+ 0x00000000 /* phy 951 */,
+ 0x00000000 /* phy 952 */,
+ 0x00000000 /* phy 953 */,
+ 0x00000000 /* phy 954 */,
+ 0x00000000 /* phy 955 */,
+ 0x00000000 /* phy 956 */,
+ 0x00000000 /* phy 957 */,
+ 0x00000000 /* phy 958 */,
+ 0x00000000 /* phy 959 */,
+ 0x00000000 /* phy 960 */,
+ 0x00000000 /* phy 961 */,
+ 0x00000000 /* phy 962 */,
+ 0x00000000 /* phy 963 */,
+ 0x00000000 /* phy 964 */,
+ 0x00000000 /* phy 965 */,
+ 0x00000000 /* phy 966 */,
+ 0x00000000 /* phy 967 */,
+ 0x00000000 /* phy 968 */,
+ 0x00000000 /* phy 969 */,
+ 0x00000000 /* phy 970 */,
+ 0x00000000 /* phy 971 */,
+ 0x00000000 /* phy 972 */,
+ 0x00000000 /* phy 973 */,
+ 0x00000000 /* phy 974 */,
+ 0x00000000 /* phy 975 */,
+ 0x00000000 /* phy 976 */,
+ 0x00000000 /* phy 977 */,
+ 0x00000000 /* phy 978 */,
+ 0x00000000 /* phy 979 */,
+ 0x00000000 /* phy 980 */,
+ 0x00000000 /* phy 981 */,
+ 0x00000000 /* phy 982 */,
+ 0x00000000 /* phy 983 */,
+ 0x00000000 /* phy 984 */,
+ 0x00000000 /* phy 985 */,
+ 0x00000000 /* phy 986 */,
+ 0x00000000 /* phy 987 */,
+ 0x00000000 /* phy 988 */,
+ 0x00000000 /* phy 989 */,
+ 0x00000000 /* phy 990 */,
+ 0x00000000 /* phy 991 */,
+ 0x00000000 /* phy 992 */,
+ 0x00000000 /* phy 993 */,
+ 0x00000000 /* phy 994 */,
+ 0x00000000 /* phy 995 */,
+ 0x00000000 /* phy 996 */,
+ 0x00000000 /* phy 997 */,
+ 0x00000000 /* phy 998 */,
+ 0x00000000 /* phy 999 */,
+ 0x00000000 /* phy 1000 */,
+ 0x00000000 /* phy 1001 */,
+ 0x00000000 /* phy 1002 */,
+ 0x00000000 /* phy 1003 */,
+ 0x00000000 /* phy 1004 */,
+ 0x00000000 /* phy 1005 */,
+ 0x00000000 /* phy 1006 */,
+ 0x00000000 /* phy 1007 */,
+ 0x00000000 /* phy 1008 */,
+ 0x00000000 /* phy 1009 */,
+ 0x00000000 /* phy 1010 */,
+ 0x00000000 /* phy 1011 */,
+ 0x00000000 /* phy 1012 */,
+ 0x00000000 /* phy 1013 */,
+ 0x00000000 /* phy 1014 */,
+ 0x00000000 /* phy 1015 */,
+ 0x00000000 /* phy 1016 */,
+ 0x00000000 /* phy 1017 */,
+ 0x00000000 /* phy 1018 */,
+ 0x00000000 /* phy 1019 */,
+ 0x00000000 /* phy 1020 */,
+ 0x00000000 /* phy 1021 */,
+ 0x00000000 /* phy 1022 */,
+ 0x00000000 /* phy 1023 */,
+ 0x00000000 /* phy 1024 */,
+ 0x00000100 /* phy 1025 */,
+ 0x05000000 /* phy 1026 */,
+ 0x01000000 /* phy 1027 */,
+ 0x00040003 /* phy 1028 */,
+ 0x00320100 /* phy 1029 */,
+ 0x02005502 /* phy 1030 */,
+ 0x00000000 /* phy 1031 */,
+ 0x00000000 /* phy 1032 */,
+ 0x00000000 /* phy 1033 */,
+ 0x00000004 /* phy 1034 */,
+ 0x00000000 /* phy 1035 */,
+ 0x01010100 /* phy 1036 */,
+ 0x00000200 /* phy 1037 */,
+ 0x01000000 /* phy 1038 */,
+ 0x00000001 /* phy 1039 */,
+ 0x00000000 /* phy 1040 */,
+ 0x00000000 /* phy 1041 */,
+ 0x00000000 /* phy 1042 */,
+ 0x00000000 /* phy 1043 */,
+ 0x0000ffff /* phy 1044 */,
+ 0x05221502 /* phy 1045 */,
+ 0x00000522 /* phy 1046 */,
+ 0x00000000 /* phy 1047 */,
+ 0x000f1f01 /* phy 1048 */,
+ 0x0f1f0f1f /* phy 1049 */,
+ 0x0f1f0f1f /* phy 1050 */,
+ 0x03000003 /* phy 1051 */,
+ 0x00000300 /* phy 1052 */,
+ 0x0b221b02 /* phy 1053 */,
+ 0x09240b22 /* phy 1054 */,
+ 0x00000000 /* phy 1055 */,
+ 0x00000000 /* phy 1056 */,
+ 0x05030000 /* phy 1057 */,
+ 0x14000001 /* phy 1058 */,
+ 0x63c0ce00 /* phy 1059 */,
+ 0x0000000e /* phy 1060 */,
+ 0x001f0fc0 /* phy 1061 */,
+ 0x003c37c0 /* phy 1062 */,
+ 0x1f0fc0ce /* phy 1063 */,
+ 0x3c37c0ce /* phy 1064 */,
+ 0x00024410 /* phy 1065 */,
+ 0x00004410 /* phy 1066 */,
+ 0x00004410 /* phy 1067 */,
+ 0x00024410 /* phy 1068 */,
+ 0x00004410 /* phy 1069 */,
+ 0x000fc0ff /* phy 1070 */,
+ 0x00004410 /* phy 1071 */,
+ 0x000fc0ff /* phy 1072 */,
+ 0x00004410 /* phy 1073 */,
+ 0x1f0fc0ce /* phy 1074 */,
+ 0x00004410 /* phy 1075 */,
+ 0x00004011 /* phy 1076 */,
+ 0x00004410 /* phy 1077 */,
+ 0x00000000 /* phy 1078 */,
+ 0x00000000 /* phy 1079 */,
+ 0x00000000 /* phy 1080 */,
+ 0x00000000 /* phy 1081 */,
+ 0x00000078 /* phy 1082 */,
+ 0x00000000 /* phy 1083 */,
+ 0x00010108 /* phy 1084 */,
+ 0x00000000 /* phy 1085 */,
+ 0x03000000 /* phy 1086 */,
+ 0x00000008 /* phy 1087 */,
+ 0x00000000 /* phy 1088 */,
+ 0x00000000 /* phy 1089 */,
+ 0x00000000 /* phy 1090 */,
+ 0x00000000 /* phy 1091 */,
+ 0x00000001 /* phy 1092 */,
+ 0x00000000 /* phy 1093 */,
+ 0x00000000 /* phy 1094 */,
+ 0x01000000 /* phy 1095 */,
+ 0x00000001 /* phy 1096 */,
+ 0x00000000 /* phy 1097 */,
+ 0x07010000 /* phy 1098 */,
+ 0x00000204 /* phy 1099 */,
+ 0x00000000 /* phy 1100 */
+ },
+ {
+ { 83, 0x00020119 } /* setA */,
+ { 90, 0x01020000 } /* setA */,
+ { 92, 0x0c053eff } /* setA */,
+ { 211, 0x00020119 } /* setA */,
+ { 218, 0x01020000 } /* setA */,
+ { 220, 0x0c053eff } /* setA */,
+ { 339, 0x00020119 } /* setA */,
+ { 346, 0x01020000 } /* setA */,
+ { 348, 0x0c053eff } /* setA */,
+ { 467, 0x00020119 } /* setA */,
+ { 474, 0x01020000 } /* setA */,
+ { 476, 0x0c053eff } /* setA */,
+ { 548, 0x0000033e } /* setA */,
+ { 676, 0x0000033e } /* setA */,
+ { 804, 0x0000033e } /* setA */,
+ { 932, 0x0000033e } /* setA */,
+ { 1045, 0x01221102 } /* setA */,
+ { 1046, 0x00000122 } /* setA */,
+ { 1048, 0x000f1f00 } /* setA */,
+ { 0xFFFFFFFF, 0xFFFFFFFF } /* termination */
+ },
+ {
+ { 83, 0x0003011a } /* setB */,
+ { 85, 0x01000000 } /* setB */,
+ { 90, 0x02030000 } /* setB */,
+ { 92, 0x0c073eff } /* setB */,
+ { 211, 0x0003011a } /* setB */,
+ { 213, 0x01000000 } /* setB */,
+ { 218, 0x02030000 } /* setB */,
+ { 220, 0x0c073eff } /* setB */,
+ { 339, 0x0003011a } /* setB */,
+ { 341, 0x01000000 } /* setB */,
+ { 346, 0x02030000 } /* setB */,
+ { 348, 0x0c073eff } /* setB */,
+ { 467, 0x0003011a } /* setB */,
+ { 469, 0x01000000 } /* setB */,
+ { 474, 0x02030000 } /* setB */,
+ { 476, 0x0c073eff } /* setB */,
+ { 548, 0x0000033e } /* setB */,
+ { 676, 0x0000033e } /* setB */,
+ { 804, 0x0000033e } /* setB */,
+ { 932, 0x0000033e } /* setB */,
+ { 1045, 0x01221102 } /* setB */,
+ { 1046, 0x00000122 } /* setB */,
+ { 1048, 0x000f1f00 } /* setB */,
+ { 0xFFFFFFFF, 0xFFFFFFFF } /* termination */
+ },
+};
+#endif
+
diff --git a/drivers/misc/mnh/mnh-ddr-33-300-600-400.h b/drivers/misc/mnh/mnh-ddr-33-300-600-400.h
deleted file mode 100644
index e2c6920..0000000
--- a/drivers/misc/mnh/mnh-ddr-33-300-600-400.h
+++ /dev/null
@@ -1,1865 +0,0 @@
-/* regconfig tool: v1.0 */
-#ifndef _MNH_DDR_33_300_600_400_
-#define _MNH_DDR_33_300_600_400_
-#include "mnh-ddr.h"
-struct mnh_ddr_reg_config mnh_ddr_33_300_600_400 = {
- {
- 0x00000b00 /* 0 */,
- 0x00000000 /* 1 */,
- 0x00000000 /* 2 */,
- 0x00000000 /* 3 */,
- 0x00000000 /* 4 */,
- 0x00000000 /* 5 */,
- 0x00003416 /* 6 */,
- 0x000208d6 /* 7 */,
- 0x00000005 /* 8 */,
- 0x00000086 /* 9 */,
- 0x0001d4a9 /* 10 */,
- 0x00124e91 /* 11 */,
- 0x00000005 /* 12 */,
- 0x000004b0 /* 13 */,
- 0x0003a9e1 /* 14 */,
- 0x0024a2c1 /* 15 */,
- 0x00000005 /* 16 */,
- 0x00000961 /* 17 */,
- 0x00027100 /* 18 */,
- 0x00186a00 /* 19 */,
- 0x00000005 /* 20 */,
- 0x01000640 /* 21 */,
- 0x01010000 /* 22 */,
- 0x00030100 /* 23 */,
- 0x03020100 /* 24 */,
- 0x0000000e /* 25 */,
- 0x00000022 /* 26 */,
- 0x081c040c /* 27 */,
- 0x081c0c30 /* 28 */,
- 0x07040804 /* 29 */,
- 0x03040903 /* 30 */,
- 0x091a2506 /* 31 */,
- 0x490d180b /* 32 */,
- 0x31160e33 /* 33 */,
- 0x09223108 /* 34 */,
- 0x0820200f /* 35 */,
- 0x00000a0a /* 36 */,
- 0x04001248 /* 37 */,
- 0x0a0a0804 /* 38 */,
- 0x0500a47f /* 39 */,
- 0x0d0d0a05 /* 40 */,
- 0x0a014931 /* 41 */,
- 0x0a0a080a /* 42 */,
- 0x0600db60 /* 43 */,
- 0x02030406 /* 44 */,
- 0x0b040400 /* 45 */,
- 0x0f17160c /* 46 */,
- 0x000a0810 /* 47 */,
- 0x0a140011 /* 48 */,
- 0x0201010a /* 49 */,
- 0x00010002 /* 50 */,
- 0x1f2d1708 /* 51 */,
- 0x1a0d0404 /* 52 */,
- 0x01010011 /* 53 */,
- 0x01000000 /* 54 */,
- 0x00000903 /* 55 */,
- 0x004e00fc /* 56 */,
- 0x009d091b /* 57 */,
- 0x00681241 /* 58 */,
- 0x00000c28 /* 59 */,
- 0x00050003 /* 60 */,
- 0x0006000a /* 61 */,
- 0x1823130a /* 62 */,
- 0x0a000001 /* 63 */,
- 0x00020103 /* 64 */,
- 0x01030a00 /* 65 */,
- 0x11000003 /* 66 */,
- 0x00020103 /* 67 */,
- 0x01030c00 /* 68 */,
- 0x000a0003 /* 69 */,
- 0x00530005 /* 70 */,
- 0x00a6000a /* 71 */,
- 0x006e0006 /* 72 */,
- 0x03050505 /* 73 */,
- 0x03010302 /* 74 */,
- 0x09050505 /* 75 */,
- 0x03020502 /* 76 */,
- 0x13070a07 /* 77 */,
- 0x03030a02 /* 78 */,
- 0x0c050605 /* 79 */,
- 0x03020602 /* 80 */,
- 0x03010000 /* 81 */,
- 0x00010000 /* 82 */,
- 0x00000000 /* 83 */,
- 0x00000000 /* 84 */,
- 0x00000000 /* 85 */,
- 0x01000000 /* 86 */,
- 0x80104002 /* 87 */,
- 0x00040003 /* 88 */,
- 0x00040005 /* 89 */,
- 0x00030000 /* 90 */,
- 0x00050004 /* 91 */,
- 0x00000004 /* 92 */,
- 0x00040003 /* 93 */,
- 0x00040005 /* 94 */,
- 0x00030000 /* 95 */,
- 0x00050004 /* 96 */,
- 0x00000004 /* 97 */,
- 0x01f803f0 /* 98 */,
- 0x246c0000 /* 99 */,
- 0x00001236 /* 100 */,
- 0x24824904 /* 101 */,
- 0x30a00000 /* 102 */,
- 0x00001850 /* 103 */,
- 0x00000000 /* 104 */,
- 0x00000000 /* 105 */,
- 0x00000000 /* 106 */,
- 0x00000000 /* 107 */,
- 0x00000000 /* 108 */,
- 0x00000000 /* 109 */,
- 0x03000000 /* 110 */,
- 0x0a030503 /* 111 */,
- 0x00030603 /* 112 */,
- 0x02030200 /* 113 */,
- 0x00070703 /* 114 */,
- 0x03020302 /* 115 */,
- 0x02000707 /* 116 */,
- 0x07030203 /* 117 */,
- 0x03020007 /* 118 */,
- 0x07070302 /* 119 */,
- 0x0000030e /* 120 */,
- 0x00070004 /* 121 */,
- 0x00000000 /* 122 */,
- 0x00000000 /* 123 */,
- 0x00000000 /* 124 */,
- 0x00000000 /* 125 */,
- 0x00000000 /* 126 */,
- 0x00000000 /* 127 */,
- 0x00000000 /* 128 */,
- 0x00000000 /* 129 */,
- 0x0400c001 /* 130 */,
- 0xc00400c0 /* 131 */,
- 0x00c00400 /* 132 */,
- 0x01000400 /* 133 */,
- 0x00000c00 /* 134 */,
- 0x00000000 /* 135 */,
- 0x00000001 /* 136 */,
- 0x00000002 /* 137 */,
- 0x00000003 /* 138 */,
- 0x0000100e /* 139 */,
- 0x00000000 /* 140 */,
- 0x00000000 /* 141 */,
- 0x00000000 /* 142 */,
- 0x00000000 /* 143 */,
- 0x000e0000 /* 144 */,
- 0x00110007 /* 145 */,
- 0x00110404 /* 146 */,
- 0x003c0078 /* 147 */,
- 0x05050096 /* 148 */,
- 0x00f10096 /* 149 */,
- 0x012d0079 /* 150 */,
- 0x012d0a0a /* 151 */,
- 0x005000a0 /* 152 */,
- 0x060600c8 /* 153 */,
- 0x000000c8 /* 154 */,
- 0x00000000 /* 155 */,
- 0x00000000 /* 156 */,
- 0x00000000 /* 157 */,
- 0x00060000 /* 158 */,
- 0x24461226 /* 159 */,
- 0x31001226 /* 160 */,
- 0x00313131 /* 161 */,
- 0x00000000 /* 162 */,
- 0x4d4d4d4d /* 163 */,
- 0x4d4d4dc0 /* 164 */,
- 0x0000004d /* 165 */,
- 0x00000000 /* 166 */,
- 0x06060606 /* 167 */,
- 0x01000000 /* 168 */,
- 0x00000001 /* 169 */,
- 0x00000000 /* 170 */,
- 0x01000000 /* 171 */,
- 0x00000001 /* 172 */,
- 0x00000000 /* 173 */,
- 0x00000000 /* 174 */,
- 0x00000000 /* 175 */,
- 0x00000000 /* 176 */,
- 0x00000000 /* 177 */,
- 0x00000000 /* 178 */,
- 0x00000000 /* 179 */,
- 0x00000000 /* 180 */,
- 0x00000000 /* 181 */,
- 0x00000000 /* 182 */,
- 0x11000000 /* 183 */,
- 0x000c1815 /* 184 */,
- 0x00000000 /* 185 */,
- 0x00000000 /* 186 */,
- 0x00000000 /* 187 */,
- 0x00000000 /* 188 */,
- 0x00000000 /* 189 */,
- 0x00000000 /* 190 */,
- 0x00000000 /* 191 */,
- 0x00000000 /* 192 */,
- 0x00000000 /* 193 */,
- 0x00000000 /* 194 */,
- 0x00000000 /* 195 */,
- 0x00000000 /* 196 */,
- 0x00000000 /* 197 */,
- 0x00000000 /* 198 */,
- 0x00000000 /* 199 */,
- 0x00000000 /* 200 */,
- 0x00000000 /* 201 */,
- 0x00000000 /* 202 */,
- 0x00020003 /* 203 */,
- 0x00400100 /* 204 */,
- 0x00080043 /* 205 */,
- 0x01000200 /* 206 */,
- 0x02580040 /* 207 */,
- 0x00020012 /* 208 */,
- 0x00400100 /* 209 */,
- 0x002504b1 /* 210 */,
- 0x01000200 /* 211 */,
- 0x03200040 /* 212 */,
- 0x00000018 /* 213 */,
- 0x001e0004 /* 214 */,
- 0x0028003d /* 215 */,
- 0x02010000 /* 216 */,
- 0xff0b0000 /* 217 */,
- 0x010101ff /* 218 */,
- 0x01010101 /* 219 */,
- 0x00010b01 /* 220 */,
- 0x01010000 /* 221 */,
- 0x00000004 /* 222 */,
- 0x01000001 /* 223 */,
- 0x02020202 /* 224 */,
- 0x00000001 /* 225 */,
- 0x00000000 /* 226 */,
- 0x00000000 /* 227 */,
- 0x00000000 /* 228 */,
- 0x00000000 /* 229 */,
- 0x00000000 /* 230 */,
- 0x00000000 /* 231 */,
- 0x00000000 /* 232 */,
- 0x00000000 /* 233 */,
- 0x00000000 /* 234 */,
- 0x00000000 /* 235 */,
- 0x00000000 /* 236 */,
- 0x00000000 /* 237 */,
- 0x00000000 /* 238 */,
- 0x00000000 /* 239 */,
- 0x00000000 /* 240 */,
- 0x00000000 /* 241 */,
- 0x00000000 /* 242 */,
- 0x00000000 /* 243 */,
- 0x00000000 /* 244 */,
- 0x00000000 /* 245 */,
- 0x00000000 /* 246 */,
- 0x00000000 /* 247 */,
- 0x00010001 /* 248 */,
- 0x01010001 /* 249 */,
- 0x00010101 /* 250 */,
- 0x08080300 /* 251 */,
- 0x0800080a /* 252 */,
- 0x000b0b0c /* 253 */,
- 0x05030100 /* 254 */,
- 0x00000003 /* 255 */,
- 0x00000000 /* 256 */,
- 0x00010000 /* 257 */,
- 0x00280d00 /* 258 */,
- 0x00000001 /* 259 */,
- 0x00000001 /* 260 */,
- 0x00000000 /* 261 */,
- 0x00000000 /* 262 */,
- 0x00000000 /* 263 */,
- 0x00000000 /* 264 */,
- 0x00000000 /* 265 */,
- 0x00000000 /* 266 */,
- 0x00000000 /* 267 */,
- 0x00000000 /* 268 */,
- 0x00000000 /* 269 */,
- 0x00000000 /* 270 */,
- 0x01000000 /* 271 */,
- 0x00000001 /* 272 */,
- 0x00010100 /* 273 */,
- 0x00000000 /* 274 */,
- 0x00000000 /* 275 */,
- 0x00000000 /* 276 */,
- 0x00000000 /* 277 */,
- 0x00000000 /* 278 */,
- 0x00000000 /* 279 */,
- 0x00000000 /* 280 */,
- 0x00000000 /* 281 */,
- 0x00000000 /* 282 */,
- 0x00000000 /* 283 */,
- 0x00000000 /* 284 */,
- 0x00000000 /* 285 */,
- 0x00000000 /* 286 */,
- 0x00000000 /* 287 */,
- 0x00000000 /* 288 */,
- 0x00000000 /* 289 */,
- 0x00000000 /* 290 */,
- 0x00000000 /* 291 */,
- 0x00000000 /* 292 */,
- 0x00000000 /* 293 */,
- 0x00000000 /* 294 */,
- 0x000556aa /* 295 */,
- 0x000aaaaa /* 296 */,
- 0x000aa955 /* 297 */,
- 0x00055555 /* 298 */,
- 0x000b3133 /* 299 */,
- 0x0004cd33 /* 300 */,
- 0x0004cecc /* 301 */,
- 0x000b32cc /* 302 */,
- 0x00010300 /* 303 */,
- 0x00000100 /* 304 */,
- 0x00000000 /* 305 */,
- 0x00000000 /* 306 */,
- 0x00000000 /* 307 */,
- 0x00000000 /* 308 */,
- 0x00000000 /* 309 */,
- 0x00000000 /* 310 */,
- 0x00000000 /* 311 */,
- 0x00000000 /* 312 */,
- 0x00000000 /* 313 */,
- 0x00000000 /* 314 */,
- 0x00000101 /* 315 */,
- 0x00010100 /* 316 */,
- 0x00000001 /* 317 */,
- 0x00000000 /* 318 */,
- 0x0003ffff /* 319 */,
- 0x00000000 /* 320 */,
- 0x0003ffff /* 321 */,
- 0x00000000 /* 322 */,
- 0x0003ffff /* 323 */,
- 0x00000000 /* 324 */,
- 0x0003ffff /* 325 */,
- 0x00000000 /* 326 */,
- 0x0003ffff /* 327 */,
- 0x00000000 /* 328 */,
- 0x0003ffff /* 329 */,
- 0x00000000 /* 330 */,
- 0x0003ffff /* 331 */,
- 0x00000000 /* 332 */,
- 0x0003ffff /* 333 */,
- 0x00000000 /* 334 */,
- 0x0003ffff /* 335 */,
- 0x00000000 /* 336 */,
- 0x0003ffff /* 337 */,
- 0x00000000 /* 338 */,
- 0x0003ffff /* 339 */,
- 0x00000000 /* 340 */,
- 0x0003ffff /* 341 */,
- 0x00000000 /* 342 */,
- 0x0003ffff /* 343 */,
- 0x00000000 /* 344 */,
- 0x0003ffff /* 345 */,
- 0x00000000 /* 346 */,
- 0x0003ffff /* 347 */,
- 0x00000000 /* 348 */,
- 0x0003ffff /* 349 */,
- 0x00000000 /* 350 */,
- 0x0003ffff /* 351 */,
- 0x00000000 /* 352 */,
- 0x0003ffff /* 353 */,
- 0x00000000 /* 354 */,
- 0x0003ffff /* 355 */,
- 0x00000000 /* 356 */,
- 0x0003ffff /* 357 */,
- 0x00000000 /* 358 */,
- 0x0003ffff /* 359 */,
- 0x00000000 /* 360 */,
- 0x0003ffff /* 361 */,
- 0x00000000 /* 362 */,
- 0x0003ffff /* 363 */,
- 0x00000000 /* 364 */,
- 0x0003ffff /* 365 */,
- 0x00000000 /* 366 */,
- 0x0003ffff /* 367 */,
- 0x00000000 /* 368 */,
- 0x0003ffff /* 369 */,
- 0x00000000 /* 370 */,
- 0x0003ffff /* 371 */,
- 0x00000000 /* 372 */,
- 0x0003ffff /* 373 */,
- 0x00000000 /* 374 */,
- 0x0003ffff /* 375 */,
- 0x00000000 /* 376 */,
- 0x0003ffff /* 377 */,
- 0x00000000 /* 378 */,
- 0x0003ffff /* 379 */,
- 0x00000000 /* 380 */,
- 0x0303ffff /* 381 */,
- 0xffffffff /* 382 */,
- 0x00030000 /* 383 */,
- 0xffffffff /* 384 */,
- 0x00030000 /* 385 */,
- 0xffffffff /* 386 */,
- 0x00030000 /* 387 */,
- 0xffffffff /* 388 */,
- 0x00030000 /* 389 */,
- 0xffffffff /* 390 */,
- 0x00030000 /* 391 */,
- 0xffffffff /* 392 */,
- 0x00030000 /* 393 */,
- 0xffffffff /* 394 */,
- 0x00030000 /* 395 */,
- 0xffffffff /* 396 */,
- 0x00030000 /* 397 */,
- 0xffffffff /* 398 */,
- 0x00030000 /* 399 */,
- 0xffffffff /* 400 */,
- 0x00030000 /* 401 */,
- 0xffffffff /* 402 */,
- 0x00030000 /* 403 */,
- 0xffffffff /* 404 */,
- 0x00030000 /* 405 */,
- 0xffffffff /* 406 */,
- 0x00030000 /* 407 */,
- 0xffffffff /* 408 */,
- 0x00030000 /* 409 */,
- 0xffffffff /* 410 */,
- 0x00030000 /* 411 */,
- 0xffffffff /* 412 */,
- 0x00030000 /* 413 */,
- 0xffffffff /* 414 */,
- 0x00030000 /* 415 */,
- 0xffffffff /* 416 */,
- 0x00030000 /* 417 */,
- 0xffffffff /* 418 */,
- 0x00030000 /* 419 */,
- 0xffffffff /* 420 */,
- 0x00030000 /* 421 */,
- 0xffffffff /* 422 */,
- 0x00030000 /* 423 */,
- 0xffffffff /* 424 */,
- 0x00030000 /* 425 */,
- 0xffffffff /* 426 */,
- 0x00030000 /* 427 */,
- 0xffffffff /* 428 */,
- 0x00030000 /* 429 */,
- 0xffffffff /* 430 */,
- 0x00030000 /* 431 */,
- 0xffffffff /* 432 */,
- 0x00030000 /* 433 */,
- 0xffffffff /* 434 */,
- 0x00030000 /* 435 */,
- 0xffffffff /* 436 */,
- 0x00030000 /* 437 */,
- 0xffffffff /* 438 */,
- 0x00030000 /* 439 */,
- 0xffffffff /* 440 */,
- 0x00030000 /* 441 */,
- 0xffffffff /* 442 */,
- 0x00030000 /* 443 */,
- 0xffffffff /* 444 */,
- 0x00000000 /* 445 */,
- 0x02020200 /* 446 */,
- 0x00640002 /* 447 */,
- 0x01010101 /* 448 */,
- 0x00006401 /* 449 */,
- 0x00000000 /* 450 */,
- 0x1a140000 /* 451 */,
- 0x00001c22 /* 452 */,
- 0x0001f808 /* 453 */,
- 0x00000000 /* 454 */,
- 0x00000000 /* 455 */,
- 0x000001f8 /* 456 */,
- 0x000013b0 /* 457 */,
- 0x12360205 /* 458 */,
- 0x00000000 /* 459 */,
- 0x00000000 /* 460 */,
- 0x00001236 /* 461 */,
- 0x0000b61c /* 462 */,
- 0x2482060d /* 463 */,
- 0x00000000 /* 464 */,
- 0x00000000 /* 465 */,
- 0x00002482 /* 466 */,
- 0x00016d14 /* 467 */,
- 0x18500a15 /* 468 */,
- 0x00000000 /* 469 */,
- 0x00000000 /* 470 */,
- 0x00001850 /* 471 */,
- 0x0000f320 /* 472 */,
- 0x0202060c /* 473 */,
- 0x03020202 /* 474 */,
- 0x00001803 /* 475 */,
- 0x00000000 /* 476 */,
- 0x00000000 /* 477 */,
- 0x00001403 /* 478 */,
- 0x000007d0 /* 479 */,
- 0x00000000 /* 480 */,
- 0x00000000 /* 481 */,
- 0x00030000 /* 482 */,
- 0x00070019 /* 483 */,
- 0x000c001e /* 484 */,
- 0x00120024 /* 485 */,
- 0x000e0020 /* 486 */,
- 0x00000000 /* 487 */,
- 0x00000000 /* 488 */,
- 0x02000000 /* 489 */,
- 0x05020202 /* 490 */,
- 0x17050d01 /* 491 */,
- 0x05050d09 /* 492 */,
- 0x01010001 /* 493 */,
- 0x01010101 /* 494 */,
- 0x01000101 /* 495 */,
- 0x01000100 /* 496 */,
- 0x00010001 /* 497 */,
- 0x00010002 /* 498 */,
- 0x00020100 /* 499 */,
- 0x00020002 /* 500 */,
- 0x00000000 /* 501 */,
- 0x00000400 /* 502 */,
- 0x00240018 /* 503 */,
- 0x0049011c /* 504 */,
- 0x00300241 /* 505 */,
- 0x0040017e /* 506 */,
- 0x00120103 /* 507 */,
- 0x00000002 /* 508 */,
- 0x00000200 /* 509 */,
- 0x00000200 /* 510 */,
- 0x00000200 /* 511 */,
- 0x00000200 /* 512 */,
- 0x00000200 /* 513 */,
- 0x00000200 /* 514 */,
- 0x00000200 /* 515 */,
- 0x00000200 /* 516 */,
- 0x00000200 /* 517 */,
- 0x00000200 /* 518 */,
- 0x00000200 /* 519 */,
- 0x00000200 /* 520 */,
- 0x00000200 /* 521 */,
- 0x00000200 /* 522 */,
- 0x00000200 /* 523 */,
- 0x00000200 /* 524 */,
- 0x00000001 /* 525 */,
- 0x000f0000 /* 526 */,
- 0x000f000f /* 527 */,
- 0x000f000f /* 528 */,
- 0x000f000f /* 529 */,
- 0x000f000f /* 530 */,
- 0x000f000f /* 531 */,
- 0x000f000f /* 532 */,
- 0x000f000f /* 533 */,
- 0x000f000f /* 534 */,
- 0x000f000f /* 535 */,
- 0x000f000f /* 536 */,
- 0x000f000f /* 537 */,
- 0x000f000f /* 538 */,
- 0x000f000f /* 539 */,
- 0x000f000f /* 540 */,
- 0x000f000f /* 541 */,
- 0x000f000f /* 542 */,
- 0x000f000f /* 543 */,
- 0x000f000f /* 544 */,
- 0x000f000f /* 545 */,
- 0x000f000f /* 546 */,
- 0x000f000f /* 547 */,
- 0x000f000f /* 548 */,
- 0x000f000f /* 549 */,
- 0x000f000f /* 550 */,
- 0x000f000f /* 551 */,
- 0x000f000f /* 552 */,
- 0x000f000f /* 553 */,
- 0x000f000f /* 554 */,
- 0x000f000f /* 555 */,
- 0x000f000f /* 556 */,
- 0x000f000f /* 557 */,
- 0x0000000f /* 558 */
- },
- {
- 0x00000b00 /* 0 */,
- 0x00000101 /* 1 */,
- 0x00640000 /* 2 */,
- 0x00030a00 /* 3 */,
- 0x000001f8 /* 4 */,
- 0x000246c0 /* 5 */,
- 0x00001236 /* 6 */,
- 0x00049040 /* 7 */,
- 0x00002482 /* 8 */,
- 0x00030a00 /* 9 */,
- 0x00001850 /* 10 */,
- 0x000001f8 /* 11 */,
- 0x00000200 /* 12 */,
- 0x00000200 /* 13 */,
- 0x00000200 /* 14 */,
- 0x00000200 /* 15 */,
- 0x00001236 /* 16 */,
- 0x00000200 /* 17 */,
- 0x00000200 /* 18 */,
- 0x00000200 /* 19 */,
- 0x00000200 /* 20 */,
- 0x00002482 /* 21 */,
- 0x00000200 /* 22 */,
- 0x00000200 /* 23 */,
- 0x00000200 /* 24 */,
- 0x00000200 /* 25 */,
- 0x00001850 /* 26 */,
- 0x00000200 /* 27 */,
- 0x00000200 /* 28 */,
- 0x00000200 /* 29 */,
- 0x00000200 /* 30 */,
- 0x00010000 /* 31 */,
- 0x0000000f /* 32 */,
- 0x01000003 /* 33 */,
- 0x00000000 /* 34 */,
- 0x00000000 /* 35 */,
- 0x00000000 /* 36 */,
- 0x00000000 /* 37 */,
- 0x00000000 /* 38 */,
- 0x00000000 /* 39 */,
- 0x00000000 /* 40 */,
- 0x00000000 /* 41 */,
- 0x00000000 /* 42 */,
- 0x00000000 /* 43 */,
- 0x00000000 /* 44 */,
- 0x00000000 /* 45 */,
- 0x00000000 /* 46 */,
- 0x00000000 /* 47 */,
- 0x00000000 /* 48 */,
- 0x01000000 /* 49 */,
- 0x29030001 /* 50 */,
- 0x08384737 /* 51 */,
- 0x00010002 /* 52 */,
- 0x00000009 /* 53 */,
- 0x000000fc /* 54 */,
- 0x0000004e /* 55 */,
- 0x0000091b /* 56 */,
- 0x0000009d /* 57 */,
- 0x00001241 /* 58 */,
- 0x00000068 /* 59 */,
- 0x00000c28 /* 60 */,
- 0x00000000 /* 61 */,
- 0x00000000 /* 62 */,
- 0x00000000 /* 63 */,
- 0x00000000 /* 64 */,
- 0x00000000 /* 65 */,
- 0x00000000 /* 66 */,
- 0x00000000 /* 67 */,
- 0x00000000 /* 68 */,
- 0x00000000 /* 69 */,
- 0x01000000 /* 70 */,
- 0x04040401 /* 71 */,
- 0x0a000004 /* 72 */,
- 0x01010128 /* 73 */,
- 0x00000001 /* 74 */,
- 0x00000000 /* 75 */,
- 0x00030003 /* 76 */,
- 0x00000018 /* 77 */,
- 0x00000000 /* 78 */,
- 0x00000000 /* 79 */,
- 0x00060002 /* 80 */,
- 0x00010001 /* 81 */,
- 0x01010001 /* 82 */,
- 0x02010100 /* 83 */,
- 0x05030002 /* 84 */,
- 0x0a040507 /* 85 */,
- 0x00000a12 /* 86 */,
- 0x00000000 /* 87 */,
- 0x00000000 /* 88 */,
- 0x00000000 /* 89 */,
- 0x001e0303 /* 90 */,
- 0x000007d0 /* 91 */,
- 0x01010300 /* 92 */,
- 0x01010101 /* 93 */,
- 0x00000101 /* 94 */,
- 0x00000000 /* 95 */,
- 0x00000000 /* 96 */,
- 0x01000000 /* 97 */,
- 0x00000101 /* 98 */,
- 0x55555a5a /* 99 */,
- 0x55555a5a /* 100 */,
- 0x55555a5a /* 101 */,
- 0x55555a5a /* 102 */,
- 0x0d050001 /* 103 */,
- 0x02000c15 /* 104 */,
- 0x00060a06 /* 105 */,
- 0x0d170d05 /* 106 */,
- 0x02020202 /* 107 */,
- 0x00000000 /* 108 */,
- 0x00000003 /* 109 */,
- 0x00170300 /* 110 */,
- 0x00070019 /* 111 */,
- 0x000c001e /* 112 */,
- 0x00120024 /* 113 */,
- 0x000e0020 /* 114 */,
- 0x00000000 /* 115 */,
- 0x00000000 /* 116 */,
- 0x01010100 /* 117 */,
- 0x00000001 /* 118 */,
- 0x010a140a /* 119 */,
- 0x00010011 /* 120 */,
- 0x00020096 /* 121 */,
- 0x0002012d /* 122 */,
- 0x010a00c8 /* 123 */,
- 0x00120100 /* 124 */,
- 0x01000012 /* 125 */,
- 0x00970097 /* 126 */,
- 0x012e0100 /* 127 */,
- 0x0100012e /* 128 */,
- 0x00c900c9 /* 129 */,
- 0x02044d4d /* 130 */,
- 0x06051001 /* 131 */,
- 0x02070807 /* 132 */,
- 0x00c00002 /* 133 */,
- 0x00c01000 /* 134 */,
- 0x00c01000 /* 135 */,
- 0x00c01000 /* 136 */,
- 0x04041000 /* 137 */,
- 0x0e020100 /* 138 */,
- 0x01001015 /* 139 */,
- 0x00001901 /* 140 */,
- 0x0026001d /* 141 */,
- 0x005f001d /* 142 */,
- 0x4d030000 /* 143 */,
- 0x0102044d /* 144 */,
- 0x34000000 /* 145 */,
- 0x00000000 /* 146 */,
- 0x00000000 /* 147 */,
- 0x01010000 /* 148 */,
- 0x00000101 /* 149 */,
- 0x31000600 /* 150 */,
- 0x064d4d00 /* 151 */,
- 0x00311226 /* 152 */,
- 0x46064d4d /* 153 */,
- 0x4d003124 /* 154 */,
- 0x1226064d /* 155 */,
- 0x4d4d0031 /* 156 */,
- 0x00060006 /* 157 */,
- 0x4d4d0031 /* 158 */,
- 0x31122600 /* 159 */,
- 0x064d4d00 /* 160 */,
- 0x00312446 /* 161 */,
- 0x26064d4d /* 162 */,
- 0x4d003112 /* 163 */,
- 0x0000064d /* 164 */,
- 0x00001101 /* 165 */,
- 0x012d0096 /* 166 */,
- 0x010a00c8 /* 167 */,
- 0x0d11060a /* 168 */,
- 0x0002080c /* 169 */,
- 0x00020002 /* 170 */,
- 0x00020002 /* 171 */,
- 0x00000000 /* 172 */,
- 0x00000000 /* 173 */,
- 0x04000000 /* 174 */,
- 0x00080100 /* 175 */,
- 0x000001f8 /* 176 */,
- 0x000013b0 /* 177 */,
- 0x00001236 /* 178 */,
- 0x0000b61c /* 179 */,
- 0x00002482 /* 180 */,
- 0x00016d14 /* 181 */,
- 0x00001850 /* 182 */,
- 0x0000f320 /* 183 */,
- 0x00010000 /* 184 */,
- 0x02000101 /* 185 */,
- 0x01030001 /* 186 */,
- 0x00010400 /* 187 */,
- 0x06000105 /* 188 */,
- 0x01070001 /* 189 */,
- 0x00000000 /* 190 */,
- 0x00000000 /* 191 */
- },
- {
- 0x76543210 /* 0 */,
- 0x0004f008 /* 1 */,
- 0x00020133 /* 2 */,
- 0x00000000 /* 3 */,
- 0x00000000 /* 4 */,
- 0x00010000 /* 5 */,
- 0x01cece0e /* 6 */,
- 0x02cece0e /* 7 */,
- 0x00010000 /* 8 */,
- 0x00000004 /* 9 */,
- 0x00000001 /* 10 */,
- 0x00000000 /* 11 */,
- 0x00000000 /* 12 */,
- 0x00000100 /* 13 */,
- 0x001700c0 /* 14 */,
- 0x020100cc /* 15 */,
- 0x00030066 /* 16 */,
- 0x00000000 /* 17 */,
- 0x00000000 /* 18 */,
- 0x00000000 /* 19 */,
- 0x00000000 /* 20 */,
- 0x00000000 /* 21 */,
- 0x00000000 /* 22 */,
- 0x00000000 /* 23 */,
- 0x00000000 /* 24 */,
- 0x04080000 /* 25 */,
- 0x04080400 /* 26 */,
- 0x10000000 /* 27 */,
- 0x0c008007 /* 28 */,
- 0x00000100 /* 29 */,
- 0x00000100 /* 30 */,
- 0x55555555 /* 31 */,
- 0xaaaaaaaa /* 32 */,
- 0x55555555 /* 33 */,
- 0xaaaaaaaa /* 34 */,
- 0x00015555 /* 35 */,
- 0x00000000 /* 36 */,
- 0x00000000 /* 37 */,
- 0x00000000 /* 38 */,
- 0x00000000 /* 39 */,
- 0x00000000 /* 40 */,
- 0x00000000 /* 41 */,
- 0x00000000 /* 42 */,
- 0x00000000 /* 43 */,
- 0x00000000 /* 44 */,
- 0x00000000 /* 45 */,
- 0x00000000 /* 46 */,
- 0x00000000 /* 47 */,
- 0x00000000 /* 48 */,
- 0x00000000 /* 49 */,
- 0x00000000 /* 50 */,
- 0x00000000 /* 51 */,
- 0x00000000 /* 52 */,
- 0x00000000 /* 53 */,
- 0x00000000 /* 54 */,
- 0x00000004 /* 55 */,
- 0x00000020 /* 56 */,
- 0x00000000 /* 57 */,
- 0x00000000 /* 58 */,
- 0x00000000 /* 59 */,
- 0x00000000 /* 60 */,
- 0x00000000 /* 61 */,
- 0x00000000 /* 62 */,
- 0x00000000 /* 63 */,
- 0x02800280 /* 64 */,
- 0x02800280 /* 65 */,
- 0x02800280 /* 66 */,
- 0x02800280 /* 67 */,
- 0x00000280 /* 68 */,
- 0x00000000 /* 69 */,
- 0x00000000 /* 70 */,
- 0x00000000 /* 71 */,
- 0x00000000 /* 72 */,
- 0x00000000 /* 73 */,
- 0x00800080 /* 74 */,
- 0x00800080 /* 75 */,
- 0x00800080 /* 76 */,
- 0x00800080 /* 77 */,
- 0x00800080 /* 78 */,
- 0x00800080 /* 79 */,
- 0x00800080 /* 80 */,
- 0x00800080 /* 81 */,
- 0x00800080 /* 82 */,
- 0x00020019 /* 83 */,
- 0x000001d0 /* 84 */,
- 0x00000000 /* 85 */,
- 0x00000200 /* 86 */,
- 0x00000004 /* 87 */,
- 0x51816152 /* 88 */,
- 0xc0c08161 /* 89 */,
- 0x00010000 /* 90 */,
- 0x02001000 /* 91 */,
- 0x0c053eff /* 92 */,
- 0x000f0c18 /* 93 */,
- 0x01000140 /* 94 */,
- 0x00000c20 /* 95 */,
- 0x00000000 /* 96 */,
- 0x00000000 /* 97 */,
- 0x00000000 /* 98 */,
- 0x00000000 /* 99 */,
- 0x00000000 /* 100 */,
- 0x00000000 /* 101 */,
- 0x00000000 /* 102 */,
- 0x00000000 /* 103 */,
- 0x00000000 /* 104 */,
- 0x00000000 /* 105 */,
- 0x00000000 /* 106 */,
- 0x00000000 /* 107 */,
- 0x00000000 /* 108 */,
- 0x00000000 /* 109 */,
- 0x00000000 /* 110 */,
- 0x00000000 /* 111 */,
- 0x00000000 /* 112 */,
- 0x00000000 /* 113 */,
- 0x00000000 /* 114 */,
- 0x00000000 /* 115 */,
- 0x00000000 /* 116 */,
- 0x00000000 /* 117 */,
- 0x00000000 /* 118 */,
- 0x00000000 /* 119 */,
- 0x00000000 /* 120 */,
- 0x00000000 /* 121 */,
- 0x00000000 /* 122 */,
- 0x00000000 /* 123 */,
- 0x00000000 /* 124 */,
- 0x00000000 /* 125 */,
- 0x00000000 /* 126 */,
- 0x00000000 /* 127 */,
- 0x76543210 /* 128 */,
- 0x0004f008 /* 129 */,
- 0x00020133 /* 130 */,
- 0x00000000 /* 131 */,
- 0x00000000 /* 132 */,
- 0x00010000 /* 133 */,
- 0x01cece0e /* 134 */,
- 0x02cece0e /* 135 */,
- 0x00010000 /* 136 */,
- 0x00000004 /* 137 */,
- 0x00000001 /* 138 */,
- 0x00000000 /* 139 */,
- 0x00000000 /* 140 */,
- 0x00000100 /* 141 */,
- 0x001700c0 /* 142 */,
- 0x020100cc /* 143 */,
- 0x00030066 /* 144 */,
- 0x00000000 /* 145 */,
- 0x00000000 /* 146 */,
- 0x00000000 /* 147 */,
- 0x00000000 /* 148 */,
- 0x00000000 /* 149 */,
- 0x00000000 /* 150 */,
- 0x00000000 /* 151 */,
- 0x00000000 /* 152 */,
- 0x04080000 /* 153 */,
- 0x04080400 /* 154 */,
- 0x10000000 /* 155 */,
- 0x0c008007 /* 156 */,
- 0x00000100 /* 157 */,
- 0x00000100 /* 158 */,
- 0x55555555 /* 159 */,
- 0xaaaaaaaa /* 160 */,
- 0x55555555 /* 161 */,
- 0xaaaaaaaa /* 162 */,
- 0x00005555 /* 163 */,
- 0x00000000 /* 164 */,
- 0x00000000 /* 165 */,
- 0x00000000 /* 166 */,
- 0x00000000 /* 167 */,
- 0x00000000 /* 168 */,
- 0x00000000 /* 169 */,
- 0x00000000 /* 170 */,
- 0x00000000 /* 171 */,
- 0x00000000 /* 172 */,
- 0x00000000 /* 173 */,
- 0x00000000 /* 174 */,
- 0x00000000 /* 175 */,
- 0x00000000 /* 176 */,
- 0x00000000 /* 177 */,
- 0x00000000 /* 178 */,
- 0x00000000 /* 179 */,
- 0x00000000 /* 180 */,
- 0x00000000 /* 181 */,
- 0x00000000 /* 182 */,
- 0x00000004 /* 183 */,
- 0x00000020 /* 184 */,
- 0x00000000 /* 185 */,
- 0x00000000 /* 186 */,
- 0x00000000 /* 187 */,
- 0x00000000 /* 188 */,
- 0x00000000 /* 189 */,
- 0x00000000 /* 190 */,
- 0x00000000 /* 191 */,
- 0x02800280 /* 192 */,
- 0x02800280 /* 193 */,
- 0x02800280 /* 194 */,
- 0x02800280 /* 195 */,
- 0x00000280 /* 196 */,
- 0x00000000 /* 197 */,
- 0x00000000 /* 198 */,
- 0x00000000 /* 199 */,
- 0x00000000 /* 200 */,
- 0x00000000 /* 201 */,
- 0x00800080 /* 202 */,
- 0x00800080 /* 203 */,
- 0x00800080 /* 204 */,
- 0x00800080 /* 205 */,
- 0x00800080 /* 206 */,
- 0x00800080 /* 207 */,
- 0x00800080 /* 208 */,
- 0x00800080 /* 209 */,
- 0x00800080 /* 210 */,
- 0x00020019 /* 211 */,
- 0x000001d0 /* 212 */,
- 0x00000000 /* 213 */,
- 0x00000200 /* 214 */,
- 0x00000004 /* 215 */,
- 0x51816152 /* 216 */,
- 0xc0c08161 /* 217 */,
- 0x00010000 /* 218 */,
- 0x02001000 /* 219 */,
- 0x0c053eff /* 220 */,
- 0x000f0c18 /* 221 */,
- 0x01000140 /* 222 */,
- 0x00000c20 /* 223 */,
- 0x00000000 /* 224 */,
- 0x00000000 /* 225 */,
- 0x00000000 /* 226 */,
- 0x00000000 /* 227 */,
- 0x00000000 /* 228 */,
- 0x00000000 /* 229 */,
- 0x00000000 /* 230 */,
- 0x00000000 /* 231 */,
- 0x00000000 /* 232 */,
- 0x00000000 /* 233 */,
- 0x00000000 /* 234 */,
- 0x00000000 /* 235 */,
- 0x00000000 /* 236 */,
- 0x00000000 /* 237 */,
- 0x00000000 /* 238 */,
- 0x00000000 /* 239 */,
- 0x00000000 /* 240 */,
- 0x00000000 /* 241 */,
- 0x00000000 /* 242 */,
- 0x00000000 /* 243 */,
- 0x00000000 /* 244 */,
- 0x00000000 /* 245 */,
- 0x00000000 /* 246 */,
- 0x00000000 /* 247 */,
- 0x00000000 /* 248 */,
- 0x00000000 /* 249 */,
- 0x00000000 /* 250 */,
- 0x00000000 /* 251 */,
- 0x00000000 /* 252 */,
- 0x00000000 /* 253 */,
- 0x00000000 /* 254 */,
- 0x00000000 /* 255 */,
- 0x76543210 /* 256 */,
- 0x0004f008 /* 257 */,
- 0x00020133 /* 258 */,
- 0x00000000 /* 259 */,
- 0x00000000 /* 260 */,
- 0x00010000 /* 261 */,
- 0x01cece0e /* 262 */,
- 0x02cece0e /* 263 */,
- 0x00010000 /* 264 */,
- 0x00000004 /* 265 */,
- 0x00000001 /* 266 */,
- 0x00000000 /* 267 */,
- 0x00000000 /* 268 */,
- 0x00000100 /* 269 */,
- 0x001700c0 /* 270 */,
- 0x020100cc /* 271 */,
- 0x00030066 /* 272 */,
- 0x00000000 /* 273 */,
- 0x00000000 /* 274 */,
- 0x00000000 /* 275 */,
- 0x00000000 /* 276 */,
- 0x00000000 /* 277 */,
- 0x00000000 /* 278 */,
- 0x00000000 /* 279 */,
- 0x00000000 /* 280 */,
- 0x04080000 /* 281 */,
- 0x04080400 /* 282 */,
- 0x10000000 /* 283 */,
- 0x0c008007 /* 284 */,
- 0x00000100 /* 285 */,
- 0x00000100 /* 286 */,
- 0x55555555 /* 287 */,
- 0xaaaaaaaa /* 288 */,
- 0x55555555 /* 289 */,
- 0xaaaaaaaa /* 290 */,
- 0x00015555 /* 291 */,
- 0x00000000 /* 292 */,
- 0x00000000 /* 293 */,
- 0x00000000 /* 294 */,
- 0x00000000 /* 295 */,
- 0x00000000 /* 296 */,
- 0x00000000 /* 297 */,
- 0x00000000 /* 298 */,
- 0x00000000 /* 299 */,
- 0x00000000 /* 300 */,
- 0x00000000 /* 301 */,
- 0x00000000 /* 302 */,
- 0x00000000 /* 303 */,
- 0x00000000 /* 304 */,
- 0x00000000 /* 305 */,
- 0x00000000 /* 306 */,
- 0x00000000 /* 307 */,
- 0x00000000 /* 308 */,
- 0x00000000 /* 309 */,
- 0x00000000 /* 310 */,
- 0x00000004 /* 311 */,
- 0x00000020 /* 312 */,
- 0x00000000 /* 313 */,
- 0x00000000 /* 314 */,
- 0x00000000 /* 315 */,
- 0x00000000 /* 316 */,
- 0x00000000 /* 317 */,
- 0x00000000 /* 318 */,
- 0x00000000 /* 319 */,
- 0x02800280 /* 320 */,
- 0x02800280 /* 321 */,
- 0x02800280 /* 322 */,
- 0x02800280 /* 323 */,
- 0x00000280 /* 324 */,
- 0x00000000 /* 325 */,
- 0x00000000 /* 326 */,
- 0x00000000 /* 327 */,
- 0x00000000 /* 328 */,
- 0x00000000 /* 329 */,
- 0x00800080 /* 330 */,
- 0x00800080 /* 331 */,
- 0x00800080 /* 332 */,
- 0x00800080 /* 333 */,
- 0x00800080 /* 334 */,
- 0x00800080 /* 335 */,
- 0x00800080 /* 336 */,
- 0x00800080 /* 337 */,
- 0x00800080 /* 338 */,
- 0x00020019 /* 339 */,
- 0x000001d0 /* 340 */,
- 0x00000000 /* 341 */,
- 0x00000200 /* 342 */,
- 0x00000004 /* 343 */,
- 0x51816152 /* 344 */,
- 0xc0c08161 /* 345 */,
- 0x00010000 /* 346 */,
- 0x02001000 /* 347 */,
- 0x0c053eff /* 348 */,
- 0x000f0c18 /* 349 */,
- 0x01000140 /* 350 */,
- 0x00000c20 /* 351 */,
- 0x00000000 /* 352 */,
- 0x00000000 /* 353 */,
- 0x00000000 /* 354 */,
- 0x00000000 /* 355 */,
- 0x00000000 /* 356 */,
- 0x00000000 /* 357 */,
- 0x00000000 /* 358 */,
- 0x00000000 /* 359 */,
- 0x00000000 /* 360 */,
- 0x00000000 /* 361 */,
- 0x00000000 /* 362 */,
- 0x00000000 /* 363 */,
- 0x00000000 /* 364 */,
- 0x00000000 /* 365 */,
- 0x00000000 /* 366 */,
- 0x00000000 /* 367 */,
- 0x00000000 /* 368 */,
- 0x00000000 /* 369 */,
- 0x00000000 /* 370 */,
- 0x00000000 /* 371 */,
- 0x00000000 /* 372 */,
- 0x00000000 /* 373 */,
- 0x00000000 /* 374 */,
- 0x00000000 /* 375 */,
- 0x00000000 /* 376 */,
- 0x00000000 /* 377 */,
- 0x00000000 /* 378 */,
- 0x00000000 /* 379 */,
- 0x00000000 /* 380 */,
- 0x00000000 /* 381 */,
- 0x00000000 /* 382 */,
- 0x00000000 /* 383 */,
- 0x76543210 /* 384 */,
- 0x0004f008 /* 385 */,
- 0x00020133 /* 386 */,
- 0x00000000 /* 387 */,
- 0x00000000 /* 388 */,
- 0x00010000 /* 389 */,
- 0x01cece0e /* 390 */,
- 0x02cece0e /* 391 */,
- 0x00010000 /* 392 */,
- 0x00000004 /* 393 */,
- 0x00000001 /* 394 */,
- 0x00000000 /* 395 */,
- 0x00000000 /* 396 */,
- 0x00000100 /* 397 */,
- 0x001700c0 /* 398 */,
- 0x020100cc /* 399 */,
- 0x00030066 /* 400 */,
- 0x00000000 /* 401 */,
- 0x00000000 /* 402 */,
- 0x00000000 /* 403 */,
- 0x00000000 /* 404 */,
- 0x00000000 /* 405 */,
- 0x00000000 /* 406 */,
- 0x00000000 /* 407 */,
- 0x00000000 /* 408 */,
- 0x04080000 /* 409 */,
- 0x04080400 /* 410 */,
- 0x10000000 /* 411 */,
- 0x0c008007 /* 412 */,
- 0x00000100 /* 413 */,
- 0x00000100 /* 414 */,
- 0x55555555 /* 415 */,
- 0xaaaaaaaa /* 416 */,
- 0x55555555 /* 417 */,
- 0xaaaaaaaa /* 418 */,
- 0x00005555 /* 419 */,
- 0x00000000 /* 420 */,
- 0x00000000 /* 421 */,
- 0x00000000 /* 422 */,
- 0x00000000 /* 423 */,
- 0x00000000 /* 424 */,
- 0x00000000 /* 425 */,
- 0x00000000 /* 426 */,
- 0x00000000 /* 427 */,
- 0x00000000 /* 428 */,
- 0x00000000 /* 429 */,
- 0x00000000 /* 430 */,
- 0x00000000 /* 431 */,
- 0x00000000 /* 432 */,
- 0x00000000 /* 433 */,
- 0x00000000 /* 434 */,
- 0x00000000 /* 435 */,
- 0x00000000 /* 436 */,
- 0x00000000 /* 437 */,
- 0x00000000 /* 438 */,
- 0x00000004 /* 439 */,
- 0x00000020 /* 440 */,
- 0x00000000 /* 441 */,
- 0x00000000 /* 442 */,
- 0x00000000 /* 443 */,
- 0x00000000 /* 444 */,
- 0x00000000 /* 445 */,
- 0x00000000 /* 446 */,
- 0x00000000 /* 447 */,
- 0x02800280 /* 448 */,
- 0x02800280 /* 449 */,
- 0x02800280 /* 450 */,
- 0x02800280 /* 451 */,
- 0x00000280 /* 452 */,
- 0x00000000 /* 453 */,
- 0x00000000 /* 454 */,
- 0x00000000 /* 455 */,
- 0x00000000 /* 456 */,
- 0x00000000 /* 457 */,
- 0x00800080 /* 458 */,
- 0x00800080 /* 459 */,
- 0x00800080 /* 460 */,
- 0x00800080 /* 461 */,
- 0x00800080 /* 462 */,
- 0x00800080 /* 463 */,
- 0x00800080 /* 464 */,
- 0x00800080 /* 465 */,
- 0x00800080 /* 466 */,
- 0x00020019 /* 467 */,
- 0x000001d0 /* 468 */,
- 0x00000000 /* 469 */,
- 0x00000200 /* 470 */,
- 0x00000004 /* 471 */,
- 0x51816152 /* 472 */,
- 0xc0c08161 /* 473 */,
- 0x00010000 /* 474 */,
- 0x02001000 /* 475 */,
- 0x0c053eff /* 476 */,
- 0x000f0c18 /* 477 */,
- 0x01000140 /* 478 */,
- 0x00000c20 /* 479 */,
- 0x00000000 /* 480 */,
- 0x00000000 /* 481 */,
- 0x00000000 /* 482 */,
- 0x00000000 /* 483 */,
- 0x00000000 /* 484 */,
- 0x00000000 /* 485 */,
- 0x00000000 /* 486 */,
- 0x00000000 /* 487 */,
- 0x00000000 /* 488 */,
- 0x00000000 /* 489 */,
- 0x00000000 /* 490 */,
- 0x00000000 /* 491 */,
- 0x00000000 /* 492 */,
- 0x00000000 /* 493 */,
- 0x00000000 /* 494 */,
- 0x00000000 /* 495 */,
- 0x00000000 /* 496 */,
- 0x00000000 /* 497 */,
- 0x00000000 /* 498 */,
- 0x00000000 /* 499 */,
- 0x00000000 /* 500 */,
- 0x00000000 /* 501 */,
- 0x00000000 /* 502 */,
- 0x00000000 /* 503 */,
- 0x00000000 /* 504 */,
- 0x00000000 /* 505 */,
- 0x00000000 /* 506 */,
- 0x00000000 /* 507 */,
- 0x00000000 /* 508 */,
- 0x00000000 /* 509 */,
- 0x00000000 /* 510 */,
- 0x00000000 /* 511 */,
- 0x00000000 /* 512 */,
- 0x00000000 /* 513 */,
- 0x00000000 /* 514 */,
- 0x00000000 /* 515 */,
- 0x00000000 /* 516 */,
- 0x00000000 /* 517 */,
- 0x00000000 /* 518 */,
- 0x00010000 /* 519 */,
- 0x00000200 /* 520 */,
- 0x00000000 /* 521 */,
- 0x00000000 /* 522 */,
- 0x00000000 /* 523 */,
- 0x00400320 /* 524 */,
- 0x00000040 /* 525 */,
- 0x00000098 /* 526 */,
- 0x03000098 /* 527 */,
- 0x00000200 /* 528 */,
- 0x00000000 /* 529 */,
- 0x00000000 /* 530 */,
- 0x00000000 /* 531 */,
- 0x0000002a /* 532 */,
- 0x00000015 /* 533 */,
- 0x00000015 /* 534 */,
- 0x0000002a /* 535 */,
- 0x00000033 /* 536 */,
- 0x0000000c /* 537 */,
- 0x0000000c /* 538 */,
- 0x00000033 /* 539 */,
- 0x00000010 /* 540 */,
- 0x00030000 /* 541 */,
- 0x00000003 /* 542 */,
- 0x000300ce /* 543 */,
- 0x03000300 /* 544 */,
- 0x03000300 /* 545 */,
- 0x00000300 /* 546 */,
- 0xff020010 /* 547 */,
- 0x0000033e /* 548 */,
- 0x00000000 /* 549 */,
- 0x00000000 /* 550 */,
- 0x00000000 /* 551 */,
- 0x00000000 /* 552 */,
- 0x00000000 /* 553 */,
- 0x00000000 /* 554 */,
- 0x00000000 /* 555 */,
- 0x00000000 /* 556 */,
- 0x00000000 /* 557 */,
- 0x00000000 /* 558 */,
- 0x00000000 /* 559 */,
- 0x00000000 /* 560 */,
- 0x00000000 /* 561 */,
- 0x00000000 /* 562 */,
- 0x00000000 /* 563 */,
- 0x00000000 /* 564 */,
- 0x00000000 /* 565 */,
- 0x00000000 /* 566 */,
- 0x00000000 /* 567 */,
- 0x00000000 /* 568 */,
- 0x00000000 /* 569 */,
- 0x00000000 /* 570 */,
- 0x00000000 /* 571 */,
- 0x00000000 /* 572 */,
- 0x00000000 /* 573 */,
- 0x00000000 /* 574 */,
- 0x00000000 /* 575 */,
- 0x00000000 /* 576 */,
- 0x00000000 /* 577 */,
- 0x00000000 /* 578 */,
- 0x00000000 /* 579 */,
- 0x00000000 /* 580 */,
- 0x00000000 /* 581 */,
- 0x00000000 /* 582 */,
- 0x00000000 /* 583 */,
- 0x00000000 /* 584 */,
- 0x00000000 /* 585 */,
- 0x00000000 /* 586 */,
- 0x00000000 /* 587 */,
- 0x00000000 /* 588 */,
- 0x00000000 /* 589 */,
- 0x00000000 /* 590 */,
- 0x00000000 /* 591 */,
- 0x00000000 /* 592 */,
- 0x00000000 /* 593 */,
- 0x00000000 /* 594 */,
- 0x00000000 /* 595 */,
- 0x00000000 /* 596 */,
- 0x00000000 /* 597 */,
- 0x00000000 /* 598 */,
- 0x00000000 /* 599 */,
- 0x00000000 /* 600 */,
- 0x00000000 /* 601 */,
- 0x00000000 /* 602 */,
- 0x00000000 /* 603 */,
- 0x00000000 /* 604 */,
- 0x00000000 /* 605 */,
- 0x00000000 /* 606 */,
- 0x00000000 /* 607 */,
- 0x00000000 /* 608 */,
- 0x00000000 /* 609 */,
- 0x00000000 /* 610 */,
- 0x00000000 /* 611 */,
- 0x00000000 /* 612 */,
- 0x00000000 /* 613 */,
- 0x00000000 /* 614 */,
- 0x00000000 /* 615 */,
- 0x00000000 /* 616 */,
- 0x00000000 /* 617 */,
- 0x00000000 /* 618 */,
- 0x00000000 /* 619 */,
- 0x00000000 /* 620 */,
- 0x00000000 /* 621 */,
- 0x00000000 /* 622 */,
- 0x00000000 /* 623 */,
- 0x00000000 /* 624 */,
- 0x00000000 /* 625 */,
- 0x00000000 /* 626 */,
- 0x00000000 /* 627 */,
- 0x00000000 /* 628 */,
- 0x00000000 /* 629 */,
- 0x00000000 /* 630 */,
- 0x00000000 /* 631 */,
- 0x00000000 /* 632 */,
- 0x00000000 /* 633 */,
- 0x00000000 /* 634 */,
- 0x00000000 /* 635 */,
- 0x00000000 /* 636 */,
- 0x00000000 /* 637 */,
- 0x00000000 /* 638 */,
- 0x00000000 /* 639 */,
- 0x00000000 /* 640 */,
- 0x00000000 /* 641 */,
- 0x00000000 /* 642 */,
- 0x00000000 /* 643 */,
- 0x00000000 /* 644 */,
- 0x00000000 /* 645 */,
- 0x00000000 /* 646 */,
- 0x00010000 /* 647 */,
- 0x00000200 /* 648 */,
- 0x00000000 /* 649 */,
- 0x00000000 /* 650 */,
- 0x00000000 /* 651 */,
- 0x00400320 /* 652 */,
- 0x00000040 /* 653 */,
- 0x0000dcba /* 654 */,
- 0x0300dcba /* 655 */,
- 0x00000200 /* 656 */,
- 0x00000000 /* 657 */,
- 0x00000000 /* 658 */,
- 0x00000000 /* 659 */,
- 0x0000002a /* 660 */,
- 0x00000015 /* 661 */,
- 0x00000015 /* 662 */,
- 0x0000002a /* 663 */,
- 0x00000033 /* 664 */,
- 0x0000000c /* 665 */,
- 0x0000000c /* 666 */,
- 0x00000033 /* 667 */,
- 0x00005432 /* 668 */,
- 0x000f0000 /* 669 */,
- 0x0000000f /* 670 */,
- 0x000300ce /* 671 */,
- 0x03000300 /* 672 */,
- 0x03000300 /* 673 */,
- 0x00000300 /* 674 */,
- 0xff020010 /* 675 */,
- 0x0000033e /* 676 */,
- 0x00000000 /* 677 */,
- 0x00000000 /* 678 */,
- 0x00000000 /* 679 */,
- 0x00000000 /* 680 */,
- 0x00000000 /* 681 */,
- 0x00000000 /* 682 */,
- 0x00000000 /* 683 */,
- 0x00000000 /* 684 */,
- 0x00000000 /* 685 */,
- 0x00000000 /* 686 */,
- 0x00000000 /* 687 */,
- 0x00000000 /* 688 */,
- 0x00000000 /* 689 */,
- 0x00000000 /* 690 */,
- 0x00000000 /* 691 */,
- 0x00000000 /* 692 */,
- 0x00000000 /* 693 */,
- 0x00000000 /* 694 */,
- 0x00000000 /* 695 */,
- 0x00000000 /* 696 */,
- 0x00000000 /* 697 */,
- 0x00000000 /* 698 */,
- 0x00000000 /* 699 */,
- 0x00000000 /* 700 */,
- 0x00000000 /* 701 */,
- 0x00000000 /* 702 */,
- 0x00000000 /* 703 */,
- 0x00000000 /* 704 */,
- 0x00000000 /* 705 */,
- 0x00000000 /* 706 */,
- 0x00000000 /* 707 */,
- 0x00000000 /* 708 */,
- 0x00000000 /* 709 */,
- 0x00000000 /* 710 */,
- 0x00000000 /* 711 */,
- 0x00000000 /* 712 */,
- 0x00000000 /* 713 */,
- 0x00000000 /* 714 */,
- 0x00000000 /* 715 */,
- 0x00000000 /* 716 */,
- 0x00000000 /* 717 */,
- 0x00000000 /* 718 */,
- 0x00000000 /* 719 */,
- 0x00000000 /* 720 */,
- 0x00000000 /* 721 */,
- 0x00000000 /* 722 */,
- 0x00000000 /* 723 */,
- 0x00000000 /* 724 */,
- 0x00000000 /* 725 */,
- 0x00000000 /* 726 */,
- 0x00000000 /* 727 */,
- 0x00000000 /* 728 */,
- 0x00000000 /* 729 */,
- 0x00000000 /* 730 */,
- 0x00000000 /* 731 */,
- 0x00000000 /* 732 */,
- 0x00000000 /* 733 */,
- 0x00000000 /* 734 */,
- 0x00000000 /* 735 */,
- 0x00000000 /* 736 */,
- 0x00000000 /* 737 */,
- 0x00000000 /* 738 */,
- 0x00000000 /* 739 */,
- 0x00000000 /* 740 */,
- 0x00000000 /* 741 */,
- 0x00000000 /* 742 */,
- 0x00000000 /* 743 */,
- 0x00000000 /* 744 */,
- 0x00000000 /* 745 */,
- 0x00000000 /* 746 */,
- 0x00000000 /* 747 */,
- 0x00000000 /* 748 */,
- 0x00000000 /* 749 */,
- 0x00000000 /* 750 */,
- 0x00000000 /* 751 */,
- 0x00000000 /* 752 */,
- 0x00000000 /* 753 */,
- 0x00000000 /* 754 */,
- 0x00000000 /* 755 */,
- 0x00000000 /* 756 */,
- 0x00000000 /* 757 */,
- 0x00000000 /* 758 */,
- 0x00000000 /* 759 */,
- 0x00000000 /* 760 */,
- 0x00000000 /* 761 */,
- 0x00000000 /* 762 */,
- 0x00000000 /* 763 */,
- 0x00000000 /* 764 */,
- 0x00000000 /* 765 */,
- 0x00000000 /* 766 */,
- 0x00000000 /* 767 */,
- 0x00000000 /* 768 */,
- 0x00000000 /* 769 */,
- 0x00000000 /* 770 */,
- 0x00000000 /* 771 */,
- 0x00000000 /* 772 */,
- 0x00000000 /* 773 */,
- 0x00000000 /* 774 */,
- 0x00010000 /* 775 */,
- 0x00000200 /* 776 */,
- 0x00000000 /* 777 */,
- 0x00000000 /* 778 */,
- 0x00000000 /* 779 */,
- 0x00400320 /* 780 */,
- 0x00000040 /* 781 */,
- 0x00000098 /* 782 */,
- 0x03000098 /* 783 */,
- 0x00000200 /* 784 */,
- 0x00000000 /* 785 */,
- 0x00000000 /* 786 */,
- 0x00000000 /* 787 */,
- 0x0000002a /* 788 */,
- 0x00000015 /* 789 */,
- 0x00000015 /* 790 */,
- 0x0000002a /* 791 */,
- 0x00000033 /* 792 */,
- 0x0000000c /* 793 */,
- 0x0000000c /* 794 */,
- 0x00000033 /* 795 */,
- 0x00000010 /* 796 */,
- 0x00030000 /* 797 */,
- 0x00000003 /* 798 */,
- 0x000300ce /* 799 */,
- 0x03000300 /* 800 */,
- 0x03000300 /* 801 */,
- 0x00000300 /* 802 */,
- 0xff020010 /* 803 */,
- 0x0000033e /* 804 */,
- 0x00000000 /* 805 */,
- 0x00000000 /* 806 */,
- 0x00000000 /* 807 */,
- 0x00000000 /* 808 */,
- 0x00000000 /* 809 */,
- 0x00000000 /* 810 */,
- 0x00000000 /* 811 */,
- 0x00000000 /* 812 */,
- 0x00000000 /* 813 */,
- 0x00000000 /* 814 */,
- 0x00000000 /* 815 */,
- 0x00000000 /* 816 */,
- 0x00000000 /* 817 */,
- 0x00000000 /* 818 */,
- 0x00000000 /* 819 */,
- 0x00000000 /* 820 */,
- 0x00000000 /* 821 */,
- 0x00000000 /* 822 */,
- 0x00000000 /* 823 */,
- 0x00000000 /* 824 */,
- 0x00000000 /* 825 */,
- 0x00000000 /* 826 */,
- 0x00000000 /* 827 */,
- 0x00000000 /* 828 */,
- 0x00000000 /* 829 */,
- 0x00000000 /* 830 */,
- 0x00000000 /* 831 */,
- 0x00000000 /* 832 */,
- 0x00000000 /* 833 */,
- 0x00000000 /* 834 */,
- 0x00000000 /* 835 */,
- 0x00000000 /* 836 */,
- 0x00000000 /* 837 */,
- 0x00000000 /* 838 */,
- 0x00000000 /* 839 */,
- 0x00000000 /* 840 */,
- 0x00000000 /* 841 */,
- 0x00000000 /* 842 */,
- 0x00000000 /* 843 */,
- 0x00000000 /* 844 */,
- 0x00000000 /* 845 */,
- 0x00000000 /* 846 */,
- 0x00000000 /* 847 */,
- 0x00000000 /* 848 */,
- 0x00000000 /* 849 */,
- 0x00000000 /* 850 */,
- 0x00000000 /* 851 */,
- 0x00000000 /* 852 */,
- 0x00000000 /* 853 */,
- 0x00000000 /* 854 */,
- 0x00000000 /* 855 */,
- 0x00000000 /* 856 */,
- 0x00000000 /* 857 */,
- 0x00000000 /* 858 */,
- 0x00000000 /* 859 */,
- 0x00000000 /* 860 */,
- 0x00000000 /* 861 */,
- 0x00000000 /* 862 */,
- 0x00000000 /* 863 */,
- 0x00000000 /* 864 */,
- 0x00000000 /* 865 */,
- 0x00000000 /* 866 */,
- 0x00000000 /* 867 */,
- 0x00000000 /* 868 */,
- 0x00000000 /* 869 */,
- 0x00000000 /* 870 */,
- 0x00000000 /* 871 */,
- 0x00000000 /* 872 */,
- 0x00000000 /* 873 */,
- 0x00000000 /* 874 */,
- 0x00000000 /* 875 */,
- 0x00000000 /* 876 */,
- 0x00000000 /* 877 */,
- 0x00000000 /* 878 */,
- 0x00000000 /* 879 */,
- 0x00000000 /* 880 */,
- 0x00000000 /* 881 */,
- 0x00000000 /* 882 */,
- 0x00000000 /* 883 */,
- 0x00000000 /* 884 */,
- 0x00000000 /* 885 */,
- 0x00000000 /* 886 */,
- 0x00000000 /* 887 */,
- 0x00000000 /* 888 */,
- 0x00000000 /* 889 */,
- 0x00000000 /* 890 */,
- 0x00000000 /* 891 */,
- 0x00000000 /* 892 */,
- 0x00000000 /* 893 */,
- 0x00000000 /* 894 */,
- 0x00000000 /* 895 */,
- 0x00000000 /* 896 */,
- 0x00000000 /* 897 */,
- 0x00000000 /* 898 */,
- 0x00000000 /* 899 */,
- 0x00000000 /* 900 */,
- 0x00000000 /* 901 */,
- 0x00000000 /* 902 */,
- 0x00010000 /* 903 */,
- 0x00000200 /* 904 */,
- 0x00000000 /* 905 */,
- 0x00000000 /* 906 */,
- 0x00000000 /* 907 */,
- 0x00400320 /* 908 */,
- 0x00000040 /* 909 */,
- 0x0000dcba /* 910 */,
- 0x0300dcba /* 911 */,
- 0x00000200 /* 912 */,
- 0x00000000 /* 913 */,
- 0x00000000 /* 914 */,
- 0x00000000 /* 915 */,
- 0x0000002a /* 916 */,
- 0x00000015 /* 917 */,
- 0x00000015 /* 918 */,
- 0x0000002a /* 919 */,
- 0x00000033 /* 920 */,
- 0x0000000c /* 921 */,
- 0x0000000c /* 922 */,
- 0x00000033 /* 923 */,
- 0x00005432 /* 924 */,
- 0x000f0000 /* 925 */,
- 0x0000000f /* 926 */,
- 0x000300ce /* 927 */,
- 0x03000300 /* 928 */,
- 0x03000300 /* 929 */,
- 0x00000300 /* 930 */,
- 0xff020010 /* 931 */,
- 0x0000033e /* 932 */,
- 0x00000000 /* 933 */,
- 0x00000000 /* 934 */,
- 0x00000000 /* 935 */,
- 0x00000000 /* 936 */,
- 0x00000000 /* 937 */,
- 0x00000000 /* 938 */,
- 0x00000000 /* 939 */,
- 0x00000000 /* 940 */,
- 0x00000000 /* 941 */,
- 0x00000000 /* 942 */,
- 0x00000000 /* 943 */,
- 0x00000000 /* 944 */,
- 0x00000000 /* 945 */,
- 0x00000000 /* 946 */,
- 0x00000000 /* 947 */,
- 0x00000000 /* 948 */,
- 0x00000000 /* 949 */,
- 0x00000000 /* 950 */,
- 0x00000000 /* 951 */,
- 0x00000000 /* 952 */,
- 0x00000000 /* 953 */,
- 0x00000000 /* 954 */,
- 0x00000000 /* 955 */,
- 0x00000000 /* 956 */,
- 0x00000000 /* 957 */,
- 0x00000000 /* 958 */,
- 0x00000000 /* 959 */,
- 0x00000000 /* 960 */,
- 0x00000000 /* 961 */,
- 0x00000000 /* 962 */,
- 0x00000000 /* 963 */,
- 0x00000000 /* 964 */,
- 0x00000000 /* 965 */,
- 0x00000000 /* 966 */,
- 0x00000000 /* 967 */,
- 0x00000000 /* 968 */,
- 0x00000000 /* 969 */,
- 0x00000000 /* 970 */,
- 0x00000000 /* 971 */,
- 0x00000000 /* 972 */,
- 0x00000000 /* 973 */,
- 0x00000000 /* 974 */,
- 0x00000000 /* 975 */,
- 0x00000000 /* 976 */,
- 0x00000000 /* 977 */,
- 0x00000000 /* 978 */,
- 0x00000000 /* 979 */,
- 0x00000000 /* 980 */,
- 0x00000000 /* 981 */,
- 0x00000000 /* 982 */,
- 0x00000000 /* 983 */,
- 0x00000000 /* 984 */,
- 0x00000000 /* 985 */,
- 0x00000000 /* 986 */,
- 0x00000000 /* 987 */,
- 0x00000000 /* 988 */,
- 0x00000000 /* 989 */,
- 0x00000000 /* 990 */,
- 0x00000000 /* 991 */,
- 0x00000000 /* 992 */,
- 0x00000000 /* 993 */,
- 0x00000000 /* 994 */,
- 0x00000000 /* 995 */,
- 0x00000000 /* 996 */,
- 0x00000000 /* 997 */,
- 0x00000000 /* 998 */,
- 0x00000000 /* 999 */,
- 0x00000000 /* 1000 */,
- 0x00000000 /* 1001 */,
- 0x00000000 /* 1002 */,
- 0x00000000 /* 1003 */,
- 0x00000000 /* 1004 */,
- 0x00000000 /* 1005 */,
- 0x00000000 /* 1006 */,
- 0x00000000 /* 1007 */,
- 0x00000000 /* 1008 */,
- 0x00000000 /* 1009 */,
- 0x00000000 /* 1010 */,
- 0x00000000 /* 1011 */,
- 0x00000000 /* 1012 */,
- 0x00000000 /* 1013 */,
- 0x00000000 /* 1014 */,
- 0x00000000 /* 1015 */,
- 0x00000000 /* 1016 */,
- 0x00000000 /* 1017 */,
- 0x00000000 /* 1018 */,
- 0x00000000 /* 1019 */,
- 0x00000000 /* 1020 */,
- 0x00000000 /* 1021 */,
- 0x00000000 /* 1022 */,
- 0x00000000 /* 1023 */,
- 0x00000000 /* 1024 */,
- 0x00000100 /* 1025 */,
- 0x05000000 /* 1026 */,
- 0x01000000 /* 1027 */,
- 0x00040003 /* 1028 */,
- 0x00320100 /* 1029 */,
- 0x02005502 /* 1030 */,
- 0x00000000 /* 1031 */,
- 0x00000000 /* 1032 */,
- 0x00000000 /* 1033 */,
- 0x00000004 /* 1034 */,
- 0x00000000 /* 1035 */,
- 0x01010100 /* 1036 */,
- 0x00000200 /* 1037 */,
- 0x01000000 /* 1038 */,
- 0x00000001 /* 1039 */,
- 0x00000000 /* 1040 */,
- 0x00000000 /* 1041 */,
- 0x00000000 /* 1042 */,
- 0x00000000 /* 1043 */,
- 0x0000ffff /* 1044 */,
- 0x03221302 /* 1045 */,
- 0x00000322 /* 1046 */,
- 0x00000000 /* 1047 */,
- 0x000f1f00 /* 1048 */,
- 0x0f1f0f1f /* 1049 */,
- 0x0f1f0f1f /* 1050 */,
- 0x03000003 /* 1051 */,
- 0x00000300 /* 1052 */,
- 0x0b221b02 /* 1053 */,
- 0x09240b22 /* 1054 */,
- 0x00000000 /* 1055 */,
- 0x00000000 /* 1056 */,
- 0x05030000 /* 1057 */,
- 0x14000001 /* 1058 */,
- 0x63c0ce00 /* 1059 */,
- 0x0000000e /* 1060 */,
- 0x001f0fc0 /* 1061 */,
- 0x003c37c0 /* 1062 */,
- 0x1f0fc0ce /* 1063 */,
- 0x3c37c0ce /* 1064 */,
- 0x00024410 /* 1065 */,
- 0x00004410 /* 1066 */,
- 0x00004410 /* 1067 */,
- 0x00024410 /* 1068 */,
- 0x00004410 /* 1069 */,
- 0x000fc0ff /* 1070 */,
- 0x00004410 /* 1071 */,
- 0x000fc0ff /* 1072 */,
- 0x00004410 /* 1073 */,
- 0x1f0fc0ce /* 1074 */,
- 0x00004410 /* 1075 */,
- 0x00004011 /* 1076 */,
- 0x00004410 /* 1077 */,
- 0x00000000 /* 1078 */,
- 0x00000000 /* 1079 */,
- 0x00000000 /* 1080 */,
- 0x00000000 /* 1081 */,
- 0x00000078 /* 1082 */,
- 0x00000000 /* 1083 */,
- 0x00010108 /* 1084 */,
- 0x00000000 /* 1085 */,
- 0x03000000 /* 1086 */,
- 0x00000008 /* 1087 */,
- 0x00000000 /* 1088 */,
- 0x00000000 /* 1089 */,
- 0x00000000 /* 1090 */,
- 0x00000000 /* 1091 */,
- 0x00000001 /* 1092 */,
- 0x00000000 /* 1093 */,
- 0x00000000 /* 1094 */,
- 0x01000000 /* 1095 */,
- 0x00000001 /* 1096 */,
- 0x00000000 /* 1097 */,
- 0x07010000 /* 1098 */,
- 0x00000204 /* 1099 */,
- 0x00000000 /* 1100 */
- }
-};
-#endif
diff --git a/drivers/misc/mnh/mnh-ddr.c b/drivers/misc/mnh/mnh-ddr.c
index 6ab2b7d..850aa449 100644
--- a/drivers/misc/mnh/mnh-ddr.c
+++ b/drivers/misc/mnh/mnh-ddr.c
@@ -23,77 +23,94 @@
#include "mnh-hwio-ddr-pi.h"
#include "mnh-hwio-ddr-phy.h"
#include "mnh-hwio-scu.h"
-#include "mnh-ddr-33-300-600-400.h"
+#include "mnh-ddr-33-100-400-600.h"
#include "mnh-ddr.h"
#include "mnh-pcie.h"
#include "mnh-sm.h"
#define MNH_DDR_CTL_IN(reg) \
HW_IN(HWIO_DDR_CTL_BASE_ADDR, DDR_CTL, reg)
-#define MNH_DDR_CTL_INf(reg, fld) \
- HW_INf(HWIO_DDR_CTL_BASE_ADDR, DDR_CTL, reg, fld)
-#define MNH_DDR_CTL_OUT(reg, val) \
- HW_OUT(HWIO_DDR_CTL_BASE_ADDR, DDR_CTL, reg, val)
-#define MNH_DDR_CTL_OUTf(reg, fld, val) \
- HW_OUTf(HWIO_DDR_CTL_BASE_ADDR, DDR_CTL, reg, fld, val)
+#define MNH_DDR_CTL_INf(...) \
+ HW_INf(HWIO_DDR_CTL_BASE_ADDR, DDR_CTL, __VA_ARGS__)
+#define MNH_DDR_CTL_OUT(...) \
+ HW_OUT(HWIO_DDR_CTL_BASE_ADDR, DDR_CTL, __VA_ARGS__)
+#define MNH_DDR_CTL_OUTf(...) \
+ HW_OUTf(HWIO_DDR_CTL_BASE_ADDR, DDR_CTL, __VA_ARGS__)
-#define MNH_DDR_PI_INf(reg, fld) \
- HW_INf(HWIO_DDR_PI_BASE_ADDR, DDR_PI, reg, fld)
-#define MNH_DDR_PI_OUTf(reg, fld, val) \
- HW_OUTf(HWIO_DDR_PI_BASE_ADDR, DDR_PI, reg, fld, val)
-#define MNH_DDR_PI_OUT(reg, val) \
- HW_OUT(HWIO_DDR_PI_BASE_ADDR, DDR_PI, reg, val)
+#define MNH_DDR_PI_INf(...) \
+ HW_INf(HWIO_DDR_PI_BASE_ADDR, DDR_PI, __VA_ARGS__)
+#define MNH_DDR_PI_OUTf(...) \
+ HW_OUTf(HWIO_DDR_PI_BASE_ADDR, DDR_PI, __VA_ARGS__)
+#define MNH_DDR_PI_OUT(...) \
+ HW_OUT(HWIO_DDR_PI_BASE_ADDR, DDR_PI, __VA_ARGS__)
-#define MNH_DDR_PHY_INf(reg, fld) \
- HW_INf(HWIO_DDR_PHY_BASE_ADDR, DDR_PHY, reg, fld)
-#define MNH_DDR_PHY_OUTf(reg, fld, val) \
- HW_OUTf(HWIO_DDR_PHY_BASE_ADDR, DDR_PHY, reg, fld, val)
-#define MNH_DDR_PHY_OUT(reg, val) \
- HW_OUT(HWIO_DDR_PHY_BASE_ADDR, DDR_PHY, reg, val)
+#define MNH_DDR_PHY_INf(...) \
+ HW_INf(HWIO_DDR_PHY_BASE_ADDR, DDR_PHY, __VA_ARGS__)
+#define MNH_DDR_PHY_OUTf(...) \
+ HW_OUTf(HWIO_DDR_PHY_BASE_ADDR, DDR_PHY, __VA_ARGS__)
+#define MNH_DDR_PHY_OUT(...) \
+ HW_OUT(HWIO_DDR_PHY_BASE_ADDR, DDR_PHY, __VA_ARGS__)
#define MNH_SCU_IN(reg) \
HW_IN(HWIO_SCU_BASE_ADDR, SCU, reg)
-#define MNH_SCU_INf(reg, fld) \
- HW_INf(HWIO_SCU_BASE_ADDR, SCU, reg, fld)
-#define MNH_SCU_INx(reg, inst) \
- HW_INx(HWIO_SCU_BASE_ADDR, SCU, reg, inst)
-#define MNH_SCU_INxf(reg, inst, fld) \
- HW_INxf(HWIO_SCU_BASE_ADDR, SCU, reg, inst, fld)
-#define MNH_SCU_OUTf(reg, fld, val) \
- HW_OUTf(HWIO_SCU_BASE_ADDR, SCU, reg, fld, val)
-#define MNH_SCU_OUT(reg, val) \
- HW_OUT(HWIO_SCU_BASE_ADDR, SCU, reg, val)
-#define MNH_SCU_OUTx(reg, inst, val) \
- HW_OUTx(HWIO_SCU_BASE_ADDR, SCU, reg, inst, val)
+#define MNH_SCU_INf(...) \
+ HW_INf(HWIO_SCU_BASE_ADDR, SCU, __VA_ARGS__)
+#define MNH_SCU_INx(...) \
+ HW_INx(HWIO_SCU_BASE_ADDR, SCU, __VA_ARGS__)
+#define MNH_SCU_INxf(...) \
+ HW_INxf(HWIO_SCU_BASE_ADDR, SCU, __VA_ARGS__)
+#define MNH_SCU_OUTf(...) \
+ HW_OUTf(HWIO_SCU_BASE_ADDR, SCU, __VA_ARGS__)
+#define MNH_SCU_OUT(...) \
+ HW_OUT(HWIO_SCU_BASE_ADDR, SCU, __VA_ARGS__)
+#define MNH_SCU_OUTx(...) \
+ HW_OUTx(HWIO_SCU_BASE_ADDR, SCU, __VA_ARGS__)
#define MNH_RSTC_INf(fld) \
HW_INf(HWIO_SCU_BASE_ADDR, SCU, RSTC, fld)
-#define MNH_RSTC_OUTf(fld, val) \
- HW_OUTf(HWIO_SCU_BASE_ADDR, SCU, RSTC, fld, val)
+#define MNH_RSTC_OUTf(...) \
+ HW_OUTf(HWIO_SCU_BASE_ADDR, SCU, RSTC, __VA_ARGS__)
#define WRITE_DDR_REG_CONFIG(ddrblock, regindex) \
- mnh_reg_write(_state.ddrblock##_base + (regindex * sizeof(u32)), \
- _state.ddrblock[regindex])
+do { \
+ if (_state.ddrblock[regindex]) { \
+ mnh_reg_write(_state.ddrblock##_base + \
+ (regindex * sizeof(u32)), \
+ _state.ddrblock[regindex]); \
+ } \
+} while (0)
#define WRITE_DDR_PHY_CONFIG(fsp, regindex) \
+do { \
+ if (_state.phy[fsp][regindex]) { \
+ mnh_reg_write(_state.phy_base + (regindex * sizeof(u32)), \
+ _state.phy[fsp][regindex]); \
+ } \
+} while (0)
+
+#define WRITE_SET_ELEMENT(regindex, regvalue) \
mnh_reg_write(_state.phy_base + (regindex * sizeof(u32)),\
- _state.phy[fsp][regindex])
+ regvalue)
#define WRITE_SCU_FSP(fsp) \
+do { \
_state.fsps[fsp] &= 0xFFFFFF00;\
_state.fsps[fsp] |= 0x7d;\
- MNH_SCU_OUTx(LPDDR4_FSP_SETTING, fsp, _state.fsps[fsp])
+ MNH_SCU_OUTx(LPDDR4_FSP_SETTING, fsp, _state.fsps[fsp]); \
+} while (0)
#define SAVE_CURRENT_FSP() \
+do { \
_state.suspend_fsp = \
MNH_SCU_INf(LPDDR4_LOW_POWER_STS, LPDDR4_CUR_FSP); \
- dev_dbg(dev, "%s: saved fsp: %d\n", __func__, _state.suspend_fsp)
+ dev_dbg(dev, "%s: saved fsp: %d\n", __func__, _state.suspend_fsp); \
+} while (0)
#define SAVED_FSP() _state.suspend_fsp
#define WRITE_CLK_FROM_FSP(fsp) \
do { \
- if (fsp < (MNH_DDR_NUM_FSPS)) { \
+ if (fsp < (MNH_DDR_NUM_FSPS)) { \
MNH_SCU_OUTf(CCU_CLK_DIV, LPDDR4_REFCLK_DIV, \
MNH_SCU_INxf(LPDDR4_FSP_SETTING, fsp, \
FSP_LPDDR4_REFCLK_DIV)); \
@@ -129,25 +146,12 @@
/* INT status bits */
#define MR_WRITE_SBIT 26
#define MR_READ_SBIT 23
+#define BIST_SBIT 6
#define LP_CMD_SBIT 5
#define INIT_DONE_SBIT 4
-static struct mnh_ddr_state mnh_ddr_po_config = {
- .bases = {
- HWIO_DDR_CTL_BASE_ADDR,
- HWIO_DDR_PHY_BASE_ADDR,
- HWIO_DDR_PI_BASE_ADDR
- },
- .fsps = {
- 0x131B007D,
- 0x1311007D,
- 0x0311007D,
- 0x0422007D
- },
- &mnh_ddr_33_300_600_400,
-};
-
struct mnh_ddr_internal_state _state;
+
/* read entire int_status */
u64 mnh_ddr_int_status(struct device *dev)
{
@@ -250,27 +254,33 @@
mnh_ddr_send_lp_cmd(dev, LP_CMD_EXIT_LP);
}
-static void mnh_ddr_init_internal_state(struct mnh_ddr_state *state)
+static void mnh_ddr_init_internal_state(struct mnh_ddr_reg_config *cfg)
{
- _state.ctl_base = state->bases.ctl_base;
- _state.phy_base = state->bases.phy_base;
- _state.pi_base = state->bases.pi_base;
+ _state.ctl_base = HWIO_DDR_CTL_BASE_ADDR;
+ _state.pi_base = HWIO_DDR_PI_BASE_ADDR;
+ _state.phy_base = HWIO_DDR_PHY_BASE_ADDR;
+
+ memcpy(&(_state.fsps[0]),
+ &(cfg->fsps[0]),
+ MNH_DDR_NUM_FSPS * sizeof(u32));
+
memcpy(&(_state.ctl[0]),
- &(state->config->ctl[0]),
+ &(cfg->ctl[0]),
MNH_DDR_NUM_CTL_REG * sizeof(u32));
memcpy(&(_state.phy[0][0]),
- &(state->config->phy[0]),
+ &(cfg->phy[0]),
MNH_DDR_NUM_PHY_REG * sizeof(u32));
memcpy(&(_state.pi[0]),
- &(state->config->pi[0]),
+ &(cfg->pi[0]),
MNH_DDR_NUM_PI_REG * sizeof(u32));
- memcpy(&(_state.fsps[0]),
- &(state->fsps[0]),
- MNH_DDR_NUM_FSPS * sizeof(u32));
_state.suspend_fsp = 0;
+ _state.tref[0] = cfg->ctl[56] & 0xFFFF;
+ _state.tref[1] = cfg->ctl[57] & 0xFFFF;
+ _state.tref[2] = cfg->ctl[58] & 0xFFFF;
+ _state.tref[3] = cfg->ctl[59] & 0xFFFF;
}
void mnh_ddr_init_clocks(struct device *dev)
@@ -320,7 +330,7 @@
SAVE_DDR_REG_CONFIG(pi, index);
CLR_START(pi);
- for (fsp = 0; fsp < MNH_DDR_PHY_NUM_FSPS; fsp++) {
+ for (fsp = 0; fsp < MNH_DDR_NUM_FSPS; fsp++) {
MNH_DDR_PHY_OUTf(1025, PHY_FREQ_SEL_INDEX, fsp);
for (index = 0; index < MNH_DDR_NUM_PHY_REG; index++)
SAVE_DDR_PHY_REG_CONFIG(fsp, index);
@@ -331,13 +341,31 @@
{
mnh_ddr_disable_lp(dev);
- /* resume to fsp2 */
- mnh_lpddr_freq_change(LPDDR_FREQ_FSP2);
+ dev_dbg(dev, "%s: tref 0x%04x 0x%04x 0x%04x 0x%04x\n",
+ __func__, MNH_DDR_CTL_INf(56, TREF_F0),
+ MNH_DDR_CTL_INf(57, TREF_F1), MNH_DDR_CTL_INf(58, TREF_F2),
+ MNH_DDR_CTL_INf(59, TREF_F3));
+
+ /*
+ * restore hot tref settings, rather than what was
+ * saved from the MNH to handle the case where
+ * we're suspending in a cold temp and resuming at hot.
+ * If we're not actually hot, the MNH side will adjust
+ * the rate downward.
+ */
+ MNH_DDR_CTL_OUTf(56, TREF_F0, _state.tref[0]);
+ MNH_DDR_CTL_OUTf(57, TREF_F1, _state.tref[1]);
+ MNH_DDR_CTL_OUTf(58, TREF_F2, _state.tref[2]);
+ MNH_DDR_CTL_OUTf(59, TREF_F3, _state.tref[3]);
+
+ /* resume to fsp3 */
+ mnh_lpddr_freq_change(LPDDR_FREQ_FSP3);
SAVE_CURRENT_FSP();
mnh_ddr_pull_config();
- if (mnh_ddr_send_lp_cmd(dev, LP_CMD_DSRPD))
- dev_err(dev, "%s: failed to get LP complete\n", __func__);
+ mnh_ddr_send_lp_cmd(dev, LP_CMD_DSRPD);
+ dev_dbg(dev, "%s LP_STATE is 0x%x", __func__,
+ MNH_DDR_CTL_INf(121, LP_STATE));
/* Enable clock gating */
MNH_SCU_OUTf(CCU_CLK_CTL, HALT_LP4CG_EN, 0);
@@ -374,7 +402,7 @@
for (index = 0; index < MNH_DDR_NUM_PI_REG; index++)
WRITE_DDR_REG_CONFIG(pi, index);
- for (fsp = 0; fsp < MNH_DDR_PHY_NUM_FSPS; fsp++) {
+ for (fsp = 0; fsp < MNH_DDR_NUM_FSPS; fsp++) {
MNH_DDR_PHY_OUTf(1025, PHY_FREQ_SEL_MULTICAST_EN, 0);
MNH_DDR_PHY_OUTf(1025, PHY_FREQ_SEL_INDEX, fsp);
@@ -382,10 +410,9 @@
if (index != 1025)
WRITE_DDR_PHY_CONFIG(fsp, index);
}
+ MNH_DDR_PHY_OUTf(1084, PHY_CAL_CLK_SELECT_0, 0x4);
}
- gpiod_set_value_cansleep(iso_n, 1);
- udelay(1000);
MNH_DDR_CTL_OUTf(81, PWRUP_SREFRESH_EXIT, 1);
while ((timeout < 10) && (MNH_DDR_CTL_INf(450, MEM_RST_VALID) == 0)) {
@@ -397,6 +424,12 @@
dev_err(dev, "%s: failed to see reset valid\n", __func__);
udelay(1000);
+ /* extra delay after getting reset valid */
+ udelay(1000);
+ MNH_DDR_PHY_OUTf(1051, PHY_SET_DFI_INPUT_RST_PAD, 1);
+
+ gpiod_set_value_cansleep(iso_n, 1);
+ udelay(1000);
MNH_DDR_CTL_OUTf(00, START, 1);
@@ -420,6 +453,12 @@
mnh_ddr_int_status(dev));
mnh_ddr_clr_int_status(dev);
mnh_lpddr_freq_change(SAVED_FSP());
+
+ dev_dbg(dev, "%s: tref 0x%04x 0x%04x 0x%04x 0x%04x\n",
+ __func__, MNH_DDR_CTL_INf(56, TREF_F0),
+ MNH_DDR_CTL_INf(57, TREF_F1), MNH_DDR_CTL_INf(58, TREF_F2),
+ MNH_DDR_CTL_INf(59, TREF_F3));
+
mnh_ddr_enable_lp();
return 0;
@@ -428,18 +467,14 @@
int mnh_ddr_po_init(struct device *dev, struct gpio_desc *iso_n)
{
- int index;
+ int index, setindex;
unsigned long timeout;
- struct mnh_ddr_state *state = &mnh_ddr_po_config;
+ struct mnh_ddr_reg_config *cfg = &mnh_ddr_33_100_400_600;
- mnh_ddr_init_internal_state(state);
+ mnh_ddr_init_internal_state(cfg);
dev_dbg(dev, "%s start.", __func__);
- /* Gate CPU clock while initializing DDR */
- MNH_SCU_OUTf(CCU_CLK_CTL, HALT_CPUCG_EN, 0);
- MNH_SCU_OUTf(CCU_CLK_CTL, CPU_CLKEN, 0);
-
/* deassert iso_n */
gpiod_set_value_cansleep(iso_n, 1);
@@ -448,6 +483,9 @@
for (index = 0; index < MNH_DDR_NUM_CTL_REG; index++)
WRITE_DDR_REG_CONFIG(ctl, index);
+ /* Make sure DRAM will request refresh rate adjustments */
+ MNH_DDR_CTL_OUTf(164, MR13_DATA_0, 0xD0);
+
for (index = 0; index < MNH_DDR_NUM_PI_REG; index++)
WRITE_DDR_REG_CONFIG(pi, index);
@@ -461,60 +499,24 @@
MNH_DDR_PHY_OUTf(1025, PHY_FREQ_SEL_INDEX, 1);
/* a */
- MNH_DDR_PHY_OUTf(83, PHY_RDDQS_LATENCY_ADJUST_0, 0x03);
- MNH_DDR_PHY_OUTf(83, PHY_RDDQS_GATE_SLAVE_DELAY_0, 0x011a);
- MNH_DDR_PHY_OUTf(85, PHY_GTLVL_LAT_ADJ_START_0, 0x01);
- MNH_DDR_PHY_OUTf(90, PHY_RDDATA_EN_TSEL_DLY_0, 0x02);
- MNH_DDR_PHY_OUTf(90, PHY_RDDATA_EN_DLY_0, 0x03);
- MNH_DDR_PHY_OUTf(92, PHY_RPTR_UPDATE_0, 0x07);
-
- MNH_DDR_PHY_OUTf(211, PHY_RDDQS_LATENCY_ADJUST_1, 0x03);
- MNH_DDR_PHY_OUTf(211, PHY_RDDQS_GATE_SLAVE_DELAY_1, 0x011a);
- MNH_DDR_PHY_OUTf(213, PHY_GTLVL_LAT_ADJ_START_1, 0x01);
- MNH_DDR_PHY_OUTf(218, PHY_RDDATA_EN_TSEL_DLY_1, 0x02);
- MNH_DDR_PHY_OUTf(218, PHY_RDDATA_EN_DLY_1, 0x03);
- MNH_DDR_PHY_OUTf(220, PHY_RPTR_UPDATE_1, 0x07);
-
- MNH_DDR_PHY_OUTf(339, PHY_RDDQS_LATENCY_ADJUST_2, 0x03);
- MNH_DDR_PHY_OUTf(339, PHY_RDDQS_GATE_SLAVE_DELAY_2, 0x011a);
- MNH_DDR_PHY_OUTf(341, PHY_GTLVL_LAT_ADJ_START_2, 0x01);
- MNH_DDR_PHY_OUTf(346, PHY_RDDATA_EN_TSEL_DLY_2, 0x02);
- MNH_DDR_PHY_OUTf(346, PHY_RDDATA_EN_DLY_2, 0x03);
- MNH_DDR_PHY_OUTf(348, PHY_RPTR_UPDATE_2, 0x07);
-
- MNH_DDR_PHY_OUTf(467, PHY_RDDQS_LATENCY_ADJUST_3, 0x03);
- MNH_DDR_PHY_OUTf(467, PHY_RDDQS_GATE_SLAVE_DELAY_3, 0x011a);
- MNH_DDR_PHY_OUTf(469, PHY_GTLVL_LAT_ADJ_START_3, 0x01);
- MNH_DDR_PHY_OUTf(474, PHY_RDDATA_EN_TSEL_DLY_3, 0x02);
- MNH_DDR_PHY_OUTf(474, PHY_RDDATA_EN_DLY_3, 0x03);
- MNH_DDR_PHY_OUTf(476, PHY_RPTR_UPDATE_3, 0x07);
-
- MNH_DDR_PHY_OUTf(1045, PHY_PLL_CTRL_TOP, 0x0122);
- MNH_DDR_PHY_OUTf(1045, PHY_PLL_CTRL, 0x1102);
- MNH_DDR_PHY_OUTf(1046, PHY_PLL_CTRL_CA, 0x0122);
+ setindex = 0;
+ while ((setindex < MNH_DDR_PHY_SET_SIZE) &&
+ (cfg->phy_setA[setindex][0] != 0xFFFFFFFF)) {
+ WRITE_SET_ELEMENT(cfg->phy_setA[setindex][0],
+ cfg->phy_setA[setindex][1]);
+ setindex++;
+ }
MNH_DDR_PHY_OUTf(1025, PHY_FREQ_SEL_INDEX, 2);
/* b */
- MNH_DDR_PHY_OUTf(83, PHY_RDDQS_GATE_SLAVE_DELAY_0, 0x0119);
- MNH_DDR_PHY_OUTf(90, PHY_RDDATA_EN_TSEL_DLY_0, 0x01);
- MNH_DDR_PHY_OUTf(90, PHY_RDDATA_EN_DLY_0, 0x02);
-
- MNH_DDR_PHY_OUTf(211, PHY_RDDQS_GATE_SLAVE_DELAY_1, 0x0119);
- MNH_DDR_PHY_OUTf(218, PHY_RDDATA_EN_TSEL_DLY_1, 0x01);
- MNH_DDR_PHY_OUTf(218, PHY_RDDATA_EN_DLY_1, 0x02);
-
- MNH_DDR_PHY_OUTf(339, PHY_RDDQS_GATE_SLAVE_DELAY_2, 0x0119);
- MNH_DDR_PHY_OUTf(346, PHY_RDDATA_EN_TSEL_DLY_2, 0x01);
- MNH_DDR_PHY_OUTf(346, PHY_RDDATA_EN_DLY_2, 0x02);
-
- MNH_DDR_PHY_OUTf(467, PHY_RDDQS_GATE_SLAVE_DELAY_3, 0x0119);
- MNH_DDR_PHY_OUTf(474, PHY_RDDATA_EN_TSEL_DLY_3, 0x01);
- MNH_DDR_PHY_OUTf(474, PHY_RDDATA_EN_DLY_3, 0x02);
-
- MNH_DDR_PHY_OUTf(1045, PHY_PLL_CTRL_TOP, 0x0122);
- MNH_DDR_PHY_OUTf(1045, PHY_PLL_CTRL, 0x1102);
- MNH_DDR_PHY_OUTf(1046, PHY_PLL_CTRL_CA, 0x0122);
+ setindex = 0;
+ while ((setindex < MNH_DDR_PHY_SET_SIZE) &&
+ (cfg->phy_setB[setindex][0] != 0xFFFFFFFF)) {
+ WRITE_SET_ELEMENT(cfg->phy_setB[setindex][0],
+ cfg->phy_setB[setindex][1]);
+ setindex++;
+ }
dev_dbg(dev, "%s begin training,", __func__);
MNH_DDR_PI_OUTf(00, PI_START, 1);
@@ -539,14 +541,83 @@
MNH_DDR_CTL_OUTf(165, MR_FSP_DATA_VALID_F2_0, 1);
MNH_DDR_CTL_OUTf(166, MR_FSP_DATA_VALID_F3_0, 1);
+ dev_dbg(dev, "%s: tref 0x%04x 0x%04x 0x%04x 0x%04x\n",
+ __func__, MNH_DDR_CTL_INf(56, TREF_F0),
+ MNH_DDR_CTL_INf(57, TREF_F1), MNH_DDR_CTL_INf(58, TREF_F2),
+ MNH_DDR_CTL_INf(59, TREF_F3));
+
mnh_ddr_enable_lp();
- /* Enable FSP2 => 2400 */
- mnh_lpddr_freq_change(LPDDR_FREQ_FSP2);
-
- /* Enable CPU clock after DDR init is done */
- MNH_SCU_OUTf(CCU_CLK_CTL, CPU_CLKEN, 1);
+ /* Enable FSP3 => 2400 */
+ mnh_lpddr_freq_change(LPDDR_FREQ_FSP3);
return 0;
}
EXPORT_SYMBOL(mnh_ddr_po_init);
+
+u32 mnh_ddr_mbist(struct device *dev, enum mnh_ddr_bist_type bist_type)
+{
+ u32 result = 0;
+ u32 timeout = 1000000;
+ const u32 pattern[] = {
+ 0x55555555, 0x33333333, 0x0f0f0f0f, 0x00ff00ff };
+
+ u32 old_in_order_accept;
+
+ switch (bist_type) {
+ case MOVI1_3N:
+ case LIMITED_MOVI1_3N:
+ dev_info(dev, "%s: type %d\n", __func__, bist_type);
+ break;
+ default:
+ dev_err(dev, "%s: type %d is not supported\n",
+ __func__, bist_type);
+ return 0;
+ }
+
+ mnh_ddr_disable_lp(dev);
+
+ old_in_order_accept = MNH_DDR_CTL_INf(223, IN_ORDER_ACCEPT);
+ MNH_DDR_CTL_OUTf(223, IN_ORDER_ACCEPT, 1);
+
+ /* 512MB */
+ MNH_DDR_CTL_OUTf(171, ADDR_SPACE, 0x1D);
+ MNH_DDR_CTL_OUTf(173, BIST_START_ADDRESS, 0);
+
+ MNH_DDR_CTL_OUTf(171, BIST_DATA_CHECK, 1);
+ MNH_DDR_CTL_OUTf(172, BIST_ADDR_CHECK, 0);
+
+ /* if limited movi, use these patterns */
+ MNH_DDR_CTL_OUTf(178, BIST_DATA_PATTERN, pattern[0]);
+ MNH_DDR_CTL_OUTf(179, BIST_DATA_PATTERN, pattern[1]);
+ MNH_DDR_CTL_OUTf(180, BIST_DATA_PATTERN, pattern[2]);
+ MNH_DDR_CTL_OUTf(181, BIST_DATA_PATTERN, pattern[3]);
+
+ MNH_DDR_CTL_OUTf(175, BIST_DATA_MASK, 0);
+
+ MNH_DDR_CTL_OUTf(177, BIST_TEST_MODE, bist_type);
+
+ MNH_DDR_CTL_OUTf(171, BIST_GO, 1);
+ dev_info(dev, "%s: waiting to finish BIST\n", __func__);
+
+ while (!mnh_ddr_int_status_bit(BIST_SBIT) && --timeout)
+ msleep(20);
+
+ if (!mnh_ddr_int_status_bit(BIST_SBIT)) {
+ dev_err(dev, "%s: BIST timedout: %llx\n",
+ __func__, mnh_ddr_int_status(dev));
+ } else {
+ result = MNH_DDR_CTL_INf(171, BIST_RESULT);
+ dev_info(dev, "%s: result 0x%02x\n", __func__, result);
+ }
+
+ MNH_DDR_CTL_OUTf(171, BIST_GO, 0);
+ mnh_ddr_clr_int_status(dev);
+
+ MNH_DDR_CTL_OUTf(223, IN_ORDER_ACCEPT,
+ old_in_order_accept);
+
+ mnh_ddr_enable_lp();
+ return result;
+}
+EXPORT_SYMBOL(mnh_ddr_mbist);
diff --git a/drivers/misc/mnh/mnh-ddr.h b/drivers/misc/mnh/mnh-ddr.h
index 5d6fcea..86a7d5e 100644
--- a/drivers/misc/mnh/mnh-ddr.h
+++ b/drivers/misc/mnh/mnh-ddr.h
@@ -26,36 +26,36 @@
#define MNH_DDR_NUM_PI_REG (191 + 1)
#define MNH_DDR_NUM_FSPS (4)
-#define MNH_DDR_PHY_NUM_FSPS MNH_DDR_NUM_FSPS
+#define MNH_DDR_NUM_BASES (3)
-
-struct mnh_ddr_reg_bases {
- u32 ctl_base;
- u32 phy_base;
- u32 pi_base;
-};
-
+/* arbitrary but sufficient size for phy deltas */
+#define MNH_DDR_PHY_SET_SIZE 32
+/* need reg index for setA/B */
+#define MNH_DDR_PHY_SET_ELEMS 2
struct mnh_ddr_reg_config {
+ u32 fsps[MNH_DDR_NUM_FSPS];
u32 ctl[MNH_DDR_NUM_CTL_REG];
u32 pi[MNH_DDR_NUM_PI_REG];
u32 phy[MNH_DDR_NUM_PHY_REG];
-};
-
-struct mnh_ddr_state {
- struct mnh_ddr_reg_bases bases;
- u32 fsps[MNH_DDR_NUM_FSPS];
- struct mnh_ddr_reg_config *config;
+ u32 phy_setA[MNH_DDR_PHY_SET_SIZE][MNH_DDR_PHY_SET_ELEMS];
+ u32 phy_setB[MNH_DDR_PHY_SET_SIZE][MNH_DDR_PHY_SET_ELEMS];
};
struct mnh_ddr_internal_state {
u32 ctl_base;
- u32 phy_base;
- u32 pi_base;
u32 ctl[MNH_DDR_NUM_CTL_REG];
+ u32 pi_base;
u32 pi[MNH_DDR_NUM_PI_REG];
- u32 phy[MNH_DDR_PHY_NUM_FSPS][MNH_DDR_NUM_PHY_REG];
+ u32 phy_base;
+ u32 phy[MNH_DDR_NUM_FSPS][MNH_DDR_NUM_PHY_REG];
u32 fsps[MNH_DDR_NUM_FSPS];
u32 suspend_fsp;
+ u32 tref[MNH_DDR_NUM_FSPS];
+};
+
+enum mnh_ddr_bist_type {
+ MOVI1_3N = 0,
+ LIMITED_MOVI1_3N,
};
int mnh_ddr_po_init(struct device *dev, struct gpio_desc *iso_n);
@@ -63,5 +63,6 @@
int mnh_ddr_suspend(struct device *dev, struct gpio_desc *iso_n);
int mnh_ddr_clr_int_status(struct device *dev);
u64 mnh_ddr_int_status(struct device *dev);
+u32 mnh_ddr_mbist(struct device *dev, enum mnh_ddr_bist_type bist_type);
#endif /* __MNH_DDR_H__ */
diff --git a/drivers/misc/mnh/mnh-hwio.h b/drivers/misc/mnh/mnh-hwio.h
index 205d8eb..299a9fc 100644
--- a/drivers/misc/mnh/mnh-hwio.h
+++ b/drivers/misc/mnh/mnh-hwio.h
@@ -42,9 +42,9 @@
return data;
}
-static inline void mnh_reg_write(uint32_t addr, uint32_t value)
+static inline int mnh_reg_write(uint32_t addr, uint32_t value)
{
- mnh_config_write(addr, 4, value);
+ return mnh_config_write(addr, 4, value);
}
#define HW_IN(bAddr, mod, reg) \
diff --git a/drivers/misc/mnh/mnh-mipi.c b/drivers/misc/mnh/mnh-mipi.c
index d7ed21c..a70b029 100644
--- a/drivers/misc/mnh/mnh-mipi.c
+++ b/drivers/misc/mnh/mnh-mipi.c
@@ -25,20 +25,37 @@
#define REF_FREQ_KHZ 25000
-#define TOP_IN(reg) HW_IN(HWIO_MIPI_TOP_BASE_ADDR, MIPI_TOP, reg)
-#define TOP_MASK(reg, fld) HWIO_MIPI_TOP_##reg##_##fld##_FLDMASK
-#define TOP_OUTf(reg, fld, val) \
- HW_OUTf(HWIO_MIPI_TOP_BASE_ADDR, MIPI_TOP, reg, fld, val)
-#define TOP_OUTf_LOCAL(reg, fld, val, curr) \
- ((curr & ~HWIO_MIPI_TOP_##reg##_##fld##_FLDMASK) | \
- ((val << HWIO_MIPI_TOP_##reg##_##fld##_FLDSHFT) & \
- HWIO_MIPI_TOP_##reg##_##fld##_FLDMASK))
+#define TOP_IN(reg) \
+ HW_IN(HWIO_MIPI_TOP_BASE_ADDR, MIPI_TOP, reg)
+#define TOP_MASK(reg, fld) \
+ HWIO_MIPI_TOP_##reg##_##fld##_FLDMASK
+#define TOP_OUT(...) \
+ HW_OUT(HWIO_MIPI_TOP_BASE_ADDR, MIPI_TOP, __VA_ARGS__)
+#define TOP_OUTf(...) \
+ HW_OUTf(HWIO_MIPI_TOP_BASE_ADDR, MIPI_TOP, __VA_ARGS__)
-#define TX_IN(reg) HW_IN(baddr, MIPI_TX, reg)
-#define TX_MASK(reg, fld) HWIO_MIPI_TX_##reg##_##fld##_FLDMASK
+#define TX_IN(device, reg) \
+ HW_IN(HWIO_MIPI_TX_BASE_ADDR(device), MIPI_TX, reg)
+#define TX_MASK(reg, fld) \
+ HWIO_MIPI_TX_##reg##_##fld##_FLDMASK
+#define TX_OUT(device, ...) \
+ HW_OUT(HWIO_MIPI_TX_BASE_ADDR(device), MIPI_TX, __VA_ARGS__)
+#define TX_OUTf(device, ...) \
+ HW_OUTf(HWIO_MIPI_TX_BASE_ADDR(device), MIPI_TX, __VA_ARGS__)
-#define RX_IN(reg) HW_IN(baddr, MIPI_RX, reg)
-#define RX_MASK(reg, fld) HWIO_MIPI_RX_##reg##_##fld##_FLDMASK
+#define RX_IN(device, reg) \
+ HW_IN(HWIO_MIPI_RX_BASE_ADDR(device), MIPI_RX, reg)
+#define RX_MASK(reg, fld) \
+ HWIO_MIPI_RX_##reg##_##fld##_FLDMASK
+#define RX_OUT(device, ...) \
+ HW_OUT(HWIO_MIPI_RX_BASE_ADDR(device), MIPI_RX, __VA_ARGS__)
+#define RX_OUTf(device, ...) \
+ HW_OUTf(HWIO_MIPI_RX_BASE_ADDR(device), MIPI_RX, __VA_ARGS__)
+
+#define TX_DPHY_OUT(...) \
+ mnh_sm_mipi_tx_dphy_write_gen3(__VA_ARGS__)
+#define RX_DPHY_OUT(...) \
+ mnh_sm_mipi_rx_dphy_write_gen3(__VA_ARGS__)
struct mipi_rate_config {
uint32_t rate;
@@ -181,92 +198,58 @@
static int mipi_debug;
static void mnh_sm_mipi_rx_dphy_write_gen3(int command, int data,
- uint32_t device)
+ uint32_t device)
{
/* Write 4-bit testcode MSB */
- HW_OUTf(HWIO_MIPI_RX_BASE_ADDR(device), MIPI_RX, PHY_TEST_CTRL0,
- PHY_TESTCLK, 0);
- HW_OUTf(HWIO_MIPI_RX_BASE_ADDR(device), MIPI_RX, PHY_TEST_CTRL1,
- PHY_TESTEN, 0);
- HW_OUTf(HWIO_MIPI_RX_BASE_ADDR(device), MIPI_RX, PHY_TEST_CTRL1,
- PHY_TESTEN, 1);
- HW_OUTf(HWIO_MIPI_RX_BASE_ADDR(device), MIPI_RX, PHY_TEST_CTRL0,
- PHY_TESTCLK, 1);
- HW_OUTf(HWIO_MIPI_RX_BASE_ADDR(device), MIPI_RX, PHY_TEST_CTRL1,
- PHY_TESTDIN, 0);
- HW_OUTf(HWIO_MIPI_RX_BASE_ADDR(device), MIPI_RX, PHY_TEST_CTRL0,
- PHY_TESTCLK, 0);
- HW_OUTf(HWIO_MIPI_RX_BASE_ADDR(device), MIPI_RX, PHY_TEST_CTRL1,
- PHY_TESTEN, 0);
- HW_OUTf(HWIO_MIPI_RX_BASE_ADDR(device), MIPI_RX, PHY_TEST_CTRL1,
- PHY_TESTDIN, ((command & 0xF00) >> 8));
- HW_OUTf(HWIO_MIPI_RX_BASE_ADDR(device), MIPI_RX, PHY_TEST_CTRL0,
- PHY_TESTCLK, 1);
+ RX_OUTf(device, PHY_TEST_CTRL0, PHY_TESTCLK, 0);
+ RX_OUTf(device, PHY_TEST_CTRL1, PHY_TESTEN, 0);
+ RX_OUTf(device, PHY_TEST_CTRL1, PHY_TESTEN, 1);
+ RX_OUTf(device, PHY_TEST_CTRL0, PHY_TESTCLK, 1);
+ RX_OUTf(device, PHY_TEST_CTRL1, PHY_TESTDIN, 0);
+ RX_OUTf(device, PHY_TEST_CTRL0, PHY_TESTCLK, 0);
+ RX_OUTf(device, PHY_TEST_CTRL1, PHY_TESTEN, 0);
+ RX_OUTf(device, PHY_TEST_CTRL1, PHY_TESTDIN, ((command & 0xF00) >> 8));
+ RX_OUTf(device, PHY_TEST_CTRL0, PHY_TESTCLK, 1);
/* Write 8-bit testcode LSB */
- HW_OUTf(HWIO_MIPI_RX_BASE_ADDR(device), MIPI_RX, PHY_TEST_CTRL0,
- PHY_TESTCLK, 0);
- HW_OUTf(HWIO_MIPI_RX_BASE_ADDR(device), MIPI_RX, PHY_TEST_CTRL1,
- PHY_TESTEN, 1);
- HW_OUTf(HWIO_MIPI_RX_BASE_ADDR(device), MIPI_RX, PHY_TEST_CTRL0,
- PHY_TESTCLK, 1);
- HW_OUTf(HWIO_MIPI_RX_BASE_ADDR(device), MIPI_RX, PHY_TEST_CTRL1,
- PHY_TESTDIN, (command & 0xFF));
- HW_OUTf(HWIO_MIPI_RX_BASE_ADDR(device), MIPI_RX, PHY_TEST_CTRL0,
- PHY_TESTCLK, 0);
- HW_OUTf(HWIO_MIPI_RX_BASE_ADDR(device), MIPI_RX, PHY_TEST_CTRL1,
- PHY_TESTEN, 0);
+ RX_OUTf(device, PHY_TEST_CTRL0, PHY_TESTCLK, 0);
+ RX_OUTf(device, PHY_TEST_CTRL1, PHY_TESTEN, 1);
+ RX_OUTf(device, PHY_TEST_CTRL0, PHY_TESTCLK, 1);
+ RX_OUTf(device, PHY_TEST_CTRL1, PHY_TESTDIN, (command & 0xFF));
+ RX_OUTf(device, PHY_TEST_CTRL0, PHY_TESTCLK, 0);
+ RX_OUTf(device, PHY_TEST_CTRL1, PHY_TESTEN, 0);
/* Write the data */
- HW_OUTf(HWIO_MIPI_RX_BASE_ADDR(device), MIPI_RX, PHY_TEST_CTRL1,
- PHY_TESTDIN, data);
- HW_OUTf(HWIO_MIPI_RX_BASE_ADDR(device), MIPI_RX, PHY_TEST_CTRL0,
- PHY_TESTCLK, 1);
+ RX_OUTf(device, PHY_TEST_CTRL1, PHY_TESTDIN, data);
+ RX_OUTf(device, PHY_TEST_CTRL0, PHY_TESTCLK, 1);
udelay(1);
}
static void mnh_sm_mipi_tx_dphy_write_gen3(int command, int data,
- uint32_t device)
+ uint32_t device)
{
/* Write 4-bit testcode MSB */
- HW_OUTf(HWIO_MIPI_TX_BASE_ADDR(device), MIPI_TX, PHY0_TST_CTRL0,
- PHY0_TESTCLK, 0);
- HW_OUTf(HWIO_MIPI_TX_BASE_ADDR(device), MIPI_TX, PHY0_TST_CTRL1,
- PHY0_TESTEN, 0);
- HW_OUTf(HWIO_MIPI_TX_BASE_ADDR(device), MIPI_TX, PHY0_TST_CTRL1,
- PHY0_TESTEN, 1);
- HW_OUTf(HWIO_MIPI_TX_BASE_ADDR(device), MIPI_TX, PHY0_TST_CTRL0,
- PHY0_TESTCLK, 1);
- HW_OUTf(HWIO_MIPI_TX_BASE_ADDR(device), MIPI_TX, PHY0_TST_CTRL1,
- PHY0_TESTDIN, 0);
- HW_OUTf(HWIO_MIPI_TX_BASE_ADDR(device), MIPI_TX, PHY0_TST_CTRL0,
- PHY0_TESTCLK, 0);
- HW_OUTf(HWIO_MIPI_TX_BASE_ADDR(device), MIPI_TX, PHY0_TST_CTRL1,
- PHY0_TESTEN, 0);
- HW_OUTf(HWIO_MIPI_TX_BASE_ADDR(device), MIPI_TX, PHY0_TST_CTRL1,
- PHY0_TESTDIN, ((command & 0xF00) >> 8));
- HW_OUTf(HWIO_MIPI_TX_BASE_ADDR(device), MIPI_TX, PHY0_TST_CTRL0,
- PHY0_TESTCLK, 1);
+ TX_OUTf(device, PHY0_TST_CTRL0, PHY0_TESTCLK, 0);
+ TX_OUTf(device, PHY0_TST_CTRL1, PHY0_TESTEN, 0);
+ TX_OUTf(device, PHY0_TST_CTRL1, PHY0_TESTEN, 1);
+ TX_OUTf(device, PHY0_TST_CTRL0, PHY0_TESTCLK, 1);
+ TX_OUTf(device, PHY0_TST_CTRL1, PHY0_TESTDIN, 0);
+ TX_OUTf(device, PHY0_TST_CTRL0, PHY0_TESTCLK, 0);
+ TX_OUTf(device, PHY0_TST_CTRL1, PHY0_TESTEN, 0);
+ TX_OUTf(device, PHY0_TST_CTRL1, PHY0_TESTDIN, ((command & 0xF00) >> 8));
+ TX_OUTf(device, PHY0_TST_CTRL0, PHY0_TESTCLK, 1);
/* Write 8-bit testcode LSB */
- HW_OUTf(HWIO_MIPI_TX_BASE_ADDR(device), MIPI_TX, PHY0_TST_CTRL0,
- PHY0_TESTCLK, 0);
- HW_OUTf(HWIO_MIPI_TX_BASE_ADDR(device), MIPI_TX, PHY0_TST_CTRL1,
- PHY0_TESTEN, 1);
- HW_OUTf(HWIO_MIPI_TX_BASE_ADDR(device), MIPI_TX, PHY0_TST_CTRL0,
- PHY0_TESTCLK, 1);
- HW_OUTf(HWIO_MIPI_TX_BASE_ADDR(device), MIPI_TX, PHY0_TST_CTRL1,
- PHY0_TESTDIN, (command & 0xFF));
- HW_OUTf(HWIO_MIPI_TX_BASE_ADDR(device), MIPI_TX, PHY0_TST_CTRL0,
- PHY0_TESTCLK, 0);
- HW_OUTf(HWIO_MIPI_TX_BASE_ADDR(device), MIPI_TX, PHY0_TST_CTRL1,
- PHY0_TESTEN, 0);
+ TX_OUTf(device, PHY0_TST_CTRL0, PHY0_TESTCLK, 0);
+ TX_OUTf(device, PHY0_TST_CTRL1, PHY0_TESTEN, 1);
+ TX_OUTf(device, PHY0_TST_CTRL0, PHY0_TESTCLK, 1);
+ TX_OUTf(device, PHY0_TST_CTRL1, PHY0_TESTDIN, (command & 0xFF));
+ TX_OUTf(device, PHY0_TST_CTRL0, PHY0_TESTCLK, 0);
+ TX_OUTf(device, PHY0_TST_CTRL1, PHY0_TESTEN, 0);
/* Write the data */
- HW_OUTf(HWIO_MIPI_TX_BASE_ADDR(device), MIPI_TX, PHY0_TST_CTRL1,
- PHY0_TESTDIN, data);
- HW_OUTf(HWIO_MIPI_TX_BASE_ADDR(device), MIPI_TX, PHY0_TST_CTRL0,
- PHY0_TESTCLK, 1);
+ TX_OUTf(device, PHY0_TST_CTRL1, PHY0_TESTDIN, data);
+ TX_OUTf(device, PHY0_TST_CTRL0, PHY0_TESTCLK, 1);
udelay(1);
}
@@ -276,21 +259,21 @@
{
dev_dbg(dev, "%s: dev %d\n", __func__, device);
if (enable == 1) {
- mnh_sm_mipi_rx_dphy_write_gen3(0x1AE, 0x07, device);
- mnh_sm_mipi_rx_dphy_write_gen3(0x1AD, 0xe0, device);
- mnh_sm_mipi_rx_dphy_write_gen3(0x305, 0x06, device);
- mnh_sm_mipi_rx_dphy_write_gen3(0x505, 0x06, device);
- mnh_sm_mipi_rx_dphy_write_gen3(0x705, 0x06, device);
- mnh_sm_mipi_rx_dphy_write_gen3(0x905, 0x06, device);
- mnh_sm_mipi_rx_dphy_write_gen3(0xB05, 0x06, device);
+ RX_DPHY_OUT(0x1AE, 0x07, device);
+ RX_DPHY_OUT(0x1AD, 0xe0, device);
+ RX_DPHY_OUT(0x305, 0x06, device);
+ RX_DPHY_OUT(0x505, 0x06, device);
+ RX_DPHY_OUT(0x705, 0x06, device);
+ RX_DPHY_OUT(0x905, 0x06, device);
+ RX_DPHY_OUT(0xB05, 0x06, device);
} else {
- mnh_sm_mipi_rx_dphy_write_gen3(0x1AE, 0x00, device);
- mnh_sm_mipi_rx_dphy_write_gen3(0x1AD, 0x00, device);
- mnh_sm_mipi_rx_dphy_write_gen3(0x305, 0x00, device);
- mnh_sm_mipi_rx_dphy_write_gen3(0x505, 0x00, device);
- mnh_sm_mipi_rx_dphy_write_gen3(0x705, 0x00, device);
- mnh_sm_mipi_rx_dphy_write_gen3(0x905, 0x00, device);
- mnh_sm_mipi_rx_dphy_write_gen3(0xB05, 0x00, device);
+ RX_DPHY_OUT(0x1AE, 0x00, device);
+ RX_DPHY_OUT(0x1AD, 0x00, device);
+ RX_DPHY_OUT(0x305, 0x00, device);
+ RX_DPHY_OUT(0x505, 0x00, device);
+ RX_DPHY_OUT(0x705, 0x00, device);
+ RX_DPHY_OUT(0x905, 0x00, device);
+ RX_DPHY_OUT(0xB05, 0x00, device);
}
}
@@ -307,108 +290,100 @@
return i - 1;
}
- return i;
+ return i - 1;
}
static int mnh_mipi_gen3_lookup_device_cfg_idx(uint32_t rate)
{
int i;
- for (i = 0; i < ARRAY_SIZE(mipi_dev_ovr_cfgs); i++)
- if (rate == mipi_dev_ovr_cfgs[i].rate)
- return i;
+ if ((rate < mipi_dev_ovr_cfgs[0].rate) ||
+ (rate > mipi_dev_ovr_cfgs[ARRAY_SIZE(mipi_dev_ovr_cfgs)-1].rate))
+ return -EINVAL;
- return -EINVAL;
+ for (i = 0; i < ARRAY_SIZE(mipi_dev_ovr_cfgs); i++) {
+ if (rate <= mipi_dev_ovr_cfgs[i].rate)
+ return i;
+ }
+
+ return i - 1;
}
-static void mnh_mipi_gen3_host(struct device *dev, uint32_t device,
+static int mnh_mipi_gen3_host(struct device *dev, uint32_t device,
uint32_t rate)
{
uint32_t code_index, freq_range_code, osc_freq_code;
/* only support devices 0-2 */
if (device > 2)
- return;
+ return -EINVAL;
dev_dbg(dev, "%s: dev %d, rate %d\n", __func__, device, rate);
- HW_OUTf(HWIO_MIPI_RX_BASE_ADDR(device), MIPI_RX, N_LANES, N_LANES, 0x0);
+ RX_OUTf(device, N_LANES, N_LANES, 0x0);
/* clear interrupts */
- HW_OUT(HWIO_MIPI_RX_BASE_ADDR(device), MIPI_RX, INT_MSK_PHY_FATAL,
- 0xFFFFFFFF);
- HW_OUT(HWIO_MIPI_RX_BASE_ADDR(device), MIPI_RX, INT_MSK_PKT_FATAL,
- 0xFFFFFFFF);
- HW_OUT(HWIO_MIPI_RX_BASE_ADDR(device), MIPI_RX, INT_MSK_FRAME_FATAL,
- 0xFFFFFFFF);
- HW_OUT(HWIO_MIPI_RX_BASE_ADDR(device), MIPI_RX, INT_MSK_PHY,
- 0xFFFFFFFF);
- HW_OUT(HWIO_MIPI_RX_BASE_ADDR(device), MIPI_RX, INT_MSK_PKT,
- 0xFFFFFFFF);
- HW_OUT(HWIO_MIPI_RX_BASE_ADDR(device), MIPI_RX, INT_MSK_LINE,
- 0xFFFFFFFF);
+ RX_OUT(device, INT_MSK_PHY_FATAL, 0xFFFFFFFF);
+ RX_OUT(device, INT_MSK_PKT_FATAL, 0xFFFFFFFF);
+ RX_OUT(device, INT_MSK_FRAME_FATAL, 0xFFFFFFFF);
+ RX_OUT(device, INT_MSK_PHY, 0xFFFFFFFF);
+ RX_OUT(device, INT_MSK_PKT, 0xFFFFFFFF);
+ RX_OUT(device, INT_MSK_LINE, 0xFFFFFFFF);
/* enable clock to controller */
if (device == 0)
- HW_OUTf(HWIO_MIPI_TOP_BASE_ADDR, MIPI_TOP, CSI_CLK_CTRL,
- CSI2_RX0_CG, 0x0);
+ TOP_OUTf(CSI_CLK_CTRL, CSI2_RX0_CG, 0x0);
else if (device == 1)
- HW_OUTf(HWIO_MIPI_TOP_BASE_ADDR, MIPI_TOP, CSI_CLK_CTRL,
- CSI2_RX1_CG, 0x0);
+ TOP_OUTf(CSI_CLK_CTRL, CSI2_RX1_CG, 0x0);
else
- HW_OUTf(HWIO_MIPI_TOP_BASE_ADDR, MIPI_TOP, CSI_CLK_CTRL,
- CSI2_RX2_CG, 0x0);
+ TOP_OUTf(CSI_CLK_CTRL, CSI2_RX2_CG, 0x0);
/* disable the PHY before changing settings */
- HW_OUTf(HWIO_MIPI_RX_BASE_ADDR(device), MIPI_RX, PHY_SHUTDOWNZ,
- PHY_SHUTDOWNZ, 0x0);
- HW_OUTf(HWIO_MIPI_RX_BASE_ADDR(device), MIPI_RX, DPHY_RSTZ, DPHY_RSTZ,
- 0x0);
- HW_OUT(HWIO_MIPI_RX_BASE_ADDR(device), MIPI_RX, PHY_TEST_CTRL0, 0x1);
+ RX_OUTf(device, PHY_SHUTDOWNZ, PHY_SHUTDOWNZ, 0x0);
+ RX_OUTf(device, DPHY_RSTZ, DPHY_RSTZ, 0x0);
+ RX_OUT(device, PHY_TEST_CTRL0, 0x1);
udelay(1);
- HW_OUT(HWIO_MIPI_RX_BASE_ADDR(device), MIPI_RX, PHY_TEST_CTRL0, 0x0);
+ RX_OUT(device, PHY_TEST_CTRL0, 0x0);
/* get the PHY settings */
code_index = mnh_mipi_gen3_lookup_freq_code(rate);
+ if (code_index < 0)
+ return -EINVAL;
freq_range_code = mipi_rate_configs[code_index].freq_range_code;
osc_freq_code = mipi_rate_configs[code_index].osc_freq_code;
/* update PHY configuration */
if (device == 0)
- HW_OUT(HWIO_MIPI_TOP_BASE_ADDR, MIPI_TOP, RX0_DPHY_CONFIG,
- 0x8400 | freq_range_code);
+ TOP_OUT(RX0_DPHY_CONFIG, 0x8400 | freq_range_code);
else if (device == 1)
- HW_OUT(HWIO_MIPI_TOP_BASE_ADDR, MIPI_TOP, RX1_DPHY_CONFIG,
- 0x8400 | freq_range_code);
+ TOP_OUT(RX1_DPHY_CONFIG, 0x8400 | freq_range_code);
else
- HW_OUT(HWIO_MIPI_TOP_BASE_ADDR, MIPI_TOP, RX2_DPHY_CONFIG,
- 0x8400 | freq_range_code);
+ TOP_OUT(RX2_DPHY_CONFIG, 0x8400 | freq_range_code);
/* update PHY PLL settings */
- mnh_sm_mipi_rx_dphy_write_gen3(0xe2, osc_freq_code & 0xFF, device);
- mnh_sm_mipi_rx_dphy_write_gen3(0xe3, osc_freq_code >> 8, device);
- mnh_sm_mipi_rx_dphy_write_gen3(0xe4, 0x01, device);
- mnh_sm_mipi_rx_dphy_write_gen3(0x08, 0x20, device);
+ RX_DPHY_OUT(0xe2, osc_freq_code & 0xFF, device);
+ RX_DPHY_OUT(0xe3, osc_freq_code >> 8, device);
+ RX_DPHY_OUT(0xe4, 0x01, device);
+ RX_DPHY_OUT(0x08, 0x20, device);
udelay(1);
- HW_OUTf(HWIO_MIPI_RX_BASE_ADDR(device), MIPI_RX, N_LANES, N_LANES, 0x3);
+ RX_OUTf(device, N_LANES, N_LANES, 0x3);
udelay(1);
/* b/63578602 */
mnh_mipi_gen3_lprxpon_wa(dev, device, 1);
/* release reset */
- HW_OUTf(HWIO_MIPI_RX_BASE_ADDR(device), MIPI_RX, PHY_SHUTDOWNZ,
- PHY_SHUTDOWNZ, 0x1);
+ RX_OUTf(device, PHY_SHUTDOWNZ, PHY_SHUTDOWNZ, 0x1);
udelay(20);
- HW_OUTf(HWIO_MIPI_RX_BASE_ADDR(device), MIPI_RX, DPHY_RSTZ, DPHY_RSTZ,
- 0x1);
- HW_OUTf(HWIO_MIPI_RX_BASE_ADDR(device), MIPI_RX, CSI2_RESETN,
- CSI2_RESETN, 0x1);
+ RX_OUTf(device, DPHY_RSTZ, DPHY_RSTZ, 0x1);
+ RX_OUTf(device, CSI2_RESETN, CSI2_RESETN, 0x1);
udelay(30);
mnh_mipi_gen3_lprxpon_wa(dev, device, 0);
+
+ return 0;
}
static uint8_t mnh_mipi_get_vco_cntrl(uint32_t rate)
@@ -474,8 +449,8 @@
*n = best_n;
}
-static void mnh_mipi_gen3_device(struct device *dev, uint32_t device,
- uint32_t rate)
+static int mnh_mipi_gen3_device(struct device *dev, uint32_t device,
+ uint32_t rate)
{
uint32_t code_index, freq_range_code, osc_freq_code;
struct mipi_dev_ovr_cfg *dev_ovr_cfg;
@@ -530,88 +505,73 @@
/* only support devices 0-1 */
if (device > 1)
- return;
+ return -EINVAL;
if (device == 0)
- HW_OUTf(HWIO_MIPI_TOP_BASE_ADDR, MIPI_TOP, CSI_CLK_CTRL,
- CSI2_TX0_CG, 0x0);
+ TOP_OUTf(CSI_CLK_CTRL, CSI2_TX0_CG, 0x0);
else
- HW_OUTf(HWIO_MIPI_TOP_BASE_ADDR, MIPI_TOP, CSI_CLK_CTRL,
- CSI2_TX1_CG, 0x0);
+ TOP_OUTf(CSI_CLK_CTRL, CSI2_TX1_CG, 0x0);
/* mipicsi_device_hw_init */
- HW_OUTf(HWIO_MIPI_TX_BASE_ADDR(device), MIPI_TX, PHY_RSTZ,
- PHY_SHUTDOWNZ, 0x1);
+ TX_OUTf(device, PHY_RSTZ, PHY_SHUTDOWNZ, 0x1);
/* mipicsi_device_dphy_reset */
- HW_OUTf(HWIO_MIPI_TX_BASE_ADDR(device), MIPI_TX, PHY_RSTZ, PHY_RSTZ,
- 0x1);
+ TX_OUTf(device, PHY_RSTZ, PHY_RSTZ, 0x1);
udelay(1000);
- HW_OUTf(HWIO_MIPI_TX_BASE_ADDR(device), MIPI_TX, PHY_RSTZ, PHY_RSTZ,
- 0x0);
- HW_OUT(HWIO_MIPI_TX_BASE_ADDR(device), MIPI_TX, CSI2_RESETN, 0x0);
+ TX_OUTf(device, PHY_RSTZ, PHY_RSTZ, 0x0);
+ TX_OUT(device, CSI2_RESETN, 0x0);
udelay(1);
- HW_OUT(HWIO_MIPI_TX_BASE_ADDR(device), MIPI_TX, CSI2_RESETN, 0x1);
- HW_OUT(HWIO_MIPI_TX_BASE_ADDR(device), MIPI_TX, INT_MASK_N_VPG,
- 0xFFFFFFFF);
- HW_OUT(HWIO_MIPI_TX_BASE_ADDR(device), MIPI_TX, INT_MASK_N_IDI,
- 0xFFFFFFFF);
- HW_OUTf(HWIO_MIPI_TX_BASE_ADDR(device), MIPI_TX, PHY_RSTZ,
- PHY_SHUTDOWNZ,
- 0x0);
+ TX_OUT(device, CSI2_RESETN, 0x1);
+ TX_OUT(device, INT_MASK_N_VPG, 0xFFFFFFFF);
+ TX_OUT(device, INT_MASK_N_IDI, 0xFFFFFFFF);
+ TX_OUTf(device, PHY_RSTZ, PHY_SHUTDOWNZ, 0x0);
/* disable clock gating */
if (device == 0) {
- HW_OUTf(HWIO_MIPI_TOP_BASE_ADDR, MIPI_TOP, TX0_DPHY_PLL_CNTRL,
- PLL_SHADOW_CONTROL, 0x1);
- HW_OUTf(HWIO_MIPI_TOP_BASE_ADDR, MIPI_TOP, TX0_DPHY_PLL_CNTRL,
- CLK_SEL, 0x1);
+ TOP_OUTf(TX0_DPHY_PLL_CNTRL, PLL_SHADOW_CONTROL, 0x1);
+ TOP_OUTf(TX0_DPHY_PLL_CNTRL, CLK_SEL, 0x1);
} else {
- HW_OUTf(HWIO_MIPI_TOP_BASE_ADDR, MIPI_TOP, TX1_DPHY_PLL_CNTRL,
- PLL_SHADOW_CONTROL, 0x1);
- HW_OUTf(HWIO_MIPI_TOP_BASE_ADDR, MIPI_TOP, TX1_DPHY_PLL_CNTRL,
- CLK_SEL, 0x1);
+ TOP_OUTf(TX1_DPHY_PLL_CNTRL, PLL_SHADOW_CONTROL, 0x1);
+ TOP_OUTf(TX1_DPHY_PLL_CNTRL, CLK_SEL, 0x1);
}
/* disable the PHY before changing settings */
- HW_OUTf(HWIO_MIPI_TX_BASE_ADDR(device), MIPI_TX, CSI2_RESETN,
- CSI2_RESETN_RW, 1);
- HW_OUT(HWIO_MIPI_TX_BASE_ADDR(device), MIPI_TX, PHY_RSTZ, 0x0);
- HW_OUTf(HWIO_MIPI_TX_BASE_ADDR(device), MIPI_TX, PHY_RSTZ,
- PHY_ENABLECLK, 1);
- HW_OUT(HWIO_MIPI_TX_BASE_ADDR(device), MIPI_TX, PHY0_TST_CTRL0, 0x1);
+ TX_OUTf(device, CSI2_RESETN, CSI2_RESETN_RW, 1);
+ TX_OUT(device, PHY_RSTZ, 0x0);
+ TX_OUTf(device, PHY_RSTZ, PHY_ENABLECLK, 1);
+ TX_OUT(device, PHY0_TST_CTRL0, 0x1);
udelay(1);
- HW_OUT(HWIO_MIPI_TX_BASE_ADDR(device), MIPI_TX, PHY0_TST_CTRL0, 0x0);
+ TX_OUT(device, PHY0_TST_CTRL0, 0x0);
/* get the PHY settings */
code_index = mnh_mipi_gen3_lookup_freq_code(rate);
+ if (code_index < 0)
+ return -EINVAL;
freq_range_code = mipi_rate_configs[code_index].freq_range_code;
osc_freq_code = mipi_rate_configs[code_index].osc_freq_code;
/* update PHY configuration */
if (device == 0)
- HW_OUT(HWIO_MIPI_TOP_BASE_ADDR, MIPI_TOP, TX0_DPHY_CONFIG,
- 0x8400 | freq_range_code);
+ TOP_OUT(TX0_DPHY_CONFIG, 0x8400 | freq_range_code);
else
- HW_OUT(HWIO_MIPI_TOP_BASE_ADDR, MIPI_TOP, TX1_DPHY_CONFIG,
- 0x8400 | freq_range_code);
+ TOP_OUT(TX1_DPHY_CONFIG, 0x8400 | freq_range_code);
/* configure slew rate calibration */
if (rate > 1500) {
- mnh_sm_mipi_tx_dphy_write_gen3(0x26B, 0x44, device);
- mnh_sm_mipi_tx_dphy_write_gen3(0x272, 0x00, device);
+ TX_DPHY_OUT(0x26B, 0x44, device);
+ TX_DPHY_OUT(0x272, 0x00, device);
} else if (rate > 1000) {
- mnh_sm_mipi_tx_dphy_write_gen3(0x270, 0xD0, device);
- mnh_sm_mipi_tx_dphy_write_gen3(0x271, 0x07, device);
- mnh_sm_mipi_tx_dphy_write_gen3(0x272, 0x10, device);
+ TX_DPHY_OUT(0x270, 0xD0, device);
+ TX_DPHY_OUT(0x271, 0x07, device);
+ TX_DPHY_OUT(0x272, 0x10, device);
} else if (rate > 500) {
- mnh_sm_mipi_tx_dphy_write_gen3(0x270, 0xE2, device);
- mnh_sm_mipi_tx_dphy_write_gen3(0x271, 0x04, device);
- mnh_sm_mipi_tx_dphy_write_gen3(0x272, 0x11, device);
+ TX_DPHY_OUT(0x270, 0xE2, device);
+ TX_DPHY_OUT(0x271, 0x04, device);
+ TX_DPHY_OUT(0x272, 0x11, device);
} else {
- mnh_sm_mipi_tx_dphy_write_gen3(0x270, 0x84, device);
- mnh_sm_mipi_tx_dphy_write_gen3(0x271, 0x03, device);
- mnh_sm_mipi_tx_dphy_write_gen3(0x272, 0x11, device);
+ TX_DPHY_OUT(0x270, 0x84, device);
+ TX_DPHY_OUT(0x271, 0x03, device);
+ TX_DPHY_OUT(0x272, 0x11, device);
}
/* get PLL coefficients and codes */
@@ -620,6 +580,8 @@
/* mipi device configuration settings from lookup table */
mipi_dev_cfg_index = mnh_mipi_gen3_lookup_device_cfg_idx(rate);
+ if (mipi_dev_cfg_index < 0)
+ return -EINVAL;
dev_ovr_cfg = &mipi_dev_ovr_cfgs[mipi_dev_cfg_index];
dev_dbg(dev, "%s: Device configuration index: %d\n", __func__,
mipi_dev_cfg_index);
@@ -629,93 +591,73 @@
pll_n -= 1;
/* configure PLL frequency multiplication and division factors */
- mnh_sm_mipi_tx_dphy_write_gen3(0x17B, ((vco_cntrl & 0x3F) << 1) | 0x81,
+ TX_DPHY_OUT(0x17B, ((vco_cntrl & 0x3F) << 1) | 0x81,
device);
- mnh_sm_mipi_tx_dphy_write_gen3(0x178, 0x80 | ((pll_n & 0xF) << 3),
+ TX_DPHY_OUT(0x178, 0x80 | ((pll_n & 0xF) << 3),
device);
- mnh_sm_mipi_tx_dphy_write_gen3(0x179, pll_m & 0xFF, device);
- mnh_sm_mipi_tx_dphy_write_gen3(0x17A, (pll_m >> 8) & 0x3, device);
+ TX_DPHY_OUT(0x179, pll_m & 0xFF, device);
+ TX_DPHY_OUT(0x17A, (pll_m >> 8) & 0x3, device);
/* configure rate-independent PLL settings */
- mnh_sm_mipi_tx_dphy_write_gen3(0x15E, 0x10, device);
- mnh_sm_mipi_tx_dphy_write_gen3(0x162, 0x04, device);
- mnh_sm_mipi_tx_dphy_write_gen3(0x16E, 0x0C, device);
- mnh_sm_mipi_tx_dphy_write_gen3(0x173, 0x02, device);
- mnh_sm_mipi_tx_dphy_write_gen3(0x174, 0x00, device);
- mnh_sm_mipi_tx_dphy_write_gen3(0x175, 0x60, device);
- mnh_sm_mipi_tx_dphy_write_gen3(0x176, 0x03, device);
+ TX_DPHY_OUT(0x15E, 0x10, device);
+ TX_DPHY_OUT(0x162, 0x04, device);
+ TX_DPHY_OUT(0x16E, 0x0C, device);
+ TX_DPHY_OUT(0x173, 0x02, device);
+ TX_DPHY_OUT(0x174, 0x00, device);
+ TX_DPHY_OUT(0x175, 0x60, device);
+ TX_DPHY_OUT(0x176, 0x03, device);
/* TODO: Maybe do a read-modify-write */
if (rate <= 450)
- mnh_sm_mipi_tx_dphy_write_gen3(0x1AC, 1 << 4, device);
+ TX_DPHY_OUT(0x1AC, 1 << 4, device);
/* Wait for 15ns */
udelay(1);
/* Timing register overrides */
if (!mipi_debug && dev_ovr_cfg->use_ovrd) {
- mnh_sm_mipi_tx_dphy_write_gen3(0x5A, dev_ovr_cfg->reg_0x5A,
- device);
- mnh_sm_mipi_tx_dphy_write_gen3(0x5B, dev_ovr_cfg->reg_0x5B,
- device);
- mnh_sm_mipi_tx_dphy_write_gen3(0x5C, dev_ovr_cfg->reg_0x5C,
- device);
- mnh_sm_mipi_tx_dphy_write_gen3(0x5D, dev_ovr_cfg->reg_0x5D,
- device);
- /*
- * mnh_sm_mipi_tx_dphy_write_gen3(0x5E, dev_ovr_cfg->reg_0x5E,
- * device);
- */
- mnh_sm_mipi_tx_dphy_write_gen3(0x5F, dev_ovr_cfg->reg_0x5F,
- device);
- mnh_sm_mipi_tx_dphy_write_gen3(0x61, dev_ovr_cfg->reg_0x61,
- device);
- mnh_sm_mipi_tx_dphy_write_gen3(0x62, dev_ovr_cfg->reg_0x62,
- device);
- mnh_sm_mipi_tx_dphy_write_gen3(0x63, dev_ovr_cfg->reg_0x63,
- device);
- /*
- * mnh_sm_mipi_tx_dphy_write_gen3(0x64, dev_ovr_cfg->reg_0x64,
- * device);
- */
- mnh_sm_mipi_tx_dphy_write_gen3(0x65, dev_ovr_cfg->reg_0x65,
- device);
+ TX_DPHY_OUT(0x5A, dev_ovr_cfg->reg_0x5A, device);
+ TX_DPHY_OUT(0x5B, dev_ovr_cfg->reg_0x5B, device);
+ TX_DPHY_OUT(0x5C, dev_ovr_cfg->reg_0x5C, device);
+ TX_DPHY_OUT(0x5D, dev_ovr_cfg->reg_0x5D, device);
+ /* TX_DPHY_OUT(0x5E, dev_ovr_cfg->reg_0x5E, device); */
+ TX_DPHY_OUT(0x5F, dev_ovr_cfg->reg_0x5F, device);
+ TX_DPHY_OUT(0x61, dev_ovr_cfg->reg_0x61, device);
+ TX_DPHY_OUT(0x62, dev_ovr_cfg->reg_0x62, device);
+ TX_DPHY_OUT(0x63, dev_ovr_cfg->reg_0x63, device);
+ /* TX_DPHY_OUT(0x64, dev_ovr_cfg->reg_0x64, device); */
+ TX_DPHY_OUT(0x65, dev_ovr_cfg->reg_0x65, device);
}
/* Enable high speed drivers (required for Tlpx < 500ns) */
/* Clock lane driver */
- mnh_sm_mipi_tx_dphy_write_gen3(0x304, (1 << 2) | (1 << 3), device);
+ TX_DPHY_OUT(0x304, (1 << 2) | (1 << 3), device);
/* Data lane 0 driver */
- mnh_sm_mipi_tx_dphy_write_gen3(0x504, (1 << 2) | (1 << 3), device);
+ TX_DPHY_OUT(0x504, (1 << 2) | (1 << 3), device);
/* Data lane 1 driver */
- mnh_sm_mipi_tx_dphy_write_gen3(0x704, (1 << 2) | (1 << 3), device);
+ TX_DPHY_OUT(0x704, (1 << 2) | (1 << 3), device);
/* Data lane 2 driver */
- mnh_sm_mipi_tx_dphy_write_gen3(0x904, (1 << 2) | (1 << 3), device);
+ TX_DPHY_OUT(0x904, (1 << 2) | (1 << 3), device);
/* Data lane 3 driver */
- mnh_sm_mipi_tx_dphy_write_gen3(0xB04, (1 << 2) | (1 << 3), device);
+ TX_DPHY_OUT(0xB04, (1 << 2) | (1 << 3), device);
/* Enable lanes */
- HW_OUTf(HWIO_MIPI_TX_BASE_ADDR(device), MIPI_TX, PHY_IF_CFG,
- LANE_EN_NUM, 3);
+ TX_OUTf(device, PHY_IF_CFG, LANE_EN_NUM, 3);
/* Table A-4 High-Speed Transition Times: Data HS-LP */
- HW_OUTf(HWIO_MIPI_TX_BASE_ADDR(device), MIPI_TX, PHY_IF_CFG,
- PHY_STOP_WAIT_TIME, dev_ovr_cfg->wait_time);
+ TX_OUTf(device, PHY_IF_CFG, PHY_STOP_WAIT_TIME, dev_ovr_cfg->wait_time);
/* enable the PHY */
- HW_OUTf(HWIO_MIPI_TX_BASE_ADDR(device), MIPI_TX, PHY_RSTZ,
- PHY_ENABLECLK, 1);
+ TX_OUTf(device, PHY_RSTZ, PHY_ENABLECLK, 1);
udelay(1);
- HW_OUTf(HWIO_MIPI_TX_BASE_ADDR(device), MIPI_TX, PHY_RSTZ,
- PHY_SHUTDOWNZ, 1);
+ TX_OUTf(device, PHY_RSTZ, PHY_SHUTDOWNZ, 1);
udelay(1);
- HW_OUTf(HWIO_MIPI_TX_BASE_ADDR(device), MIPI_TX, PHY_RSTZ, PHY_RSTZ, 1);
+ TX_OUTf(device, PHY_RSTZ, PHY_RSTZ, 1);
/* wait for data & clk lanes to reach the low-power state */
i = 0;
do {
- data = HW_IN(HWIO_MIPI_TX_BASE_ADDR(device), MIPI_TX,
- PHY_STATUS);
+ data = TX_IN(device, PHY_STATUS);
udelay(10);
i++;
} while ((i < 40) && ((data & 0x1550) != 0x1550));
@@ -730,8 +672,9 @@
/* Enable ct clock, this takes immediate effect */
dev_dbg(dev, "%s: enabling ct. clock mode\n", __func__);
- HW_OUTf(HWIO_MIPI_TX_BASE_ADDR(device), MIPI_TX,
- LPCLK_CTRL, PHY_TXREQCLKHS_CON, 1);
+ TX_OUTf(device, LPCLK_CTRL, PHY_TXREQCLKHS_CON, 1);
+
+ return 0;
}
static int mnh_mipi_config_mux_sel(struct device *dev,
@@ -770,11 +713,11 @@
* need to write once.
*/
if (rxdev == MIPI_RX0)
- rx_mode = HW_IN(HWIO_MIPI_TOP_BASE_ADDR, MIPI_TOP, RX0_MODE);
+ rx_mode = TOP_IN(RX0_MODE);
else if (rxdev == MIPI_RX1)
- rx_mode = HW_IN(HWIO_MIPI_TOP_BASE_ADDR, MIPI_TOP, RX1_MODE);
+ rx_mode = TOP_IN(RX1_MODE);
else if (rxdev == MIPI_RX2)
- rx_mode = HW_IN(HWIO_MIPI_TOP_BASE_ADDR, MIPI_TOP, RX2_MODE);
+ rx_mode = TOP_IN(RX2_MODE);
else
rx_mode = 0; /* rx_mode register not required for IPU source */
/*
@@ -824,26 +767,19 @@
case MIPI_TX0:
switch (rxdev) { /* Source */
case MIPI_RX0: /* RX0 -> TX0 */
- HW_OUT(HWIO_MIPI_TOP_BASE_ADDR,
- MIPI_TOP, RX0_MODE, rx_mode);
- HW_OUT(HWIO_MIPI_TOP_BASE_ADDR,
- MIPI_TOP, TX0_MODE, tx_mode);
+ TOP_OUT(RX0_MODE, rx_mode);
+ TOP_OUT(TX0_MODE, tx_mode);
break;
case MIPI_RX1: /* RX1 -> TX0 */
- HW_OUT(HWIO_MIPI_TOP_BASE_ADDR,
- MIPI_TOP, RX1_MODE, rx_mode);
- HW_OUT(HWIO_MIPI_TOP_BASE_ADDR,
- MIPI_TOP, TX0_MODE, tx_mode);
+ TOP_OUT(RX1_MODE, rx_mode);
+ TOP_OUT(TX0_MODE, tx_mode);
break;
case MIPI_RX2: /* RX2 -> TX0 */
- HW_OUT(HWIO_MIPI_TOP_BASE_ADDR,
- MIPI_TOP, RX2_MODE, rx_mode);
- HW_OUT(HWIO_MIPI_TOP_BASE_ADDR,
- MIPI_TOP, TX0_MODE, tx_mode);
+ TOP_OUT(RX2_MODE, rx_mode);
+ TOP_OUT(TX0_MODE, tx_mode);
break;
case MIPI_RX_IPU: /* IPU -> TX0 */
- HW_OUT(HWIO_MIPI_TOP_BASE_ADDR,
- MIPI_TOP, TX0_MODE, tx_mode);
+ TOP_OUT(TX0_MODE, tx_mode);
break;
default:
dev_err(dev, "%s: invalid rx device %d!\n", __func__,
@@ -854,27 +790,20 @@
case MIPI_TX1:
switch (rxdev) {
case MIPI_RX0: /* RX0 -> TX1 */
- HW_OUT(HWIO_MIPI_TOP_BASE_ADDR,
- MIPI_TOP, RX0_MODE, rx_mode);
- HW_OUT(HWIO_MIPI_TOP_BASE_ADDR,
- MIPI_TOP, TX1_MODE, tx_mode);
+ TOP_OUT(RX0_MODE, rx_mode);
+ TOP_OUT(TX1_MODE, tx_mode);
break;
case MIPI_RX1: /* RX1 -> TX1 */
- HW_OUT(HWIO_MIPI_TOP_BASE_ADDR,
- MIPI_TOP, RX1_MODE, rx_mode);
- HW_OUT(HWIO_MIPI_TOP_BASE_ADDR,
- MIPI_TOP, TX1_MODE, tx_mode);
+ TOP_OUT(RX1_MODE, rx_mode);
+ TOP_OUT(TX1_MODE, tx_mode);
break;
case MIPI_RX2: /* RX2 -> TX1 */
- HW_OUT(HWIO_MIPI_TOP_BASE_ADDR,
- MIPI_TOP, RX2_MODE, rx_mode);
- HW_OUT(HWIO_MIPI_TOP_BASE_ADDR,
- MIPI_TOP, TX1_MODE, tx_mode);
+ TOP_OUT(RX2_MODE, rx_mode);
+ TOP_OUT(TX1_MODE, tx_mode);
break;
case MIPI_RX_IPU:
/* Configure IPU -> Tx1 */
- HW_OUT(HWIO_MIPI_TOP_BASE_ADDR,
- MIPI_TOP, TX1_MODE, tx_mode);
+ TOP_OUT(TX1_MODE, tx_mode);
break;
default:
dev_err(dev, "%s: invalid rx device %d!\n", __func__,
@@ -885,16 +814,13 @@
case MIPI_TX_IPU:
switch (rxdev) {
case MIPI_RX0: /* Rx0 -> IPU */
- HW_OUT(HWIO_MIPI_TOP_BASE_ADDR,
- MIPI_TOP, RX0_MODE, rx_mode);
+ TOP_OUT(RX0_MODE, rx_mode);
break;
case MIPI_RX1:
- HW_OUT(HWIO_MIPI_TOP_BASE_ADDR,
- MIPI_TOP, RX1_MODE, rx_mode);
+ TOP_OUT(RX1_MODE, rx_mode);
break;
case MIPI_RX2:
- HW_OUT(HWIO_MIPI_TOP_BASE_ADDR,
- MIPI_TOP, RX2_MODE, rx_mode);
+ TOP_OUT(RX2_MODE, rx_mode);
break;
default:
dev_err(dev, "%s: invalid rx device %d!\n", __func__,
@@ -906,6 +832,7 @@
dev_err(dev, "%s: invalid tx device %d!\n", __func__, txdev);
return -EINVAL;
}
+
return 0;
}
@@ -916,6 +843,7 @@
uint32_t rx_rate = config.rx_rate;
uint32_t tx_rate = config.tx_rate;
uint32_t vc_en_mask = config.vc_en_mask;
+ int ret = 0;
dev_dbg(dev, "%s: init rxdev %d, txdev %d, rx_rate %d, tx_rate %d, vc_en_mask 0x%1x\n",
__func__, rxdev, txdev, rx_rate, tx_rate, vc_en_mask);
@@ -927,7 +855,12 @@
case MIPI_RX2:
dev_dbg(dev, "%s: configuring host controller Rx%d\n",
__func__, rxdev);
- mnh_mipi_gen3_host(dev, rxdev, rx_rate);
+ ret = mnh_mipi_gen3_host(dev, rxdev, rx_rate);
+ if (ret) {
+ dev_err(dev, "%s: mnh_mipi_gen3_host failed (%d)\n",
+ __func__, ret);
+ return ret;
+ }
break;
case MIPI_RX_IPU:
dev_dbg(dev, "%s: configuring IPU IDI Tx%d as MIPI source\n",
@@ -944,7 +877,12 @@
case MIPI_TX1:
dev_dbg(dev, "%s: configuring device controller Tx%d\n",
__func__, txdev);
- mnh_mipi_gen3_device(dev, txdev, tx_rate);
+ ret = mnh_mipi_gen3_device(dev, txdev, tx_rate);
+ if (ret) {
+ dev_err(dev, "%s: mnh_mipi_gen3_device failed (%d)\n",
+ __func__, ret);
+ return ret;
+ }
break;
case MIPI_TX_IPU:
dev_dbg(dev, "%s: configuring IPU IDI Rx%d as MIPI sink\n",
@@ -956,9 +894,14 @@
}
/* configure mux select */
- mnh_mipi_config_mux_sel(dev, &config);
+ ret = mnh_mipi_config_mux_sel(dev, &config);
+ if (ret) {
+ dev_err(dev, "%s: mnh_mipi_config_mux_sel failed (%d)\n",
+ __func__, ret);
+ return ret;
+ }
- return 0;
+ return ret;
}
EXPORT_SYMBOL_GPL(mnh_mipi_config);
@@ -966,80 +909,88 @@
{
uint32_t txdev = config.txdev;
uint32_t rxdev = config.rxdev;
+ int ret = 0;
dev_dbg(dev, "%s: stopping rxdev %d, txdev %d\n", __func__, rxdev,
txdev);
/* Shutdown host */
- mnh_mipi_stop_host(dev, rxdev);
+ ret = mnh_mipi_stop_host(dev, rxdev);
+ if (ret) {
+ dev_err(dev, "%s: mnh_mipi_stop_host failed (%d)\n",
+ __func__, ret);
+ return ret;
+ }
/* Shutdown device */
- mnh_mipi_stop_device(dev, txdev);
+ ret = mnh_mipi_stop_device(dev, txdev);
+ if (ret) {
+ dev_err(dev, "%s: mnh_mipi_stop_device failed (%d)\n",
+ __func__, ret);
+ return ret;
+ }
- return 0;
+ return ret;
}
EXPORT_SYMBOL_GPL(mnh_mipi_stop);
-void mnh_mipi_stop_device(struct device *dev, int txdev)
+int mnh_mipi_stop_device(struct device *dev, int txdev)
{
+ int ret = 0;
+
/* shut down the PHY */
- HW_OUTf(HWIO_MIPI_TX_BASE_ADDR(txdev), MIPI_TX, PHY_RSTZ,
- PHY_RSTZ, 0);
- HW_OUTf(HWIO_MIPI_TX_BASE_ADDR(txdev), MIPI_TX, PHY_RSTZ,
- PHY_SHUTDOWNZ, 0);
- HW_OUTf(HWIO_MIPI_TX_BASE_ADDR(txdev), MIPI_TX, PHY_RSTZ,
- PHY_ENABLECLK, 0);
+ TX_OUTf(txdev, PHY_RSTZ, PHY_RSTZ, 0);
+ TX_OUTf(txdev, PHY_RSTZ, PHY_SHUTDOWNZ, 0);
+ TX_OUTf(txdev, PHY_RSTZ, PHY_ENABLECLK, 0);
/* shut down the controller logic */
- HW_OUTf(HWIO_MIPI_TX_BASE_ADDR(txdev), MIPI_TX, CSI2_RESETN,
- CSI2_RESETN_RW, 0);
+ TX_OUTf(txdev, CSI2_RESETN, CSI2_RESETN_RW, 0);
/* enable clock gating (disable clock) */
switch (txdev) {
case MIPI_TX0:
- HW_OUTf(HWIO_MIPI_TOP_BASE_ADDR, MIPI_TOP, CSI_CLK_CTRL,
- CSI2_TX0_CG, 0x1);
+ TOP_OUTf(CSI_CLK_CTRL, CSI2_TX0_CG, 0x1);
break;
case MIPI_TX1:
- HW_OUTf(HWIO_MIPI_TOP_BASE_ADDR, MIPI_TOP, CSI_CLK_CTRL,
- CSI2_TX1_CG, 0x1);
+ TOP_OUTf(CSI_CLK_CTRL, CSI2_TX1_CG, 0x1);
break;
default:
dev_err(dev, "%s invalid mipi device!\n", __func__);
+ ret = -EINVAL;
break;
}
+
+ return ret;
}
EXPORT_SYMBOL_GPL(mnh_mipi_stop_device);
-void mnh_mipi_stop_host(struct device *dev, int rxdev)
+int mnh_mipi_stop_host(struct device *dev, int rxdev)
{
+ int ret = 0;
+
/* shut down the PHY */
/* see 7.3.1: Rx-DPHY databook */
- HW_OUTf(HWIO_MIPI_RX_BASE_ADDR(rxdev), MIPI_RX, PHY_SHUTDOWNZ,
- PHY_SHUTDOWNZ, 0x0);
- HW_OUTf(HWIO_MIPI_RX_BASE_ADDR(rxdev), MIPI_RX, DPHY_RSTZ,
- DPHY_RSTZ, 0x0);
- HW_OUTf(HWIO_MIPI_RX_BASE_ADDR(rxdev), MIPI_RX, PHY_TEST_CTRL0,
- PHY_TESTCLR, 0x1);
+ RX_OUTf(rxdev, PHY_SHUTDOWNZ, PHY_SHUTDOWNZ, 0x0);
+ RX_OUTf(rxdev, DPHY_RSTZ, DPHY_RSTZ, 0x0);
+ RX_OUTf(rxdev, PHY_TEST_CTRL0, PHY_TESTCLR, 0x1);
/* shut down the controller logic */
- HW_OUTf(HWIO_MIPI_RX_BASE_ADDR(rxdev), MIPI_RX, CSI2_RESETN,
- CSI2_RESETN, 0x0);
+ RX_OUTf(rxdev, CSI2_RESETN, CSI2_RESETN, 0x0);
/* enable clock gating (disable clock) */
switch (rxdev) {
case MIPI_RX0:
- HW_OUTf(HWIO_MIPI_TOP_BASE_ADDR, MIPI_TOP, CSI_CLK_CTRL,
- CSI2_RX0_CG, 0x1);
+ TOP_OUTf(CSI_CLK_CTRL, CSI2_RX0_CG, 0x1);
break;
case MIPI_RX1:
- HW_OUTf(HWIO_MIPI_TOP_BASE_ADDR, MIPI_TOP, CSI_CLK_CTRL,
- CSI2_RX1_CG, 0x1);
+ TOP_OUTf(CSI_CLK_CTRL, CSI2_RX1_CG, 0x1);
break;
case MIPI_RX2:
- HW_OUTf(HWIO_MIPI_TOP_BASE_ADDR, MIPI_TOP, CSI_CLK_CTRL,
- CSI2_RX2_CG, 0x1);
+ TOP_OUTf(CSI_CLK_CTRL, CSI2_RX2_CG, 0x1);
break;
default:
dev_err(dev, "%s invalid mipi host!\n", __func__);
+ ret = -EINVAL;
break;
}
+
+ return ret;
}
EXPORT_SYMBOL_GPL(mnh_mipi_stop_host);
@@ -1080,22 +1031,20 @@
int mnh_mipi_get_device_interrupts(struct device *dev,
struct mipi_device_irq_st *int_status)
{
- uint32_t baddr = HWIO_MIPI_TX_BASE_ADDR(int_status->dev);
-
- int_status->main = TX_IN(INT_ST_MAIN);
+ int_status->main = TX_IN(int_status->dev, INT_ST_MAIN);
dev_info(dev, "MIPI device controller %d interrupt main: %x\n",
int_status->dev, int_status->main);
if (int_status->main & TX_MASK(INT_ST_MAIN, INT_ST_VPG)) {
- int_status->vpg = TX_IN(INT_ST_VPG);
+ int_status->vpg = TX_IN(int_status->dev, INT_ST_VPG);
dev_info(dev, "CSI INT_ST_VPG: %x\n", int_status->vpg);
}
if (int_status->main & TX_MASK(INT_ST_MAIN, INT_ST_IDI)) {
- int_status->idi = TX_IN(INT_ST_IDI);
+ int_status->idi = TX_IN(int_status->dev, INT_ST_IDI);
dev_info(dev, "CSI INT_ST_IDI: %x\n", int_status->idi);
}
if (int_status->main & TX_MASK(INT_ST_MAIN, INT_ST_PHY)) {
- int_status->phy = TX_IN(INT_ST_PHY);
+ int_status->phy = TX_IN(int_status->dev, INT_ST_PHY);
dev_info(dev, "CSI INT_ST_PHY: %x\n", int_status->phy);
}
mnh_mipi_get_top_interrupts(dev, int_status);
@@ -1107,43 +1056,43 @@
int mnh_mipi_get_host_interrupts(struct device *dev,
struct mipi_host_irq_st *int_status)
{
-
- uint32_t baddr = HWIO_MIPI_RX_BASE_ADDR(int_status->dev);
-
- int_status->main = RX_IN(INT_ST_MAIN);
+ int_status->main = RX_IN(int_status->dev, INT_ST_MAIN);
dev_info(dev, "MIPI host controller %d interrupt main: %x\n",
int_status->dev, int_status->main);
if (int_status->main & RX_MASK(INT_ST_MAIN, STATUS_INT_PHY_FATAL)) {
- int_status->phy_fatal = RX_IN(INT_ST_PHY_FATAL);
+ int_status->phy_fatal = RX_IN(int_status->dev,
+ INT_ST_PHY_FATAL);
dev_info(dev, "CSI INT PHY FATAL: %x\n",
int_status->phy_fatal);
}
if (int_status->main & RX_MASK(INT_ST_MAIN, STATUS_INT_PKT_FATAL)) {
- int_status->pkt_fatal = RX_IN(INT_ST_PKT_FATAL);
+ int_status->pkt_fatal = RX_IN(int_status->dev,
+ INT_ST_PKT_FATAL);
dev_info(dev, "CSI INT PKT FATAL: %x\n",
int_status->pkt_fatal);
}
if (int_status->main & RX_MASK(INT_ST_MAIN, STATUS_INT_FRAME_FATAL)) {
- int_status->frame_fatal = RX_IN(INT_ST_FRAME_FATAL);
+ int_status->frame_fatal = RX_IN(int_status->dev,
+ INT_ST_FRAME_FATAL);
dev_info(dev, "CSI INT FRAME FATAL: %x\n",
int_status->frame_fatal);
}
if (int_status->main & RX_MASK(INT_ST_MAIN, STATUS_INT_PHY)) {
- int_status->phy = RX_IN(INT_ST_PHY);
+ int_status->phy = RX_IN(int_status->dev, INT_ST_PHY);
dev_info(dev, "CSI INT PHY: %x\n", int_status->phy);
}
if (int_status->main & RX_MASK(INT_ST_MAIN, STATUS_INT_PKT)) {
- int_status->pkt = RX_IN(INT_ST_PKT);
+ int_status->pkt = RX_IN(int_status->dev, INT_ST_PKT);
dev_info(dev, "CSI INT PKT: %x\n", int_status->pkt);
}
if (int_status->main & RX_MASK(INT_ST_MAIN, STATUS_INT_LINE)) {
- int_status->line = RX_IN(INT_ST_LINE);
+ int_status->line = RX_IN(int_status->dev, INT_ST_LINE);
dev_info(dev, "CSI INT LINE: %x\n", int_status->line);
}
diff --git a/drivers/misc/mnh/mnh-mipi.h b/drivers/misc/mnh/mnh-mipi.h
index 0a3e87d..366020d 100644
--- a/drivers/misc/mnh/mnh-mipi.h
+++ b/drivers/misc/mnh/mnh-mipi.h
@@ -54,8 +54,8 @@
int mnh_mipi_config(struct device *dev, struct mnh_mipi_config cfg);
int mnh_mipi_stop(struct device *dev, struct mnh_mipi_config cfg);
-void mnh_mipi_stop_device(struct device *dev, int txdev);
-void mnh_mipi_stop_host(struct device *dev, int rxdev);
+int mnh_mipi_stop_device(struct device *dev, int txdev);
+int mnh_mipi_stop_host(struct device *dev, int rxdev);
void mnh_mipi_set_debug(int val);
int mnh_mipi_get_device_interrupts(struct device *dev,
struct mipi_device_irq_st *int_status);
diff --git a/drivers/misc/mnh/mnh-pcie.c b/drivers/misc/mnh/mnh-pcie.c
index 7ec0efd..c4ae378 100644
--- a/drivers/misc/mnh/mnh-pcie.c
+++ b/drivers/misc/mnh/mnh-pcie.c
@@ -155,7 +155,7 @@
if (!mnh_dev || !mnh_dev->pdev)
return -ENODEV;
- if (mnh_dev->pdev->current_state != PCI_D0)
+ if ((mnh_dev->pdev->current_state != PCI_D0) || !mnh_dev->powered)
return -EIO;
if (len != sizeof(uint32_t))
@@ -164,13 +164,6 @@
if (offset > mnh_dev->bar_size[BAR_0] - sizeof(uint32_t))
return -EINVAL; /* address invalid */
- if (WARN_ON(!mnh_dev->powered)) {
- dev_err(&mnh_dev->pdev->dev,
- "%s: cannot do pcie transfer while powering down\n",
- __func__);
- return -EIO;
- }
-
pci_read_config_dword(mnh_dev->pdev, offset, data);
dev_dbg(&mnh_dev->pdev->dev, "Read PCIE Config [0x%08x]-0x%x\n",
@@ -195,7 +188,7 @@
if (!mnh_dev || !mnh_dev->pdev)
return -ENODEV;
- if (mnh_dev->pdev->current_state != PCI_D0)
+ if ((mnh_dev->pdev->current_state != PCI_D0) || !mnh_dev->powered)
return -EIO;
if (len != sizeof(uint32_t))
@@ -207,13 +200,6 @@
dev_dbg(&mnh_dev->pdev->dev, "Write PCIE Config[0x%08x]-0x%x",
offset, data);
- if (WARN_ON(!mnh_dev->powered)) {
- dev_err(&mnh_dev->pdev->dev,
- "%s: cannot do pcie transfer while powering down\n",
- __func__);
- return -EIO;
- }
-
pci_write_config_dword(mnh_dev->pdev, offset, data);
return 0;
@@ -236,7 +222,7 @@
if (!mnh_dev || !mnh_dev->pdev)
return -ENODEV;
- if (mnh_dev->pdev->current_state != PCI_D0)
+ if ((mnh_dev->pdev->current_state != PCI_D0) || !mnh_dev->powered)
return -EIO;
if (offset > HW_MNH_PCIE_BAR_2_ADDR_END - len) {
@@ -246,13 +232,6 @@
new_offset = mnh_check_iatu_bar2(offset);
- if (WARN_ON(!mnh_dev->powered)) {
- dev_err(&mnh_dev->pdev->dev,
- "%s: cannot do pcie transfer while powering down\n",
- __func__);
- return -EIO;
- }
-
if (len == sizeof(uint32_t))
*data = ioread32(mnh_dev->config + new_offset);
else if (len == sizeof(uint16_t))
@@ -289,7 +268,7 @@
if (!mnh_dev || !mnh_dev->pdev)
return -ENODEV;
- if (mnh_dev->pdev->current_state != PCI_D0)
+ if ((mnh_dev->pdev->current_state != PCI_D0) || !mnh_dev->powered)
return -EIO;
if (offset > HW_MNH_PCIE_BAR_2_ADDR_END - len)
@@ -300,13 +279,6 @@
dev_dbg(&mnh_dev->pdev->dev, "Write Config[0x%08x] - 0x%x",
new_offset, data);
- if (WARN_ON(!mnh_dev->powered)) {
- dev_err(&mnh_dev->pdev->dev,
- "%s: cannot do pcie transfer while powering down\n",
- __func__);
- return -EIO;
- }
-
if (len == sizeof(uint32_t))
iowrite32(data, mnh_dev->config + new_offset);
else if (len == sizeof(uint16_t))
@@ -339,7 +311,7 @@
if (!mnh_dev || !mnh_dev->pdev)
return -ENODEV;
- if (mnh_dev->pdev->current_state != PCI_D0)
+ if ((mnh_dev->pdev->current_state != PCI_D0) || !mnh_dev->powered)
return -EIO;
if (len > mnh_dev->bar_size[BAR_4])
@@ -351,13 +323,6 @@
dev_dbg(&mnh_dev->pdev->dev, "Read DDR[0x%08x], len-%d, data-0x%0x",
offset, len, *(uint32_t *)data);
- if (WARN_ON(!mnh_dev->powered)) {
- dev_err(&mnh_dev->pdev->dev,
- "%s: cannot do pcie transfer while powering down\n",
- __func__);
- return -EIO;
- }
-
memcpy(data, mnh_dev->ddr + offset, len);
return 0;
@@ -378,7 +343,7 @@
if (!mnh_dev || !mnh_dev->pdev)
return -ENODEV;
- if (mnh_dev->pdev->current_state != PCI_D0)
+ if ((mnh_dev->pdev->current_state != PCI_D0) || !mnh_dev->powered)
return -EIO;
if (len > mnh_dev->bar_size[BAR_4])
@@ -395,13 +360,6 @@
dev_dbg(&mnh_dev->pdev->dev, "Write DDR[0x%08x], len-%d, data-0x%x",
offset, len, *(uint32_t *)data);
- if (WARN_ON(!mnh_dev->powered)) {
- dev_err(&mnh_dev->pdev->dev,
- "%s: cannot do pcie transfer while powering down\n",
- __func__);
- return -EIO;
- }
-
memcpy(mnh_dev->ddr + offset, data, len);
return 0;
@@ -539,10 +497,11 @@
mnh_pcie_config_write(IATU_LWR_TARGET_ADDR, size, lower);
mnh_pcie_config_write(IATU_UPPER_TARGET_ADDR, size, upper);
mnh_pcie_config_write(IATU_REGION_CTRL_1, size, IATU_MEM);
- mnh_pcie_config_write(IATU_REGION_CTRL_2, size, IATU_ENABLE);
+ mnh_pcie_config_write(IATU_REGION_CTRL_2, size,
+ IATU_ENABLE | IATU_DMA_BYPASS);
udelay(1);
mnh_pcie_config_read(IATU_REGION_CTRL_2, size, &data);
- if (data != IATU_ENABLE)
+ if (data != (IATU_ENABLE | IATU_DMA_BYPASS))
dev_err(&mnh_dev->pdev->dev, "Set outbound IATU Fail\n");
return 0;
@@ -753,31 +712,34 @@
u = 0; /* iterator of *sg */
for_each_sg(sc_list, in_sg, count, i) {
- if (u < maxsg) {
- sg[u].paddr = sg_dma_address(in_sg);
- sg[u].size = sg_dma_len(in_sg);
+ /* Last entry is reserved for the NULL terminator */
+ if (u >= (maxsg - 1)) {
+ dev_err(&mnh_dev->pdev->dev, "maxsg exceeded\n");
+ return -EINVAL;
+ }
+ sg[u].paddr = sg_dma_address(in_sg);
+ sg[u].size = sg_dma_len(in_sg);
dev_dbg(&mnh_dev->pdev->dev,
"sg[%d] : Address %pa , length %zu\n",
u, &sg[u].paddr, sg[u].size);
#ifdef COMBINE_SG
- if ((u > 0) && (sg[u-1].paddr + sg[u-1].size ==
- sg[u].paddr)) {
- sg[u-1].size = sg[u-1].size
- + sg[u].size;
- sg[u].size = 0;
- } else {
- u++;
- }
-#else
- u++;
-#endif
+ if ((u > 0) && (sg[u-1].paddr + sg[u-1].size ==
+ sg[u].paddr)) {
+ sg[u-1].size = sg[u-1].size
+ + sg[u].size;
+ sg[u].size = 0;
} else {
- dev_err(&mnh_dev->pdev->dev, "maxsg exceeded\n");
- return -EINVAL;
+ u++;
}
+#else
+ u++;
+#endif
}
- sg[u].paddr = 0x0;
+ /* Zero out the list terminator entry.
+ * mnh dma engine looks at sg.paddr=0 for end of chain */
+ memset(&sg[u], 0, sizeof(sg[0]));
+ u++; /* Count of entries includes list terminator entry */
dev_dbg(&mnh_dev->pdev->dev, "SGL with %d/%d entries\n", u, i);
@@ -956,7 +918,7 @@
sgl->mypage = kcalloc(p_num, sizeof(struct page *), GFP_KERNEL);
if (!sgl->mypage) {
- kfree((*sg));
+ vfree((*sg));
*sg = NULL;
sgl->n_num = 0;
sgl->length = 0;
@@ -964,7 +926,7 @@
}
sgl->sc_list = kcalloc(p_num, sizeof(struct scatterlist), GFP_KERNEL);
if (!sgl->sc_list) {
- kfree((*sg));
+ vfree((*sg));
*sg = NULL;
kfree(sgl->mypage);
sgl->mypage = NULL;
@@ -1021,7 +983,7 @@
for (i = 0; i < sgl->n_num; i++)
page_cache_release(*(sgl->mypage + i));
free_mem:
- kfree((*sg));
+ vfree((*sg));
*sg = NULL;
kfree(sgl->mypage);
sgl->mypage = NULL;
@@ -1064,6 +1026,40 @@
}
EXPORT_SYMBOL(mnh_sg_destroy);
+int mnh_sg_verify(struct mnh_sg_entry *sg, size_t size, struct mnh_sg_list *sgl)
+{
+ int i;
+ size_t len = size / sizeof(struct mnh_sg_entry);
+
+ /* At least one entry plus null terminator required */
+ if (len < 2) {
+ dev_err(&mnh_dev->pdev->dev,
+ "Invalid SG list length %zu\n", len);
+ return -EINVAL;
+ }
+ if (sg == NULL) {
+ dev_err(&mnh_dev->pdev->dev, "No SG list\n");
+ return -EINVAL;
+ }
+ for (i = 0; i < len - 1; i++) {
+ if (sg[i].paddr == 0) {
+ dev_err(&mnh_dev->pdev->dev, "Early list end\n");
+ return -EINVAL;
+ }
+ if (sg[i].size == 0) {
+ dev_err(&mnh_dev->pdev->dev, "Invalid entry size\n");
+ return -EINVAL;
+ }
+ }
+ /* Verify terminator */
+ if (sg[i].paddr != 0) {
+ dev_err(&mnh_dev->pdev->dev, "Missing list terminator\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(mnh_sg_verify);
+
/**
* API to read/write multi blocks on specific channel.
* Multi block read/write are based on Linked List(Transfer List) built by MNH.
diff --git a/drivers/misc/mnh/mnh-pcie.h b/drivers/misc/mnh/mnh-pcie.h
index 690fc53..e361557 100644
--- a/drivers/misc/mnh/mnh-pcie.h
+++ b/drivers/misc/mnh/mnh-pcie.h
@@ -382,6 +382,17 @@
int mnh_sg_destroy(struct mnh_sg_list *sgl);
/**
+ * API to verify that a scatter gather list is valid
+ * @param[in] *sg pointer to the scatter gather list that was built during
+ * mnh_sg_build
+ * @param[in] size size of the list in bytes
+ * @param[in] sgl pointer to the scatter gather local data, if any
+ * @return 0 for SUCCESS
+ */
+int mnh_sg_verify(struct mnh_sg_entry *sg, size_t size,
+ struct mnh_sg_list *sgl);
+
+/**
* API to build a scatter-gather list for multi-block DMA transfer for a
* dma_buf
* @param[in] fd Handle of dma_buf passed from user
diff --git a/drivers/misc/mnh/mnh-pwr.c b/drivers/misc/mnh/mnh-pwr.c
index 28d8fe3..6a68fbf 100644
--- a/drivers/misc/mnh/mnh-pwr.c
+++ b/drivers/misc/mnh/mnh-pwr.c
@@ -67,6 +67,8 @@
struct work_struct shutdown_work;
+ bool pcie_failure;
+
enum mnh_pwr_state state;
struct mutex lock;
@@ -74,18 +76,11 @@
static struct mnh_pwr_data *mnh_pwr;
-static int __mnh_pwr_down(bool pcie_failure);
-
-static inline int mnh_pwr_down_pcie_failure(void)
-{
- return __mnh_pwr_down(true);
-}
-
static void mnh_pwr_shutdown_work(struct work_struct *data)
{
dev_err(mnh_pwr->dev, "%s: begin emergency power down\n", __func__);
- mnh_pwr_down_pcie_failure();
+ mnh_pwr_set_state(MNH_PWR_S4);
mnh_sm_pwr_error_cb();
}
@@ -94,6 +89,8 @@
{
dev_dbg(mnh_pwr->dev, "%s: received event %ld\n", __func__, event);
+ mnh_pwr->pcie_failure = true;
+
/* force emergency shutdown if regulator output has failed */
if (event == REGULATOR_EVENT_FAIL) {
dev_err(mnh_pwr->dev,
@@ -112,6 +109,8 @@
{
dev_dbg(mnh_pwr->dev, "%s: received event %ld\n", __func__, event);
+ mnh_pwr->pcie_failure = true;
+
/* force emergency shutdown if regulator output has failed */
if (event == REGULATOR_EVENT_FAIL) {
dev_err(mnh_pwr->dev,
@@ -130,6 +129,8 @@
{
dev_dbg(mnh_pwr->dev, "%s: received event %ld\n", __func__, event);
+ mnh_pwr->pcie_failure = true;
+
/* force emergency shutdown if regulator output has failed */
if (event == REGULATOR_EVENT_FAIL) {
dev_err(mnh_pwr->dev,
@@ -148,6 +149,8 @@
{
dev_dbg(mnh_pwr->dev, "%s: received event %ld\n", __func__, event);
+ mnh_pwr->pcie_failure = true;
+
/* force emergency shutdown if regulator output has failed */
if (event == REGULATOR_EVENT_FAIL) {
dev_err(mnh_pwr->dev,
@@ -171,6 +174,8 @@
"%s: PCIe link is down, forcing power down\n",
__func__);
+ mnh_pwr->pcie_failure = true;
+
/* force emergency shutdown */
schedule_work(&mnh_pwr->shutdown_work);
break;
@@ -235,7 +240,7 @@
return 0;
}
-static int mnh_pwr_pcie_suspend(bool pcie_failure)
+static int mnh_pwr_pcie_suspend(void)
{
struct pci_dev *pcidev = mnh_pwr->pcidev;
int ret;
@@ -250,7 +255,7 @@
__func__, ret);
}
- if (pcie_failure) {
+ if (mnh_pwr->pcie_failure) {
/* call the platform driver to update link status */
ret = msm_pcie_pm_control(MSM_PCIE_SUSPEND, pcidev->bus->number,
pcidev, NULL,
@@ -261,6 +266,8 @@
__func__, ret);
return ret;
}
+
+ mnh_pwr->pcie_failure = false;
} else {
/* prepare the root complex and endpoint for going to suspend */
ret = pci_prepare_to_sleep(pcidev);
@@ -354,6 +361,8 @@
}
}
+ mnh_pwr->pcie_failure = false;
+
return 0;
fail_pcie_resume_mnh_init:
@@ -366,13 +375,13 @@
return ret;
}
-static int __mnh_pwr_down(bool pcie_failure)
+static int mnh_pwr_down(void)
{
int ret;
if (mnh_sm_get_boot_mode() == MNH_BOOT_MODE_PCIE) {
/* suspend pcie link */
- ret = mnh_pwr_pcie_suspend(pcie_failure);
+ ret = mnh_pwr_pcie_suspend();
if (ret) {
dev_err(mnh_pwr->dev, "%s: failed to suspend pcie link (%d)\n",
__func__, ret);
@@ -464,18 +473,13 @@
return ret;
}
-static inline int mnh_pwr_down(void)
-{
- return __mnh_pwr_down(false);
-}
-
static int mnh_pwr_suspend(void)
{
int ret;
if (mnh_sm_get_boot_mode() == MNH_BOOT_MODE_PCIE) {
/* suspend pcie link */
- ret = mnh_pwr_pcie_suspend(false);
+ ret = mnh_pwr_pcie_suspend();
if (ret) {
dev_err(mnh_pwr->dev, "%s: failed to suspend pcie link (%d)\n",
__func__, ret);
@@ -769,13 +773,6 @@
return 0;
}
-int mnh_pwr_set_asr_voltage(int voltage_uV)
-{
- return regulator_set_voltage(mnh_pwr->asr_supply, voltage_uV,
- voltage_uV);
-}
-EXPORT_SYMBOL(mnh_pwr_set_asr_voltage);
-
int mnh_pwr_set_state(enum mnh_pwr_state system_state)
{
int ret = 0;
diff --git a/drivers/misc/mnh/mnh-pwr.h b/drivers/misc/mnh/mnh-pwr.h
index 140421b..a70414c 100644
--- a/drivers/misc/mnh/mnh-pwr.h
+++ b/drivers/misc/mnh/mnh-pwr.h
@@ -32,6 +32,5 @@
int mnh_pwr_set_state(enum mnh_pwr_state system_state);
enum mnh_pwr_state mnh_pwr_get_state(void);
int mnh_pwr_init(struct platform_device *pdev, struct device *dev);
-int mnh_pwr_set_asr_voltage(int voltage_uV);
#endif /* __MNH_PWR_H__ */
diff --git a/drivers/misc/mnh/mnh-sm-ion.c b/drivers/misc/mnh/mnh-sm-ion.c
index 61a08ec..561d6dd 100644
--- a/drivers/misc/mnh/mnh-sm-ion.c
+++ b/drivers/misc/mnh/mnh-sm-ion.c
@@ -233,6 +233,7 @@
int mnh_ion_stage_firmware_update(struct mnh_ion *ion, struct mnh_ion *ion_sec)
{
int err;
+ int i;
dev_dbg(ion->device, "%s: staging update\n", __func__);
if (!ion || !ion_sec || !ion->client ||
@@ -244,6 +245,10 @@
if (err)
return err;
+ for (i = 0; i < MAX_NR_MNH_FW_SLOTS; i++) {
+ ion->fw_array[i].cert_size =
+ ion_sec->fw_array[i].cert_size;
+ }
ion->is_fw_ready = true;
return 0;
diff --git a/drivers/misc/mnh/mnh-sm.c b/drivers/misc/mnh/mnh-sm.c
index 43e8f89..ff8042c 100644
--- a/drivers/misc/mnh/mnh-sm.c
+++ b/drivers/misc/mnh/mnh-sm.c
@@ -42,6 +42,7 @@
#include "hw-mnh-regs.h"
#include "mnh-clk.h"
+#include "mnh-crypto.h"
#include "mnh-ddr.h"
#include "mnh-hwio.h"
#include "mnh-hwio-bases.h"
@@ -124,11 +125,19 @@
};
enum fw_image_partition_index {
- FW_PART_PRI = 0,
- FW_PART_SEC,
+ FW_PART_PRI = 0, /* Carveout region, stores final FW for easel */
+ FW_PART_SEC, /* Temp allocation for buf sharing with PS app */
+ FW_PART_TER, /* Temp allocation to protect shared buffer */
FW_PART_MAX
};
+enum fw_update_status {
+ FW_UPD_NONE = 0, /* no update available */
+ FW_UPD_INIT, /* update service requested firmware buffer */
+ FW_UPD_RECEIVED, /* service uploaded new FW, authentication pending */
+ FW_UPD_VERIFIED /* fw signature check successful */
+};
+
struct mnh_sm_device {
struct platform_device *pdev;
struct miscdevice *misc_dev;
@@ -187,10 +196,13 @@
bool firmware_downloaded;
/* flag indicating a valid update buffer */
- bool pending_update;
+ enum fw_update_status update_status;
/* node to carveout memory, owned by mnh_sm_device */
struct mnh_ion *ion[FW_PART_MAX];
+
+ /* firmware version */
+ char mnh_fw_ver[FW_VER_SIZE];
};
static struct mnh_sm_device *mnh_sm_dev;
@@ -466,6 +478,64 @@
return err;
}
+/*
+ * Returns:
+ * 0 if all firmware slots verify correct
+ * >1 bitmask indicating firmware slots with wrong signature
+ * bit 0: SBL
+ * bit 1: Kernel
+ * bit 2: DTB
+ * bit 3: Ramdisk
+ * -ENODEV in case ION buffer is not available
+ */
+int mnh_verify_firmware_ion(struct mnh_ion *ion)
+{
+#if IS_ENABLED(CONFIG_MNH_SIG)
+ int err = 0, ret = 0;
+ int i;
+
+ struct cert_info info = {0};
+
+ if (!ion)
+ return -ENODEV;
+
+ for (i = 0; i < MAX_NR_MNH_FW_SLOTS; i++) {
+ dev_dbg(mnh_sm_dev->dev,
+ "Slot %d size %zu\n", i, ion->fw_array[i].size);
+ if (ion->fw_array[i].size > 0) {
+ dev_dbg(mnh_sm_dev->dev,
+ "Signature check of slot[%d]...\n", i);
+
+ info.img = ion->vaddr + ion->fw_array[i].ap_offs;
+ info.img_len = ion->fw_array[i].size;
+
+ err = mnh_crypto_verify(mnh_sm_dev->dev, &info);
+
+ ion->fw_array[i].cert = info.cert;
+ ion->fw_array[i].cert_size = info.cert_size;
+ if (!err) {
+ dev_dbg(mnh_sm_dev->dev,
+ "->Sig. check PASS for slot[%d]\n", i);
+ dev_dbg(mnh_sm_dev->dev,
+ "->Img size %zu, total img size %zu\n",
+ ion->fw_array[i].size -
+ ion->fw_array[i].cert_size,
+ ion->fw_array[i].size);
+ } else {
+ dev_err(mnh_sm_dev->dev,
+ "->Sig. check failed for slot[%d]=%d\n",
+ i,
+ ion->fw_array[i].cert);
+ ret |= (1 << i);
+ }
+ }
+ }
+ return ret;
+#else
+ return 0;
+#endif
+}
+
int mnh_transfer_slot_ion(struct mnh_ion *ion, int slot)
{
int err;
@@ -493,7 +563,8 @@
ion->fw_array[slot].ep_addr);
} else {
err = mnh_transfer_firmware_contig(
- ion->fw_array[slot].size,
+ ion->fw_array[slot].size -
+ ion->fw_array[slot].cert_size,
ion->fw_array[slot].ap_addr,
ion->fw_array[slot].ep_addr);
}
@@ -522,6 +593,15 @@
}
for (i = 0; i < MAX_NR_MNH_FW_SLOTS; i++) {
+#if IS_ENABLED(CONFIG_MNH_SIG_FORCE_OTA)
+ if (ion[FW_PART_PRI]->fw_array[i].cert != FW_IMAGE_CERT_OK) {
+ dev_err(mnh_sm_dev->dev,
+ "Firmware tainted, upload aborted - %d\n",
+ ion[FW_PART_PRI]->fw_array[i].cert);
+ mnh_reg_irq_callback(NULL, NULL, NULL);
+ return -EKEYREJECTED;
+ }
+#endif
/* only upload from primary ION buffer */
err = mnh_transfer_slot_ion(ion[FW_PART_PRI], i);
if (err) {
@@ -703,15 +783,19 @@
"%s: ION download successful\n", __func__);
return 0;
}
-
+#if !IS_ENABLED(CONFIG_MNH_SIG_FORCE_OTA)
/* Otherwise fall back to legacy mode */
dev_err(mnh_sm_dev->dev, "%s: Fallback to legacy mode\n", __func__);
return mnh_download_firmware_legacy();
+#else
+ return -EKEYREJECTED;
+#endif
}
/**
* Verify the integrity of the images in the secondary ion buffer
- * and populate the fw_array structure
+ * and populate the fw_array structure. Use tertiary buffer only
+ * if signature verification is enabled.
* @return 0 if success or -1 on failure to verify the image integrity
*/
int mnh_validate_update_buf(struct mnh_update_configs configs)
@@ -719,15 +803,39 @@
int i, slot;
int err_mask = 0;
struct mnh_update_config config;
+ enum fw_image_partition_index fw_idx;
+
+#if IS_ENABLED(CONFIG_MNH_SIG)
+ fw_idx = FW_PART_TER;
+#else
+ fw_idx = FW_PART_SEC;
+#endif
if (!mnh_sm_dev->ion[FW_PART_SEC])
return -ENOMEM;
+#if IS_ENABLED(CONFIG_MNH_SIG)
+ /* buffer to prevent future data corruption from userspace */
+ if (mnh_sm_dev->ion[FW_PART_TER])
+ mnh_ion_destroy_buffer(mnh_sm_dev->ion[FW_PART_TER]);
+ if (mnh_ion_create_buffer(mnh_sm_dev->ion[FW_PART_TER],
+ MNH_ION_BUFFER_SIZE,
+ ION_SYSTEM_HEAP_ID)) {
+ dev_err(mnh_sm_dev->dev, "%s: cannot claim ION buffer\n",
+ __func__);
+ return -ENOMEM;
+ }
+ memcpy(mnh_sm_dev->ion[FW_PART_TER]->vaddr,
+ mnh_sm_dev->ion[FW_PART_SEC]->vaddr,
+ MNH_ION_BUFFER_SIZE);
+ mnh_ion_destroy_buffer(mnh_sm_dev->ion[FW_PART_SEC]);
+#endif
/* Initialize update buffer struct */
for (i = 0; i < MAX_NR_MNH_FW_SLOTS; i++) {
- mnh_sm_dev->ion[FW_PART_SEC]->fw_array[i].ap_offs = 0;
- mnh_sm_dev->ion[FW_PART_SEC]->fw_array[i].size = 0;
+ mnh_sm_dev->ion[fw_idx]->fw_array[i].ap_offs = 0;
+ mnh_sm_dev->ion[fw_idx]->fw_array[i].size = 0;
}
+
/* Parse update slots */
for (i = 0; i < MAX_NR_MNH_FW_SLOTS; i++) {
config = configs.config[i];
@@ -735,32 +843,30 @@
/* only configure slots masked by FW_SLOT_UPDATE_MASK */
if ((FW_SLOT_UPDATE_MASK & (1 << slot)) &&
(config.size > 0)) {
- mnh_sm_dev->ion[FW_PART_SEC]->fw_array[slot].ap_offs =
+ mnh_sm_dev->ion[fw_idx]->fw_array[slot].ap_offs =
config.offset;
- mnh_sm_dev->ion[FW_PART_SEC]->fw_array[slot].size =
+ mnh_sm_dev->ion[fw_idx]->fw_array[slot].size =
config.size;
- dev_dbg(mnh_sm_dev->dev,
- "%s: Validating slot[%d]: size %zd offs %lu\n",
- __func__, slot, config.size, config.offset);
- /* perform signature check */
- dev_dbg(mnh_sm_dev->dev, "%s: Performing sig. check\n",
- __func__);
- err_mask = 0; /* todo: b/36782736 add sig.check here */
}
}
- /* all update slots must verify positive or they are discarded */
+ /* Signature verification */
+ err_mask = mnh_verify_firmware_ion(mnh_sm_dev->ion[fw_idx]);
+
+#if IS_ENABLED(CONFIG_MNH_SIG_FORCE_PS)
+ /* all update slots must verify for the update to be accepted */
if (err_mask) {
- dev_err(mnh_sm_dev->dev, "%s: Update firmware tainted\n",
- __func__);
- mnh_sm_dev->ion[FW_PART_SEC]->is_fw_ready = false;
- mnh_sm_dev->pending_update = false;
+ dev_err(mnh_sm_dev->dev, "%s: Update firmware tainted: %d\n",
+ __func__, err_mask);
+ mnh_sm_dev->ion[fw_idx]->is_fw_ready = false;
return err_mask;
}
- dev_dbg(mnh_sm_dev->dev, "%s: Update firmware validated\n",
+#endif
+ dev_info(mnh_sm_dev->dev, "%s: Update firmware validated\n",
__func__);
- mnh_sm_dev->ion[FW_PART_SEC]->is_fw_ready = true;
- mnh_sm_dev->pending_update = true;
+
+ mnh_sm_dev->ion[fw_idx]->is_fw_ready = true;
+
return 0;
}
@@ -792,6 +898,24 @@
static DEVICE_ATTR_RO(suspend);
+int mnh_update_fw_version(struct mnh_ion *ion)
+{
+ if ((ion != NULL) && (ion->fw_array[MNH_FW_SLOT_RAMDISK].size <
+ sizeof(mnh_sm_dev->mnh_fw_ver)))
+ return -EINVAL;
+ memcpy(mnh_sm_dev->mnh_fw_ver,
+ (uint8_t *)(ion->vaddr +
+ ion->fw_array[MNH_FW_SLOT_RAMDISK].ap_offs +
+ ion->fw_array[MNH_FW_SLOT_RAMDISK].size -
+ ion->fw_array[MNH_FW_SLOT_RAMDISK].cert_size -
+ sizeof(mnh_sm_dev->mnh_fw_ver)),
+ sizeof(mnh_sm_dev->mnh_fw_ver));
+ dev_dbg(mnh_sm_dev->dev,
+ "%s: Easel firmware version: %s size %zu\n", __func__,
+ mnh_sm_dev->mnh_fw_ver, sizeof(mnh_sm_dev->mnh_fw_ver));
+ return sizeof(mnh_sm_dev->mnh_fw_ver);
+}
+
int mnh_resume_firmware(void)
{
dev_dbg(mnh_sm_dev->dev, "%s sbl dl:0x%x ex:0x%x size:0x%x\n", __func__,
@@ -1066,6 +1190,15 @@
static DEVICE_ATTR_RW(power_mode);
+static ssize_t fw_ver_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return scnprintf(buf, MAX_STR_COPY, "%s\n", mnh_sm_dev->mnh_fw_ver);
+}
+
+static DEVICE_ATTR_RO(fw_ver);
+
static int mnh_sm_read_mipi_interrupts(void)
{
int i, int_event = 0;
@@ -1143,6 +1276,46 @@
static DEVICE_ATTR_RO(error_event);
+#if IS_ENABLED(CONFIG_MNH_SIG)
+/* issue signature verification of the firmware in memory and print results */
+static ssize_t verify_fw_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int err, i;
+
+ dev_info(dev, "Starting signature check\n");
+ err = mnh_verify_firmware_ion(mnh_sm_dev->ion[FW_PART_PRI]);
+ for (i = 0; i < MAX_NR_MNH_FW_SLOTS; i++) {
+ dev_info(dev, "Signature check slot[%d]:%d\n", i,
+ mnh_sm_dev->ion[FW_PART_PRI]->fw_array[i].cert);
+ }
+ return scnprintf(buf, MAX_STR_COPY, "%d", err);
+}
+
+static DEVICE_ATTR_RO(verify_fw);
+#endif
+
+static ssize_t ddr_mbist_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int val = 0;
+ int ret;
+
+ ret = kstrtoint(buf, 0, &val);
+ if (ret || ((val != LIMITED_MOVI1_3N) && (val != MOVI1_3N)))
+ return -EINVAL;
+
+ mnh_pwr_set_state(MNH_PWR_S0);
+ mnh_ddr_po_init(mnh_sm_dev->dev, mnh_sm_dev->ddr_pad_iso_n_pin);
+ mnh_ddr_mbist(dev, val);
+ mnh_pwr_set_state(MNH_PWR_S4);
+
+ return count;
+}
+static DEVICE_ATTR_WO(ddr_mbist);
+
static struct attribute *mnh_sm_attrs[] = {
&dev_attr_stage_fw.attr,
&dev_attr_poweron.attr,
@@ -1163,6 +1336,11 @@
&dev_attr_spi_boot_mode.attr,
&dev_attr_boot_trace.attr,
&dev_attr_error_event.attr,
+ &dev_attr_fw_ver.attr,
+#if IS_ENABLED(CONFIG_MNH_SIG)
+ &dev_attr_verify_fw.attr,
+#endif
+ &dev_attr_ddr_mbist.attr,
NULL
};
ATTRIBUTE_GROUPS(mnh_sm);
@@ -1430,6 +1608,13 @@
{
struct device *dev = mnh_sm_dev->dev;
int ret = 0;
+ enum fw_image_partition_index fw_idx;
+
+#if IS_ENABLED(CONFIG_MNH_SIG)
+ fw_idx = FW_PART_TER;
+#else
+ fw_idx = FW_PART_SEC;
+#endif
if (state == mnh_sm_dev->state) {
dev_dbg(dev, "%s: already in state %d\n", __func__, state);
@@ -1444,25 +1629,26 @@
mnh_sm_dev->powered = false;
reinit_completion(&mnh_sm_dev->powered_complete);
- /* stage firmware copy to ION if valid update was received */
- if (mnh_sm_dev->pending_update) {
- dev_dbg(mnh_sm_dev->dev,
- "%s: staging firmware update",
- __func__);
- mnh_ion_stage_firmware_update(
- mnh_sm_dev->ion[FW_PART_PRI],
- mnh_sm_dev->ion[FW_PART_SEC]);
- mnh_sm_dev->pending_update = false;
- mnh_ion_destroy_buffer(mnh_sm_dev->ion[FW_PART_SEC]);
- }
-
ret = mnh_sm_poweroff();
disable_irq(mnh_sm_dev->ready_irq);
break;
case MNH_STATE_ACTIVE:
- enable_irq(mnh_sm_dev->ready_irq);
+ /* stage firmware copy to ION if valid update was received */
+ if (mnh_sm_dev->update_status == FW_UPD_VERIFIED) {
+ dev_info(mnh_sm_dev->dev,
+ "%s: staging firmware update",
+ __func__);
+ mnh_ion_stage_firmware_update(
+ mnh_sm_dev->ion[FW_PART_PRI],
+ mnh_sm_dev->ion[fw_idx]
+ );
+ mnh_ion_destroy_buffer(mnh_sm_dev->ion[fw_idx]);
+ mnh_update_fw_version(mnh_sm_dev->ion[FW_PART_PRI]);
+ mnh_sm_dev->update_status = FW_UPD_NONE;
+ }
+ enable_irq(mnh_sm_dev->ready_irq);
ret = mnh_sm_poweron();
if (ret)
break;
@@ -1637,8 +1823,15 @@
dev_dbg(mnh_sm_dev->dev, "%s: staging firmware\n",
__func__);
mnh_ion_stage_firmware(mnh_sm_dev->ion[FW_PART_PRI]);
+ /* Extract firmware version from ramdisk image */
+ mnh_update_fw_version(mnh_sm_dev->ion[FW_PART_PRI]);
}
}
+
+ /* perform signature check on firmware images */
+#if IS_ENABLED(CONFIG_MNH_SIG_FORCE_OTA)
+ mnh_verify_firmware_ion(mnh_sm_dev->ion[FW_PART_PRI]);
+#endif
return 0;
}
@@ -1798,11 +1991,13 @@
err = copy_to_user((void __user *)arg, &fd, sizeof(fd));
if (err) {
mnh_ion_destroy_buffer(mnh_sm_dev->ion[FW_PART_SEC]);
+ mnh_sm_dev->update_status = FW_UPD_NONE;
dev_err(mnh_sm_dev->dev,
"%s: failed to copy to userspace (%d)\n",
__func__, err);
return err;
}
+ mnh_sm_dev->update_status = FW_UPD_INIT;
break;
case MNH_SM_IOC_POST_UPDATE_BUF:
err = copy_from_user(&update_configs, (void __user *)arg,
@@ -1813,13 +2008,27 @@
__func__, err);
return err;
}
-
- err = mnh_validate_update_buf(update_configs);
+ if (mnh_sm_dev->update_status == FW_UPD_INIT) {
+ mnh_sm_dev->update_status = FW_UPD_RECEIVED;
+ err = mnh_validate_update_buf(update_configs);
+ if (err) {
+ mnh_ion_destroy_buffer(
+ mnh_sm_dev->ion[FW_PART_SEC]);
+ mnh_sm_dev->update_status = FW_UPD_NONE;
+ dev_err(mnh_sm_dev->dev,
+ "%s: failed to validate slots (%d)\n",
+ __func__, err);
+ return err;
+ }
+ mnh_sm_dev->update_status = FW_UPD_VERIFIED;
+ }
+ break;
+ case MNH_SM_IOC_GET_FW_VER:
+ err = copy_to_user((void __user *)arg, &mnh_sm_dev->mnh_fw_ver,
+ sizeof(mnh_sm_dev->mnh_fw_ver));
if (err) {
- mnh_ion_destroy_buffer(mnh_sm_dev->ion[FW_PART_SEC]);
-
dev_err(mnh_sm_dev->dev,
- "%s: failed to validate slots (%d)\n",
+ "%s: failed to copy to userspace (%d)\n",
__func__, err);
return err;
}
@@ -1939,12 +2148,17 @@
init_completion(&mnh_sm_dev->work_complete);
init_completion(&mnh_sm_dev->suspend_complete);
+ /* Allocate ion buffer structures */
+ for (i = 0; i < FW_PART_MAX; i++) {
+ mnh_sm_dev->ion[i] = devm_kzalloc(dev,
+ sizeof(struct mnh_ion),
+ GFP_KERNEL);
+ if (mnh_sm_dev->ion[i])
+ mnh_sm_dev->ion[i]->device = dev;
+ }
+
/* Allocate primary ION buffer (from carveout region) */
- mnh_sm_dev->ion[FW_PART_PRI] = devm_kzalloc(dev,
- sizeof(struct mnh_ion),
- GFP_KERNEL);
if (mnh_sm_dev->ion[FW_PART_PRI]) {
- mnh_sm_dev->ion[FW_PART_PRI]->device = dev;
if (mnh_ion_create_buffer(mnh_sm_dev->ion[FW_PART_PRI],
MNH_ION_BUFFER_SIZE,
ION_GOOGLE_HEAP_ID))
@@ -1952,13 +2166,6 @@
__func__);
}
- /* Initialize ION structure of firmware update buffer */
- mnh_sm_dev->ion[FW_PART_SEC] = devm_kzalloc(dev,
- sizeof(struct mnh_ion),
- GFP_KERNEL);
- if (mnh_sm_dev->ion[FW_PART_SEC])
- mnh_sm_dev->ion[FW_PART_SEC]->device = dev;
-
/* get boot mode gpio */
mnh_sm_dev->boot_mode_gpio = devm_gpiod_get(&pdev->dev, "boot-mode",
GPIOD_OUT_LOW);
@@ -2016,6 +2223,9 @@
goto fail_probe_2;
}
+ /* initialize mnh-crypto */
+ mnh_crypto_config_sysfs();
+
/* initialize mnh-clk driver */
mnh_clk_init(dev, HWIO_SCU_BASE_ADDR);
diff --git a/drivers/misc/mnh/mnh-sm.h b/drivers/misc/mnh/mnh-sm.h
index b9a277d..5d5f7c4 100644
--- a/drivers/misc/mnh/mnh-sm.h
+++ b/drivers/misc/mnh/mnh-sm.h
@@ -20,6 +20,7 @@
#include <linux/msm_ion.h>
#include <uapi/linux/mnh-sm.h>
#include "mnh-pcie.h"
+#include "mnh-crypto.h"
/* Firmware download address */
#define HW_MNH_DRAM_BASE 0x40000000
@@ -43,6 +44,8 @@
unsigned long ap_offs; /* Slot's offset in the ion buffer */
uint64_t ep_addr; /* EP side addr (phys) */
size_t size; /* size of firmware */
+ enum cert_state cert; /* signature status */
+ size_t cert_size; /* length of the signature */
};
struct mnh_ion {
diff --git a/drivers/misc/uid_sys_stats.c b/drivers/misc/uid_sys_stats.c
index 3c9d311..f126bfa 100644
--- a/drivers/misc/uid_sys_stats.c
+++ b/drivers/misc/uid_sys_stats.c
@@ -26,6 +26,7 @@
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
+#include <linux/cpufreq.h>
#define UID_HASH_BITS 10
DECLARE_HASHTABLE(hash_table, UID_HASH_BITS);
@@ -186,6 +187,14 @@
kstrtol(end_uid, 10, &uid_end) != 0) {
return -EINVAL;
}
+
+ /* TODO need to unify uid_sys_stats interface with uid_time_in_state.
+ * Here we are reusing remove_uid_range to reduce the number of
+ * sys calls made by userspace clients, remove_uid_range removes uids
+ * from both here as well as from cpufreq uid_time_in_state
+ */
+ cpufreq_task_stats_remove_uids(uid_start, uid_end);
+
rt_mutex_lock(&uid_lock);
for (; uid_start <= uid_end; uid_start++) {
@@ -233,13 +242,29 @@
struct io_stats *io_last,
struct io_stats *io_dead)
{
- io_bucket->read_bytes += io_curr->read_bytes + io_dead->read_bytes -
+ s64 delta;
+
+ delta = io_curr->read_bytes + io_dead->read_bytes -
io_last->read_bytes;
- io_bucket->write_bytes += io_curr->write_bytes + io_dead->write_bytes -
+ if (delta > 0)
+ io_bucket->read_bytes += delta;
+
+ delta = io_curr->write_bytes + io_dead->write_bytes -
io_last->write_bytes;
- io_bucket->rchar += io_curr->rchar + io_dead->rchar - io_last->rchar;
- io_bucket->wchar += io_curr->wchar + io_dead->wchar - io_last->wchar;
- io_bucket->fsync += io_curr->fsync + io_dead->fsync - io_last->fsync;
+ if (delta > 0)
+ io_bucket->write_bytes += delta;
+
+ delta = io_curr->rchar + io_dead->rchar - io_last->rchar;
+ if (delta > 0)
+ io_bucket->rchar += delta;
+
+ delta = io_curr->wchar + io_dead->wchar - io_last->wchar;
+ if (delta > 0)
+ io_bucket->wchar += delta;
+
+ delta = io_curr->fsync + io_dead->fsync - io_last->fsync;
+ if (delta > 0)
+ io_bucket->fsync += delta;
io_last->read_bytes = io_curr->read_bytes;
io_last->write_bytes = io_curr->write_bytes;
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index 1f1582f..8d83877 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -804,6 +804,7 @@
switch (uhs) {
case MMC_TIMING_UHS_SDR50:
+ case MMC_TIMING_UHS_DDR50:
pinctrl = imx_data->pins_100mhz;
break;
case MMC_TIMING_UHS_SDR104:
diff --git a/drivers/mmc/host/sdhci-iproc.c b/drivers/mmc/host/sdhci-iproc.c
index 3b423b0..f280744 100644
--- a/drivers/mmc/host/sdhci-iproc.c
+++ b/drivers/mmc/host/sdhci-iproc.c
@@ -156,7 +156,8 @@
};
static const struct sdhci_pltfm_data sdhci_iproc_pltfm_data = {
- .quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK,
+ .quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
+ SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
.quirks2 = SDHCI_QUIRK2_ACMD23_BROKEN,
.ops = &sdhci_iproc_ops,
};
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index ddb9947..206b419 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -1438,7 +1438,9 @@
return;
}
timeout--;
- udelay(1);
+ spin_unlock_irq(&host->lock);
+ usleep_range(900, 1100);
+ spin_lock_irq(&host->lock);
}
clk |= SDHCI_CLOCK_CARD_EN;
diff --git a/drivers/mmc/host/ushc.c b/drivers/mmc/host/ushc.c
index d2c386f..1d84335 100644
--- a/drivers/mmc/host/ushc.c
+++ b/drivers/mmc/host/ushc.c
@@ -426,6 +426,9 @@
struct ushc_data *ushc;
int ret;
+ if (intf->cur_altsetting->desc.bNumEndpoints < 1)
+ return -ENODEV;
+
mmc = mmc_alloc_host(sizeof(struct ushc_data), &intf->dev);
if (mmc == NULL)
return -ENOMEM;
diff --git a/drivers/mtd/bcm47xxpart.c b/drivers/mtd/bcm47xxpart.c
index c0720c1..9190057 100644
--- a/drivers/mtd/bcm47xxpart.c
+++ b/drivers/mtd/bcm47xxpart.c
@@ -66,11 +66,13 @@
{
uint32_t buf;
size_t bytes_read;
+ int err;
- if (mtd_read(master, offset, sizeof(buf), &bytes_read,
- (uint8_t *)&buf) < 0) {
- pr_err("mtd_read error while parsing (offset: 0x%X)!\n",
- offset);
+ err = mtd_read(master, offset, sizeof(buf), &bytes_read,
+ (uint8_t *)&buf);
+ if (err && !mtd_is_bitflip(err)) {
+ pr_err("mtd_read error while parsing (offset: 0x%X): %d\n",
+ offset, err);
goto out_default;
}
@@ -95,6 +97,7 @@
int trx_part = -1;
int last_trx_part = -1;
int possible_nvram_sizes[] = { 0x8000, 0xF000, 0x10000, };
+ int err;
/*
* Some really old flashes (like AT45DB*) had smaller erasesize-s, but
@@ -118,8 +121,8 @@
/* Parse block by block looking for magics */
for (offset = 0; offset <= master->size - blocksize;
offset += blocksize) {
- /* Nothing more in higher memory */
- if (offset >= 0x2000000)
+ /* Nothing more in higher memory on BCM47XX (MIPS) */
+ if (config_enabled(CONFIG_BCM47XX) && offset >= 0x2000000)
break;
if (curr_part >= BCM47XXPART_MAX_PARTS) {
@@ -128,10 +131,11 @@
}
/* Read beginning of the block */
- if (mtd_read(master, offset, BCM47XXPART_BYTES_TO_READ,
- &bytes_read, (uint8_t *)buf) < 0) {
- pr_err("mtd_read error while parsing (offset: 0x%X)!\n",
- offset);
+ err = mtd_read(master, offset, BCM47XXPART_BYTES_TO_READ,
+ &bytes_read, (uint8_t *)buf);
+ if (err && !mtd_is_bitflip(err)) {
+ pr_err("mtd_read error while parsing (offset: 0x%X): %d\n",
+ offset, err);
continue;
}
@@ -225,12 +229,10 @@
last_trx_part = curr_part - 1;
- /*
- * We have whole TRX scanned, skip to the next part. Use
- * roundown (not roundup), as the loop will increase
- * offset in next step.
- */
- offset = rounddown(offset + trx->length, blocksize);
+ /* Jump to the end of TRX */
+ offset = roundup(offset + trx->length, blocksize);
+ /* Next loop iteration will increase the offset */
+ offset -= blocksize;
continue;
}
@@ -254,10 +256,11 @@
}
/* Read middle of the block */
- if (mtd_read(master, offset + 0x8000, 0x4,
- &bytes_read, (uint8_t *)buf) < 0) {
- pr_err("mtd_read error while parsing (offset: 0x%X)!\n",
- offset);
+ err = mtd_read(master, offset + 0x8000, 0x4, &bytes_read,
+ (uint8_t *)buf);
+ if (err && !mtd_is_bitflip(err)) {
+ pr_err("mtd_read error while parsing (offset: 0x%X): %d\n",
+ offset, err);
continue;
}
@@ -277,10 +280,11 @@
}
offset = master->size - possible_nvram_sizes[i];
- if (mtd_read(master, offset, 0x4, &bytes_read,
- (uint8_t *)buf) < 0) {
- pr_err("mtd_read error while reading at offset 0x%X!\n",
- offset);
+ err = mtd_read(master, offset, 0x4, &bytes_read,
+ (uint8_t *)buf);
+ if (err && !mtd_is_bitflip(err)) {
+ pr_err("mtd_read error while reading (offset 0x%X): %d\n",
+ offset, err);
continue;
}
diff --git a/drivers/mtd/chips/Kconfig b/drivers/mtd/chips/Kconfig
index 54479c4..8a25adc 100644
--- a/drivers/mtd/chips/Kconfig
+++ b/drivers/mtd/chips/Kconfig
@@ -111,6 +111,7 @@
config MTD_MAP_BANK_WIDTH_32
bool "Support 256-bit buswidth" if MTD_CFI_GEOMETRY
+ select MTD_COMPLEX_MAPPINGS if HAS_IOMEM
default n
help
If you wish to support CFI devices on a physical bus which is
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
index 37e4135..64d6f05 100644
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -1057,6 +1057,13 @@
return -EINVAL;
}
+ ret = spi_nor_wait_till_ready(nor);
+ if (ret) {
+ dev_err(nor->dev,
+ "timeout while writing configuration register\n");
+ return ret;
+ }
+
/* read back and check it */
ret = read_cr(nor);
if (!(ret > 0 && (ret & CR_QUAD_EN_SPAN))) {
diff --git a/drivers/mtd/ubi/upd.c b/drivers/mtd/ubi/upd.c
index 0134ba3..3971256 100644
--- a/drivers/mtd/ubi/upd.c
+++ b/drivers/mtd/ubi/upd.c
@@ -148,11 +148,11 @@
return err;
}
- if (bytes == 0) {
- err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL);
- if (err)
- return err;
+ err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL);
+ if (err)
+ return err;
+ if (bytes == 0) {
err = clear_update_marker(ubi, vol, 0);
if (err)
return err;
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index cbc99d5..ae57093 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -246,6 +246,8 @@
sizeof(*dm),
1000);
+ kfree(dm);
+
return rc;
}
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index ac72882..f089fa9 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -1153,6 +1153,12 @@
if (skb == NULL)
break;
np->rx_info[i].mapping = pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(np->pci_dev,
+ np->rx_info[i].mapping)) {
+ dev_kfree_skb(skb);
+ np->rx_info[i].skb = NULL;
+ break;
+ }
/* Grrr, we cannot offset to correctly align the IP header. */
np->rx_ring[i].rxaddr = cpu_to_dma(np->rx_info[i].mapping | RxDescValid);
}
@@ -1183,8 +1189,9 @@
{
struct netdev_private *np = netdev_priv(dev);
unsigned int entry;
+ unsigned int prev_tx;
u32 status;
- int i;
+ int i, j;
/*
* be cautious here, wrapping the queue has weird semantics
@@ -1202,6 +1209,7 @@
}
#endif /* ZEROCOPY && HAS_BROKEN_FIRMWARE */
+ prev_tx = np->cur_tx;
entry = np->cur_tx % TX_RING_SIZE;
for (i = 0; i < skb_num_frags(skb); i++) {
int wrap_ring = 0;
@@ -1235,6 +1243,11 @@
skb_frag_size(this_frag),
PCI_DMA_TODEVICE);
}
+ if (pci_dma_mapping_error(np->pci_dev,
+ np->tx_info[entry].mapping)) {
+ dev->stats.tx_dropped++;
+ goto err_out;
+ }
np->tx_ring[entry].addr = cpu_to_dma(np->tx_info[entry].mapping);
np->tx_ring[entry].status = cpu_to_le32(status);
@@ -1269,8 +1282,30 @@
netif_stop_queue(dev);
return NETDEV_TX_OK;
-}
+err_out:
+ entry = prev_tx % TX_RING_SIZE;
+ np->tx_info[entry].skb = NULL;
+ if (i > 0) {
+ pci_unmap_single(np->pci_dev,
+ np->tx_info[entry].mapping,
+ skb_first_frag_len(skb),
+ PCI_DMA_TODEVICE);
+ np->tx_info[entry].mapping = 0;
+ entry = (entry + np->tx_info[entry].used_slots) % TX_RING_SIZE;
+ for (j = 1; j < i; j++) {
+ pci_unmap_single(np->pci_dev,
+ np->tx_info[entry].mapping,
+ skb_frag_size(
+ &skb_shinfo(skb)->frags[j-1]),
+ PCI_DMA_TODEVICE);
+ entry++;
+ }
+ }
+ dev_kfree_skb_any(skb);
+ np->cur_tx = prev_tx;
+ return NETDEV_TX_OK;
+}
/* The interrupt handler does all of the Rx thread work and cleans up
after the Tx thread. */
@@ -1570,6 +1605,12 @@
break; /* Better luck next round. */
np->rx_info[entry].mapping =
pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(np->pci_dev,
+ np->rx_info[entry].mapping)) {
+ dev_kfree_skb(skb);
+ np->rx_info[entry].skb = NULL;
+ break;
+ }
np->rx_ring[entry].rxaddr =
cpu_to_dma(np->rx_info[entry].mapping | RxDescValid);
}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
index b6fa891..66ba1e0 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -913,8 +913,8 @@
#define RX_PACKET_ATTRIBUTES_CSUM_DONE_WIDTH 1
#define RX_PACKET_ATTRIBUTES_VLAN_CTAG_INDEX 1
#define RX_PACKET_ATTRIBUTES_VLAN_CTAG_WIDTH 1
-#define RX_PACKET_ATTRIBUTES_INCOMPLETE_INDEX 2
-#define RX_PACKET_ATTRIBUTES_INCOMPLETE_WIDTH 1
+#define RX_PACKET_ATTRIBUTES_LAST_INDEX 2
+#define RX_PACKET_ATTRIBUTES_LAST_WIDTH 1
#define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_INDEX 3
#define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_WIDTH 1
#define RX_PACKET_ATTRIBUTES_CONTEXT_INDEX 4
@@ -923,6 +923,8 @@
#define RX_PACKET_ATTRIBUTES_RX_TSTAMP_WIDTH 1
#define RX_PACKET_ATTRIBUTES_RSS_HASH_INDEX 6
#define RX_PACKET_ATTRIBUTES_RSS_HASH_WIDTH 1
+#define RX_PACKET_ATTRIBUTES_FIRST_INDEX 7
+#define RX_PACKET_ATTRIBUTES_FIRST_WIDTH 1
#define RX_NORMAL_DESC0_OVT_INDEX 0
#define RX_NORMAL_DESC0_OVT_WIDTH 16
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index f6a7161..75e6e7e 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -1658,10 +1658,15 @@
/* Get the header length */
if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, FD)) {
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ FIRST, 1);
rdata->rx.hdr_len = XGMAC_GET_BITS_LE(rdesc->desc2,
RX_NORMAL_DESC2, HL);
if (rdata->rx.hdr_len)
pdata->ext_stats.rx_split_header_packets++;
+ } else {
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ FIRST, 0);
}
/* Get the RSS hash */
@@ -1684,19 +1689,16 @@
}
}
- /* Get the packet length */
- rdata->rx.len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL);
-
- if (!XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD)) {
- /* Not all the data has been transferred for this packet */
- XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
- INCOMPLETE, 1);
+ /* Not all the data has been transferred for this packet */
+ if (!XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD))
return 0;
- }
/* This is the last of the data for this packet */
XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
- INCOMPLETE, 0);
+ LAST, 1);
+
+ /* Get the packet length */
+ rdata->rx.len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL);
/* Set checksum done indicator as appropriate */
if (netdev->features & NETIF_F_RXCSUM)
@@ -2730,8 +2732,10 @@
/* Flush Tx queues */
ret = xgbe_flush_tx_queues(pdata);
- if (ret)
+ if (ret) {
+ netdev_err(pdata->netdev, "error flushing TX queues\n");
return ret;
+ }
/*
* Initialize DMA related features
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 53ce122..64034ff 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -877,7 +877,9 @@
DBGPR("-->xgbe_start\n");
- hw_if->init(pdata);
+ ret = hw_if->init(pdata);
+ if (ret)
+ return ret;
ret = phy_if->phy_start(pdata);
if (ret)
@@ -1760,13 +1762,12 @@
{
struct sk_buff *skb;
u8 *packet;
- unsigned int copy_len;
skb = napi_alloc_skb(napi, rdata->rx.hdr.dma_len);
if (!skb)
return NULL;
- /* Start with the header buffer which may contain just the header
+ /* Pull in the header buffer which may contain just the header
* or the header plus data
*/
dma_sync_single_range_for_cpu(pdata->dev, rdata->rx.hdr.dma_base,
@@ -1775,30 +1776,49 @@
packet = page_address(rdata->rx.hdr.pa.pages) +
rdata->rx.hdr.pa.pages_offset;
- copy_len = (rdata->rx.hdr_len) ? rdata->rx.hdr_len : len;
- copy_len = min(rdata->rx.hdr.dma_len, copy_len);
- skb_copy_to_linear_data(skb, packet, copy_len);
- skb_put(skb, copy_len);
-
- len -= copy_len;
- if (len) {
- /* Add the remaining data as a frag */
- dma_sync_single_range_for_cpu(pdata->dev,
- rdata->rx.buf.dma_base,
- rdata->rx.buf.dma_off,
- rdata->rx.buf.dma_len,
- DMA_FROM_DEVICE);
-
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
- rdata->rx.buf.pa.pages,
- rdata->rx.buf.pa.pages_offset,
- len, rdata->rx.buf.dma_len);
- rdata->rx.buf.pa.pages = NULL;
- }
+ skb_copy_to_linear_data(skb, packet, len);
+ skb_put(skb, len);
return skb;
}
+static unsigned int xgbe_rx_buf1_len(struct xgbe_ring_data *rdata,
+ struct xgbe_packet_data *packet)
+{
+ /* Always zero if not the first descriptor */
+ if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, FIRST))
+ return 0;
+
+ /* First descriptor with split header, return header length */
+ if (rdata->rx.hdr_len)
+ return rdata->rx.hdr_len;
+
+ /* First descriptor but not the last descriptor and no split header,
+ * so the full buffer was used
+ */
+ if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST))
+ return rdata->rx.hdr.dma_len;
+
+ /* First descriptor and last descriptor and no split header, so
+ * calculate how much of the buffer was used
+ */
+ return min_t(unsigned int, rdata->rx.hdr.dma_len, rdata->rx.len);
+}
+
+static unsigned int xgbe_rx_buf2_len(struct xgbe_ring_data *rdata,
+ struct xgbe_packet_data *packet,
+ unsigned int len)
+{
+ /* Always the full buffer if not the last descriptor */
+ if (!XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, LAST))
+ return rdata->rx.buf.dma_len;
+
+ /* Last descriptor so calculate how much of the buffer was used
+ * for the last bit of data
+ */
+ return rdata->rx.len - len;
+}
+
static int xgbe_tx_poll(struct xgbe_channel *channel)
{
struct xgbe_prv_data *pdata = channel->pdata;
@@ -1881,8 +1901,8 @@
struct napi_struct *napi;
struct sk_buff *skb;
struct skb_shared_hwtstamps *hwtstamps;
- unsigned int incomplete, error, context_next, context;
- unsigned int len, rdesc_len, max_len;
+ unsigned int last, error, context_next, context;
+ unsigned int len, buf1_len, buf2_len, max_len;
unsigned int received = 0;
int packet_count = 0;
@@ -1892,7 +1912,7 @@
if (!ring)
return 0;
- incomplete = 0;
+ last = 0;
context_next = 0;
napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;
@@ -1926,9 +1946,8 @@
received++;
ring->cur++;
- incomplete = XGMAC_GET_BITS(packet->attributes,
- RX_PACKET_ATTRIBUTES,
- INCOMPLETE);
+ last = XGMAC_GET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ LAST);
context_next = XGMAC_GET_BITS(packet->attributes,
RX_PACKET_ATTRIBUTES,
CONTEXT_NEXT);
@@ -1937,7 +1956,7 @@
CONTEXT);
/* Earlier error, just drain the remaining data */
- if ((incomplete || context_next) && error)
+ if ((!last || context_next) && error)
goto read_again;
if (error || packet->errors) {
@@ -1949,16 +1968,22 @@
}
if (!context) {
- /* Length is cumulative, get this descriptor's length */
- rdesc_len = rdata->rx.len - len;
- len += rdesc_len;
+ /* Get the data length in the descriptor buffers */
+ buf1_len = xgbe_rx_buf1_len(rdata, packet);
+ len += buf1_len;
+ buf2_len = xgbe_rx_buf2_len(rdata, packet, len);
+ len += buf2_len;
- if (rdesc_len && !skb) {
+ if (!skb) {
skb = xgbe_create_skb(pdata, napi, rdata,
- rdesc_len);
- if (!skb)
+ buf1_len);
+ if (!skb) {
error = 1;
- } else if (rdesc_len) {
+ goto skip_data;
+ }
+ }
+
+ if (buf2_len) {
dma_sync_single_range_for_cpu(pdata->dev,
rdata->rx.buf.dma_base,
rdata->rx.buf.dma_off,
@@ -1968,13 +1993,14 @@
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
rdata->rx.buf.pa.pages,
rdata->rx.buf.pa.pages_offset,
- rdesc_len,
+ buf2_len,
rdata->rx.buf.dma_len);
rdata->rx.buf.pa.pages = NULL;
}
}
- if (incomplete || context_next)
+skip_data:
+ if (!last || context_next)
goto read_again;
if (!skb)
@@ -2033,7 +2059,7 @@
}
/* Check if we need to save state before leaving */
- if (received && (incomplete || context_next)) {
+ if (received && (!last || context_next)) {
rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
rdata->state_saved = 1;
rdata->state.skb = skb;
diff --git a/drivers/net/ethernet/aurora/nb8800.c b/drivers/net/ethernet/aurora/nb8800.c
index ecc4a33..0a54e7d 100644
--- a/drivers/net/ethernet/aurora/nb8800.c
+++ b/drivers/net/ethernet/aurora/nb8800.c
@@ -608,7 +608,7 @@
mac_mode |= HALF_DUPLEX;
if (gigabit) {
- if (priv->phy_mode == PHY_INTERFACE_MODE_RGMII)
+ if (phy_interface_is_rgmii(dev->phydev))
mac_mode |= RGMII_MODE;
mac_mode |= GMAC_MODE;
@@ -1295,11 +1295,10 @@
break;
case PHY_INTERFACE_MODE_RGMII:
- pad_mode = PAD_MODE_RGMII;
- break;
-
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
case PHY_INTERFACE_MODE_RGMII_TXID:
- pad_mode = PAD_MODE_RGMII | PAD_MODE_GTX_CLK_DELAY;
+ pad_mode = PAD_MODE_RGMII;
break;
default:
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index b56c9c5..a5e4b4b 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -255,15 +255,16 @@
while (ring->start != ring->end) {
int slot_idx = ring->start % BGMAC_TX_RING_SLOTS;
struct bgmac_slot_info *slot = &ring->slots[slot_idx];
- u32 ctl1;
+ u32 ctl0, ctl1;
int len;
if (slot_idx == empty_slot)
break;
+ ctl0 = le32_to_cpu(ring->cpu_base[slot_idx].ctl0);
ctl1 = le32_to_cpu(ring->cpu_base[slot_idx].ctl1);
len = ctl1 & BGMAC_DESC_CTL1_LEN;
- if (ctl1 & BGMAC_DESC_CTL0_SOF)
+ if (ctl0 & BGMAC_DESC_CTL0_SOF)
/* Unmap no longer used buffer */
dma_unmap_single(dma_dev, slot->dma_addr, len,
DMA_TO_DEVICE);
@@ -469,6 +470,11 @@
len -= ETH_FCS_LEN;
skb = build_skb(buf, BGMAC_RX_ALLOC_SIZE);
+ if (unlikely(!skb)) {
+ bgmac_err(bgmac, "build_skb failed\n");
+ put_page(virt_to_head_page(buf));
+ break;
+ }
skb_put(skb, BGMAC_RX_FRAME_OFFSET +
BGMAC_RX_BUF_OFFSET + len);
skb_pull(skb, BGMAC_RX_FRAME_OFFSET +
@@ -1302,7 +1308,8 @@
phy_start(bgmac->phy_dev);
- netif_carrier_on(net_dev);
+ netif_start_queue(net_dev);
+
return 0;
}
@@ -1576,6 +1583,11 @@
dev_warn(&core->dev, "Using random MAC: %pM\n", mac);
}
+ /* This (reset &) enable is not preset in specs or reference driver but
+ * Broadcom does it in arch PCI code when enabling fake PCI device.
+ */
+ bcma_core_enable(core, 0);
+
/* Allocation and references */
net_dev = alloc_etherdev(sizeof(*bgmac));
if (!net_dev)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index c82ab87..e5911cc 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -1949,7 +1949,7 @@
}
/* select a non-FCoE queue */
- return fallback(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp);
+ return fallback(dev, skb) % (BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos);
}
void bnx2x_set_num_queues(struct bnx2x *bp)
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 07f5f23..4744919 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -2473,7 +2473,8 @@
INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
bp->ntp_fltr_count = 0;
- bp->ntp_fltr_bmap = kzalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
+ bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
+ sizeof(long),
GFP_KERNEL);
if (!bp->ntp_fltr_bmap)
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 9162756..f971d92 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -3495,7 +3495,8 @@
bcmgenet_netif_stop(dev);
- phy_suspend(priv->phydev);
+ if (!device_may_wakeup(d))
+ phy_suspend(priv->phydev);
netif_device_detach(dev);
@@ -3592,7 +3593,8 @@
netif_device_attach(dev);
- phy_resume(priv->phydev);
+ if (!device_may_wakeup(d))
+ phy_resume(priv->phydev);
if (priv->eee.eee_enabled)
bcmgenet_eee_enable_set(dev, true);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 8bdfe53..e96d1f9 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -220,20 +220,6 @@
udelay(60);
}
-static void bcmgenet_internal_phy_setup(struct net_device *dev)
-{
- struct bcmgenet_priv *priv = netdev_priv(dev);
- u32 reg;
-
- /* Power up PHY */
- bcmgenet_phy_power_set(dev, true);
- /* enable APD */
- reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
- reg |= EXT_PWR_DN_EN_LD;
- bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
- bcmgenet_mii_reset(dev);
-}
-
static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv)
{
u32 reg;
@@ -281,7 +267,6 @@
if (priv->internal_phy) {
phy_name = "internal PHY";
- bcmgenet_internal_phy_setup(dev);
} else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
phy_name = "MoCA";
bcmgenet_moca_phy_setup(priv);
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 49056c3..3613469 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -8722,11 +8722,14 @@
tg3_mem_rx_release(tp);
tg3_mem_tx_release(tp);
+ /* Protect tg3_get_stats64() from reading freed tp->hw_stats. */
+ tg3_full_lock(tp, 0);
if (tp->hw_stats) {
dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
tp->hw_stats, tp->stats_mapping);
tp->hw_stats = NULL;
}
+ tg3_full_unlock(tp);
}
/*
@@ -12031,7 +12034,7 @@
int ret;
u32 offset, len, b_offset, odd_len;
u8 *buf;
- __be32 start, end;
+ __be32 start = 0, end;
if (tg3_flag(tp, NO_NVRAM) ||
eeprom->magic != TG3_EEPROM_MAGIC)
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 7445da2..cc17256 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -2823,7 +2823,7 @@
if (!g) {
netif_info(lio, tx_err, lio->netdev,
"Transmit scatter gather: glist null!\n");
- goto lio_xmit_failed;
+ goto lio_xmit_dma_failed;
}
cmdsetup.s.gather = 1;
@@ -2894,7 +2894,7 @@
else
status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more);
if (status == IQ_SEND_FAILED)
- goto lio_xmit_failed;
+ goto lio_xmit_dma_failed;
netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n");
@@ -2908,12 +2908,13 @@
return NETDEV_TX_OK;
+lio_xmit_dma_failed:
+ dma_unmap_single(&oct->pci_dev->dev, ndata.cmd.dptr,
+ ndata.datasize, DMA_TO_DEVICE);
lio_xmit_failed:
stats->tx_dropped++;
netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n",
iq_no, stats->tx_dropped);
- dma_unmap_single(&oct->pci_dev->dev, ndata.cmd.dptr,
- ndata.datasize, DMA_TO_DEVICE);
recv_buffer_free(skb);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 0d14761..090e006 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -2714,10 +2714,14 @@
if (err)
goto irq_err;
}
+
+ mutex_lock(&uld_mutex);
enable_rx(adap);
t4_sge_start(adap);
t4_intr_enable(adap);
adap->flags |= FULL_INIT_DONE;
+ mutex_unlock(&uld_mutex);
+
notify_ulds(adap, CXGB4_STATE_UP);
#if IS_ENABLED(CONFIG_IPV6)
update_clip(adap);
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 1795c93..7b8638ddb 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -1052,7 +1052,7 @@
err:
spin_unlock_bh(&adapter->mcc_lock);
- if (status == MCC_STATUS_UNAUTHORIZED_REQUEST)
+ if (base_status(status) == MCC_STATUS_UNAUTHORIZED_REQUEST)
status = -EPERM;
return status;
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 8a1d9ff..2625586 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -5260,9 +5260,11 @@
struct be_adapter *adapter = netdev_priv(dev);
u8 l4_hdr = 0;
- /* The code below restricts offload features for some tunneled packets.
+ /* The code below restricts offload features for some tunneled and
+ * Q-in-Q packets.
* Offload features for normal (non tunnel) packets are unchanged.
*/
+ features = vlan_features_check(skb, features);
if (!skb->encapsulation ||
!(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
return features;
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index ff66549..52f2230 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -713,6 +713,8 @@
if (ret)
return ret;
+ napi_enable(&priv->napi);
+
ethoc_init_ring(priv, dev->mem_start);
ethoc_reset(priv);
@@ -725,7 +727,6 @@
}
phy_start(priv->phy);
- napi_enable(&priv->napi);
if (netif_msg_ifup(priv)) {
dev_info(&dev->dev, "I/O: %08lx Memory: %08lx-%08lx\n",
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index 3e233d9..4cd2a7d 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -1999,8 +1999,8 @@
if (!rxb->page)
continue;
- dma_unmap_single(rx_queue->dev, rxb->dma,
- PAGE_SIZE, DMA_FROM_DEVICE);
+ dma_unmap_page(rx_queue->dev, rxb->dma,
+ PAGE_SIZE, DMA_FROM_DEVICE);
__free_page(rxb->page);
rxb->page = NULL;
@@ -2939,7 +2939,7 @@
size, GFAR_RXB_TRUESIZE);
/* try reuse page */
- if (unlikely(page_count(page) != 1))
+ if (unlikely(page_count(page) != 1 || page_is_pfmemalloc(page)))
return false;
/* change offset to the other half */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 08cef0d..2fa54b0 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -105,8 +105,8 @@
struct hns_nic_ring_data *ring_data)
{
struct hns_nic_priv *priv = netdev_priv(ndev);
- struct device *dev = priv->dev;
struct hnae_ring *ring = ring_data->ring;
+ struct device *dev = ring_to_dev(ring);
struct netdev_queue *dev_queue;
struct skb_frag_struct *frag;
int buf_num;
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 7af870a..2f9b12c 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -58,7 +58,7 @@
static const char ibmveth_driver_name[] = "ibmveth";
static const char ibmveth_driver_string[] = "IBM Power Virtual Ethernet Driver";
-#define ibmveth_driver_version "1.05"
+#define ibmveth_driver_version "1.06"
MODULE_AUTHOR("Santiago Leon <santil@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("IBM Power Virtual Ethernet Driver");
@@ -137,6 +137,11 @@
return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_OFF_MASK;
}
+static inline int ibmveth_rxq_large_packet(struct ibmveth_adapter *adapter)
+{
+ return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_LRG_PKT;
+}
+
static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter)
{
return be32_to_cpu(adapter->rx_queue.queue_addr[adapter->rx_queue.index].length);
@@ -1172,6 +1177,53 @@
goto retry_bounce;
}
+static void ibmveth_rx_mss_helper(struct sk_buff *skb, u16 mss, int lrg_pkt)
+{
+ struct tcphdr *tcph;
+ int offset = 0;
+ int hdr_len;
+
+ /* only TCP packets will be aggregated */
+ if (skb->protocol == htons(ETH_P_IP)) {
+ struct iphdr *iph = (struct iphdr *)skb->data;
+
+ if (iph->protocol == IPPROTO_TCP) {
+ offset = iph->ihl * 4;
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+ } else {
+ return;
+ }
+ } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ struct ipv6hdr *iph6 = (struct ipv6hdr *)skb->data;
+
+ if (iph6->nexthdr == IPPROTO_TCP) {
+ offset = sizeof(struct ipv6hdr);
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+ } else {
+ return;
+ }
+ } else {
+ return;
+ }
+ /* if mss is not set through Large Packet bit/mss in rx buffer,
+ * expect that the mss will be written to the tcp header checksum.
+ */
+ tcph = (struct tcphdr *)(skb->data + offset);
+ if (lrg_pkt) {
+ skb_shinfo(skb)->gso_size = mss;
+ } else if (offset) {
+ skb_shinfo(skb)->gso_size = ntohs(tcph->check);
+ tcph->check = 0;
+ }
+
+ if (skb_shinfo(skb)->gso_size) {
+ hdr_len = offset + tcph->doff * 4;
+ skb_shinfo(skb)->gso_segs =
+ DIV_ROUND_UP(skb->len - hdr_len,
+ skb_shinfo(skb)->gso_size);
+ }
+}
+
static int ibmveth_poll(struct napi_struct *napi, int budget)
{
struct ibmveth_adapter *adapter =
@@ -1180,6 +1232,7 @@
int frames_processed = 0;
unsigned long lpar_rc;
struct iphdr *iph;
+ u16 mss = 0;
restart_poll:
while (frames_processed < budget) {
@@ -1197,9 +1250,21 @@
int length = ibmveth_rxq_frame_length(adapter);
int offset = ibmveth_rxq_frame_offset(adapter);
int csum_good = ibmveth_rxq_csum_good(adapter);
+ int lrg_pkt = ibmveth_rxq_large_packet(adapter);
skb = ibmveth_rxq_get_buffer(adapter);
+ /* if the large packet bit is set in the rx queue
+ * descriptor, the mss will be written by PHYP eight
+ * bytes from the start of the rx buffer, which is
+ * skb->data at this stage
+ */
+ if (lrg_pkt) {
+ __be64 *rxmss = (__be64 *)(skb->data + 8);
+
+ mss = (u16)be64_to_cpu(*rxmss);
+ }
+
new_skb = NULL;
if (length < rx_copybreak)
new_skb = netdev_alloc_skb(netdev, length);
@@ -1233,11 +1298,15 @@
if (iph->check == 0xffff) {
iph->check = 0;
iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
- adapter->rx_large_packets++;
}
}
}
+ if (length > netdev->mtu + ETH_HLEN) {
+ ibmveth_rx_mss_helper(skb, mss, lrg_pkt);
+ adapter->rx_large_packets++;
+ }
+
napi_gro_receive(napi, skb); /* send it up */
netdev->stats.rx_packets++;
@@ -1533,8 +1602,11 @@
netdev->netdev_ops = &ibmveth_netdev_ops;
netdev->ethtool_ops = &netdev_ethtool_ops;
SET_NETDEV_DEV(netdev, &dev->dev);
- netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
- NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ netdev->hw_features = NETIF_F_SG;
+ if (vio_get_attribute(dev, "ibm,illan-options", NULL) != NULL) {
+ netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_RXCSUM;
+ }
netdev->features |= netdev->hw_features;
diff --git a/drivers/net/ethernet/ibm/ibmveth.h b/drivers/net/ethernet/ibm/ibmveth.h
index 4eade67..7acda04 100644
--- a/drivers/net/ethernet/ibm/ibmveth.h
+++ b/drivers/net/ethernet/ibm/ibmveth.h
@@ -209,6 +209,7 @@
#define IBMVETH_RXQ_TOGGLE 0x80000000
#define IBMVETH_RXQ_TOGGLE_SHIFT 31
#define IBMVETH_RXQ_VALID 0x40000000
+#define IBMVETH_RXQ_LRG_PKT 0x04000000
#define IBMVETH_RXQ_NO_CSUM 0x02000000
#define IBMVETH_RXQ_CSUM_GOOD 0x01000000
#define IBMVETH_RXQ_OFF_MASK 0x0000FFFF
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
index 23ec28f..afaa98d 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
@@ -77,6 +77,10 @@
s32 ret_val = 0;
u16 phy_id;
+ /* ensure PHY page selection to fix misconfigured i210 */
+ if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211))
+ phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0);
+
ret_val = phy->ops.read_reg(hw, PHY_ID1, &phy_id);
if (ret_val)
goto out;
diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
index d74f5f4..07eabf7 100644
--- a/drivers/net/ethernet/korina.c
+++ b/drivers/net/ethernet/korina.c
@@ -900,10 +900,10 @@
DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR,
&lp->rx_dma_regs->dmasm);
- korina_free_ring(dev);
-
napi_disable(&lp->napi);
+ korina_free_ring(dev);
+
if (korina_init(dev) < 0) {
printk(KERN_ERR "%s: cannot restart device\n", dev->name);
return;
@@ -1064,12 +1064,12 @@
tmp = tmp | DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR;
writel(tmp, &lp->rx_dma_regs->dmasm);
- korina_free_ring(dev);
-
napi_disable(&lp->napi);
cancel_work_sync(&lp->restart_task);
+ korina_free_ring(dev);
+
free_irq(lp->rx_irq, dev);
free_irq(lp->tx_irq, dev);
free_irq(lp->ovr_irq, dev);
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 71ec9cb..15056f0 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -2446,7 +2446,7 @@
mvneta_port_enable(pp);
/* Enable polling on the port */
- for_each_present_cpu(cpu) {
+ for_each_online_cpu(cpu) {
struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
napi_enable(&port->napi);
@@ -2472,7 +2472,7 @@
phy_stop(pp->phy_dev);
- for_each_present_cpu(cpu) {
+ for_each_online_cpu(cpu) {
struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
napi_disable(&port->napi);
@@ -2902,13 +2902,11 @@
static int mvneta_stop(struct net_device *dev)
{
struct mvneta_port *pp = netdev_priv(dev);
- int cpu;
mvneta_stop_dev(pp);
mvneta_mdio_remove(pp);
unregister_cpu_notifier(&pp->cpu_notifier);
- for_each_present_cpu(cpu)
- smp_call_function_single(cpu, mvneta_percpu_disable, pp, true);
+ on_each_cpu(mvneta_percpu_disable, pp, true);
free_percpu_irq(dev->irq, pp->ports);
mvneta_cleanup_rxqs(pp);
mvneta_cleanup_txqs(pp);
diff --git a/drivers/net/ethernet/mellanox/mlx4/catas.c b/drivers/net/ethernet/mellanox/mlx4/catas.c
index 715de8a..e203d0c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/catas.c
+++ b/drivers/net/ethernet/mellanox/mlx4/catas.c
@@ -158,7 +158,7 @@
return -ETIMEDOUT;
}
-static int mlx4_comm_internal_err(u32 slave_read)
+int mlx4_comm_internal_err(u32 slave_read)
{
return (u32)COMM_CHAN_EVENT_INTERNAL_ERR ==
(slave_read & (u32)COMM_CHAN_EVENT_INTERNAL_ERR) ? 1 : 0;
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index 3348e64..6eba580 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -101,13 +101,19 @@
{
struct mlx4_cq *cq;
+ rcu_read_lock();
cq = radix_tree_lookup(&mlx4_priv(dev)->cq_table.tree,
cqn & (dev->caps.num_cqs - 1));
+ rcu_read_unlock();
+
if (!cq) {
mlx4_dbg(dev, "Completion event for bogus CQ %08x\n", cqn);
return;
}
+ /* Acessing the CQ outside of rcu_read_lock is safe, because
+ * the CQ is freed only after interrupt handling is completed.
+ */
++cq->arm_sn;
cq->comp(cq);
@@ -118,23 +124,19 @@
struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table;
struct mlx4_cq *cq;
- spin_lock(&cq_table->lock);
-
+ rcu_read_lock();
cq = radix_tree_lookup(&cq_table->tree, cqn & (dev->caps.num_cqs - 1));
- if (cq)
- atomic_inc(&cq->refcount);
-
- spin_unlock(&cq_table->lock);
+ rcu_read_unlock();
if (!cq) {
- mlx4_warn(dev, "Async event for bogus CQ %08x\n", cqn);
+ mlx4_dbg(dev, "Async event for bogus CQ %08x\n", cqn);
return;
}
+ /* Acessing the CQ outside of rcu_read_lock is safe, because
+ * the CQ is freed only after interrupt handling is completed.
+ */
cq->event(cq, event_type);
-
- if (atomic_dec_and_test(&cq->refcount))
- complete(&cq->free);
}
static int mlx4_SW2HW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
@@ -301,9 +303,9 @@
if (err)
return err;
- spin_lock_irq(&cq_table->lock);
+ spin_lock(&cq_table->lock);
err = radix_tree_insert(&cq_table->tree, cq->cqn, cq);
- spin_unlock_irq(&cq_table->lock);
+ spin_unlock(&cq_table->lock);
if (err)
goto err_icm;
@@ -347,9 +349,9 @@
return 0;
err_radix:
- spin_lock_irq(&cq_table->lock);
+ spin_lock(&cq_table->lock);
radix_tree_delete(&cq_table->tree, cq->cqn);
- spin_unlock_irq(&cq_table->lock);
+ spin_unlock(&cq_table->lock);
err_icm:
mlx4_cq_free_icm(dev, cq->cqn);
@@ -368,15 +370,15 @@
if (err)
mlx4_warn(dev, "HW2SW_CQ failed (%d) for CQN %06x\n", err, cq->cqn);
+ spin_lock(&cq_table->lock);
+ radix_tree_delete(&cq_table->tree, cq->cqn);
+ spin_unlock(&cq_table->lock);
+
synchronize_irq(priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq);
if (priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq !=
priv->eq_table.eq[MLX4_EQ_ASYNC].irq)
synchronize_irq(priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
- spin_lock_irq(&cq_table->lock);
- radix_tree_delete(&cq_table->tree, cq->cqn);
- spin_unlock_irq(&cq_table->lock);
-
if (atomic_dec_and_test(&cq->refcount))
complete(&cq->free);
wait_for_completion(&cq->free);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 28a4b34..82bf1b5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -439,8 +439,14 @@
ring->cqn = priv->rx_cq[ring_ind]->mcq.cqn;
ring->stride = stride;
- if (ring->stride <= TXBB_SIZE)
+ if (ring->stride <= TXBB_SIZE) {
+ /* Stamp first unused send wqe */
+ __be32 *ptr = (__be32 *)ring->buf;
+ __be32 stamp = cpu_to_be32(1 << STAMP_SHIFT);
+ *ptr = stamp;
+ /* Move pointer to start of rx section */
ring->buf += TXBB_SIZE;
+ }
ring->log_stride = ffs(ring->stride) - 1;
ring->buf_size = ring->size * ring->stride;
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 603d1c3..ff77b8b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -542,8 +542,9 @@
break;
case MLX4_EVENT_TYPE_SRQ_LIMIT:
- mlx4_dbg(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT\n",
- __func__);
+ mlx4_dbg(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT. srq_no=0x%x, eq 0x%x\n",
+ __func__, be32_to_cpu(eqe->event.srq.srqn),
+ eq->eqn);
case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
if (mlx4_is_master(dev)) {
/* forward only to slave owning the SRQ */
@@ -558,15 +559,19 @@
eq->eqn, eq->cons_index, ret);
break;
}
- mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x, event: %02x(%02x)\n",
- __func__, slave,
- be32_to_cpu(eqe->event.srq.srqn),
- eqe->type, eqe->subtype);
+ if (eqe->type ==
+ MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)
+ mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x, event: %02x(%02x)\n",
+ __func__, slave,
+ be32_to_cpu(eqe->event.srq.srqn),
+ eqe->type, eqe->subtype);
if (!ret && slave != dev->caps.function) {
- mlx4_warn(dev, "%s: sending event %02x(%02x) to slave:%d\n",
- __func__, eqe->type,
- eqe->subtype, slave);
+ if (eqe->type ==
+ MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)
+ mlx4_warn(dev, "%s: sending event %02x(%02x) to slave:%d\n",
+ __func__, eqe->type,
+ eqe->subtype, slave);
mlx4_slave_event(dev, slave, eqe);
break;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c
index 2a9dd46..e1f9e7c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/icm.c
+++ b/drivers/net/ethernet/mellanox/mlx4/icm.c
@@ -118,8 +118,13 @@
if (!buf)
return -ENOMEM;
+ if (offset_in_page(buf)) {
+ dma_free_coherent(dev, PAGE_SIZE << order,
+ buf, sg_dma_address(mem));
+ return -ENOMEM;
+ }
+
sg_set_buf(mem, buf, PAGE_SIZE << order);
- BUG_ON(mem->offset);
sg_dma_len(mem) = PAGE_SIZE << order;
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/intf.c b/drivers/net/ethernet/mellanox/mlx4/intf.c
index 0472941..1a134e0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/intf.c
+++ b/drivers/net/ethernet/mellanox/mlx4/intf.c
@@ -218,6 +218,18 @@
struct mlx4_interface *intf;
mlx4_stop_catas_poll(dev);
+ if (dev->persist->interface_state & MLX4_INTERFACE_STATE_DELETION &&
+ mlx4_is_slave(dev)) {
+ /* In mlx4_remove_one on a VF */
+ u32 slave_read =
+ swab32(readl(&mlx4_priv(dev)->mfunc.comm->slave_read));
+
+ if (mlx4_comm_internal_err(slave_read)) {
+ mlx4_dbg(dev, "%s: comm channel is down, entering error state.\n",
+ __func__);
+ mlx4_enter_error_state(dev->persist);
+ }
+ }
mutex_lock(&intf_mutex);
list_for_each_entry(intf, &intf_list, list)
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index e1cf9036..f5fdbd5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -1205,6 +1205,7 @@
void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type);
void mlx4_enter_error_state(struct mlx4_dev_persistent *persist);
+int mlx4_comm_internal_err(u32 slave_read);
int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port,
enum mlx4_port_type *type);
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index d314d96..d1fc7fa 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -2955,6 +2955,9 @@
put_res(dev, slave, srqn, RES_SRQ);
qp->srq = srq;
}
+
+ /* Save param3 for dynamic changes from VST back to VGT */
+ qp->param3 = qpc->param3;
put_res(dev, slave, rcqn, RES_CQ);
put_res(dev, slave, mtt_base, RES_MTT);
res_end_move(dev, slave, RES_QP, qpn);
@@ -3747,7 +3750,6 @@
int qpn = vhcr->in_modifier & 0x7fffff;
struct res_qp *qp;
u8 orig_sched_queue;
- __be32 orig_param3 = qpc->param3;
u8 orig_vlan_control = qpc->pri_path.vlan_control;
u8 orig_fvl_rx = qpc->pri_path.fvl_rx;
u8 orig_pri_path_fl = qpc->pri_path.fl;
@@ -3789,7 +3791,6 @@
*/
if (!err) {
qp->sched_queue = orig_sched_queue;
- qp->param3 = orig_param3;
qp->vlan_control = orig_vlan_control;
qp->fvl_rx = orig_fvl_rx;
qp->pri_path_fl = orig_pri_path_fl;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index cc19906..6c66d29 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -630,6 +630,10 @@
pr_debug("\n");
}
+static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg);
+static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
+ struct mlx5_cmd_msg *msg);
+
static void cmd_work_handler(struct work_struct *work)
{
struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
@@ -638,16 +642,27 @@
struct mlx5_cmd_layout *lay;
struct semaphore *sem;
unsigned long flags;
+ int alloc_ret;
sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
down(sem);
if (!ent->page_queue) {
- ent->idx = alloc_ent(cmd);
- if (ent->idx < 0) {
+ alloc_ret = alloc_ent(cmd);
+ if (alloc_ret < 0) {
+ if (ent->callback) {
+ ent->callback(-EAGAIN, ent->context);
+ mlx5_free_cmd_msg(dev, ent->out);
+ free_msg(dev, ent->in);
+ free_cmd(ent);
+ } else {
+ ent->ret = -EAGAIN;
+ complete(&ent->done);
+ }
mlx5_core_err(dev, "failed to allocate command entry\n");
up(sem);
return;
}
+ ent->idx = alloc_ret;
} else {
ent->idx = cmd->max_reg_cmds;
spin_lock_irqsave(&cmd->alloc_lock, flags);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index cf00985..e9408f5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -197,6 +197,10 @@
if (lro_num_seg > 1) {
mlx5e_lro_update_hdr(skb, cqe);
skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg);
+ /* Subtract one since we already counted this as one
+ * "regular" packet in mlx5e_complete_rx_cqe()
+ */
+ rq->stats.packets += lro_num_seg - 1;
rq->stats.lro_packets++;
rq->stats.lro_bytes += cqe_bcnt;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index ba115ec..f5c1f4a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -85,7 +85,7 @@
[2] = {
.mask = MLX5_PROF_MASK_QP_SIZE |
MLX5_PROF_MASK_MR_CACHE,
- .log_max_qp = 17,
+ .log_max_qp = 18,
.mr_cache[0] = {
.size = 500,
.limit = 250
@@ -153,8 +153,9 @@
},
};
-#define FW_INIT_TIMEOUT_MILI 2000
-#define FW_INIT_WAIT_MS 2
+#define FW_INIT_TIMEOUT_MILI 2000
+#define FW_INIT_WAIT_MS 2
+#define FW_PRE_INIT_TIMEOUT_MILI 10000
static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili)
{
@@ -934,6 +935,15 @@
*/
dev->state = MLX5_DEVICE_STATE_UP;
+ /* wait for firmware to accept initialization segments configurations
+ */
+ err = wait_fw_init(dev, FW_PRE_INIT_TIMEOUT_MILI);
+ if (err) {
+ dev_err(&dev->pdev->dev, "Firmware over %d MS in pre-initializing state, aborting\n",
+ FW_PRE_INIT_TIMEOUT_MILI);
+ goto out;
+ }
+
err = mlx5_cmd_init(dev);
if (err) {
dev_err(&pdev->dev, "Failed initializing command interface, aborting\n");
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 79ef799..c5ea101 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -326,6 +326,7 @@
static const struct pci_device_id rtl8169_pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), 0, 0, RTL_CFG_0 },
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8136), 0, 0, RTL_CFG_2 },
+ { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8161), 0, 0, RTL_CFG_1 },
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 },
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 },
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 },
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 549ad20..585e90f 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -171,6 +171,49 @@
.get_mdio_data = ravb_get_mdio_data,
};
+/* Free TX skb function for AVB-IP */
+static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &priv->stats[q];
+ struct ravb_tx_desc *desc;
+ int free_num = 0;
+ int entry;
+ u32 size;
+
+ for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
+ bool txed;
+
+ entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] *
+ NUM_TX_DESC);
+ desc = &priv->tx_ring[q][entry];
+ txed = desc->die_dt == DT_FEMPTY;
+ if (free_txed_only && !txed)
+ break;
+ /* Descriptor type must be checked before all other reads */
+ dma_rmb();
+ size = le16_to_cpu(desc->ds_tagl) & TX_DS;
+ /* Free the original skb. */
+ if (priv->tx_skb[q][entry / NUM_TX_DESC]) {
+ dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
+ size, DMA_TO_DEVICE);
+ /* Last packet descriptor? */
+ if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) {
+ entry /= NUM_TX_DESC;
+ dev_kfree_skb_any(priv->tx_skb[q][entry]);
+ priv->tx_skb[q][entry] = NULL;
+ if (txed)
+ stats->tx_packets++;
+ }
+ free_num++;
+ }
+ if (txed)
+ stats->tx_bytes += size;
+ desc->die_dt = DT_EEMPTY;
+ }
+ return free_num;
+}
+
/* Free skb's and DMA buffers for Ethernet AVB */
static void ravb_ring_free(struct net_device *ndev, int q)
{
@@ -178,6 +221,34 @@
int ring_size;
int i;
+ if (priv->rx_ring[q]) {
+ for (i = 0; i < priv->num_rx_ring[q]; i++) {
+ struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i];
+
+ if (!dma_mapping_error(ndev->dev.parent,
+ le32_to_cpu(desc->dptr)))
+ dma_unmap_single(ndev->dev.parent,
+ le32_to_cpu(desc->dptr),
+ PKT_BUF_SZ,
+ DMA_FROM_DEVICE);
+ }
+ ring_size = sizeof(struct ravb_ex_rx_desc) *
+ (priv->num_rx_ring[q] + 1);
+ dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q],
+ priv->rx_desc_dma[q]);
+ priv->rx_ring[q] = NULL;
+ }
+
+ if (priv->tx_ring[q]) {
+ ravb_tx_free(ndev, q, false);
+
+ ring_size = sizeof(struct ravb_tx_desc) *
+ (priv->num_tx_ring[q] * NUM_TX_DESC + 1);
+ dma_free_coherent(ndev->dev.parent, ring_size, priv->tx_ring[q],
+ priv->tx_desc_dma[q]);
+ priv->tx_ring[q] = NULL;
+ }
+
/* Free RX skb ringbuffer */
if (priv->rx_skb[q]) {
for (i = 0; i < priv->num_rx_ring[q]; i++)
@@ -186,33 +257,15 @@
kfree(priv->rx_skb[q]);
priv->rx_skb[q] = NULL;
- /* Free TX skb ringbuffer */
- if (priv->tx_skb[q]) {
- for (i = 0; i < priv->num_tx_ring[q]; i++)
- dev_kfree_skb(priv->tx_skb[q][i]);
- }
- kfree(priv->tx_skb[q]);
- priv->tx_skb[q] = NULL;
-
/* Free aligned TX buffers */
kfree(priv->tx_align[q]);
priv->tx_align[q] = NULL;
- if (priv->rx_ring[q]) {
- ring_size = sizeof(struct ravb_ex_rx_desc) *
- (priv->num_rx_ring[q] + 1);
- dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q],
- priv->rx_desc_dma[q]);
- priv->rx_ring[q] = NULL;
- }
-
- if (priv->tx_ring[q]) {
- ring_size = sizeof(struct ravb_tx_desc) *
- (priv->num_tx_ring[q] * NUM_TX_DESC + 1);
- dma_free_coherent(ndev->dev.parent, ring_size, priv->tx_ring[q],
- priv->tx_desc_dma[q]);
- priv->tx_ring[q] = NULL;
- }
+ /* Free TX skb ringbuffer.
+ * SKBs are freed by ravb_tx_free() call above.
+ */
+ kfree(priv->tx_skb[q]);
+ priv->tx_skb[q] = NULL;
}
/* Format skb and descriptor buffer for Ethernet AVB */
@@ -420,44 +473,6 @@
return 0;
}
-/* Free TX skb function for AVB-IP */
-static int ravb_tx_free(struct net_device *ndev, int q)
-{
- struct ravb_private *priv = netdev_priv(ndev);
- struct net_device_stats *stats = &priv->stats[q];
- struct ravb_tx_desc *desc;
- int free_num = 0;
- int entry;
- u32 size;
-
- for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
- entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] *
- NUM_TX_DESC);
- desc = &priv->tx_ring[q][entry];
- if (desc->die_dt != DT_FEMPTY)
- break;
- /* Descriptor type must be checked before all other reads */
- dma_rmb();
- size = le16_to_cpu(desc->ds_tagl) & TX_DS;
- /* Free the original skb. */
- if (priv->tx_skb[q][entry / NUM_TX_DESC]) {
- dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
- size, DMA_TO_DEVICE);
- /* Last packet descriptor? */
- if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) {
- entry /= NUM_TX_DESC;
- dev_kfree_skb_any(priv->tx_skb[q][entry]);
- priv->tx_skb[q][entry] = NULL;
- stats->tx_packets++;
- }
- free_num++;
- }
- stats->tx_bytes += size;
- desc->die_dt = DT_EEMPTY;
- }
- return free_num;
-}
-
static void ravb_get_tx_tstamp(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
@@ -797,7 +812,7 @@
spin_lock_irqsave(&priv->lock, flags);
/* Clear TX interrupt */
ravb_write(ndev, ~mask, TIS);
- ravb_tx_free(ndev, q);
+ ravb_tx_free(ndev, q, true);
netif_wake_subqueue(ndev, q);
mmiowb();
spin_unlock_irqrestore(&priv->lock, flags);
@@ -1393,7 +1408,8 @@
priv->cur_tx[q] += NUM_TX_DESC;
if (priv->cur_tx[q] - priv->dirty_tx[q] >
- (priv->num_tx_ring[q] - 1) * NUM_TX_DESC && !ravb_tx_free(ndev, q))
+ (priv->num_tx_ring[q] - 1) * NUM_TX_DESC &&
+ !ravb_tx_free(ndev, q, true))
netif_stop_subqueue(ndev, q);
exit:
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 480f3da..479af10 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -750,6 +750,7 @@
.tsu = 1,
.hw_crc = 1,
.select_mii = 1,
+ .shift_rd0 = 1,
};
/* SH7763 */
@@ -818,6 +819,7 @@
.rpadir_value = 2 << 16,
.no_trimd = 1,
.no_ade = 1,
+ .hw_crc = 1,
.tsu = 1,
.select_mii = 1,
.shift_rd0 = 1,
diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c
index d790cb8..8e832ba 100644
--- a/drivers/net/ethernet/sfc/falcon.c
+++ b/drivers/net/ethernet/sfc/falcon.c
@@ -2796,6 +2796,11 @@
.timer_period_max = 1 << FRF_AB_TC_TIMER_VAL_WIDTH,
.offload_features = NETIF_F_IP_CSUM,
.mcdi_max_ver = -1,
+#ifdef CONFIG_SFC_SRIOV
+ .vswitching_probe = efx_port_dummy_op_int,
+ .vswitching_restore = efx_port_dummy_op_int,
+ .vswitching_remove = efx_port_dummy_op_void,
+#endif
};
const struct efx_nic_type falcon_b0_nic_type = {
@@ -2897,4 +2902,9 @@
.offload_features = NETIF_F_IP_CSUM | NETIF_F_RXHASH | NETIF_F_NTUPLE,
.mcdi_max_ver = -1,
.max_rx_ip_filters = FR_BZ_RX_FILTER_TBL0_ROWS,
+#ifdef CONFIG_SFC_SRIOV
+ .vswitching_probe = efx_port_dummy_op_int,
+ .vswitching_restore = efx_port_dummy_op_int,
+ .vswitching_remove = efx_port_dummy_op_void,
+#endif
};
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index cf468c8..4cb8b85 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -100,6 +100,14 @@
/* BUFFER_ALIGN(adr) calculates the number of bytes to the next alignment. */
#define BUFFER_ALIGN(adr) ((ALIGNMENT - ((u32) adr)) % ALIGNMENT)
+#ifdef __BIG_ENDIAN
+#define xemaclite_readl ioread32be
+#define xemaclite_writel iowrite32be
+#else
+#define xemaclite_readl ioread32
+#define xemaclite_writel iowrite32
+#endif
+
/**
* struct net_local - Our private per device data
* @ndev: instance of the network device
@@ -158,15 +166,15 @@
u32 reg_data;
/* Enable the Tx interrupts for the first Buffer */
- reg_data = __raw_readl(drvdata->base_addr + XEL_TSR_OFFSET);
- __raw_writel(reg_data | XEL_TSR_XMIT_IE_MASK,
- drvdata->base_addr + XEL_TSR_OFFSET);
+ reg_data = xemaclite_readl(drvdata->base_addr + XEL_TSR_OFFSET);
+ xemaclite_writel(reg_data | XEL_TSR_XMIT_IE_MASK,
+ drvdata->base_addr + XEL_TSR_OFFSET);
/* Enable the Rx interrupts for the first buffer */
- __raw_writel(XEL_RSR_RECV_IE_MASK, drvdata->base_addr + XEL_RSR_OFFSET);
+ xemaclite_writel(XEL_RSR_RECV_IE_MASK, drvdata->base_addr + XEL_RSR_OFFSET);
/* Enable the Global Interrupt Enable */
- __raw_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET);
+ xemaclite_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET);
}
/**
@@ -181,17 +189,17 @@
u32 reg_data;
/* Disable the Global Interrupt Enable */
- __raw_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET);
+ xemaclite_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET);
/* Disable the Tx interrupts for the first buffer */
- reg_data = __raw_readl(drvdata->base_addr + XEL_TSR_OFFSET);
- __raw_writel(reg_data & (~XEL_TSR_XMIT_IE_MASK),
- drvdata->base_addr + XEL_TSR_OFFSET);
+ reg_data = xemaclite_readl(drvdata->base_addr + XEL_TSR_OFFSET);
+ xemaclite_writel(reg_data & (~XEL_TSR_XMIT_IE_MASK),
+ drvdata->base_addr + XEL_TSR_OFFSET);
/* Disable the Rx interrupts for the first buffer */
- reg_data = __raw_readl(drvdata->base_addr + XEL_RSR_OFFSET);
- __raw_writel(reg_data & (~XEL_RSR_RECV_IE_MASK),
- drvdata->base_addr + XEL_RSR_OFFSET);
+ reg_data = xemaclite_readl(drvdata->base_addr + XEL_RSR_OFFSET);
+ xemaclite_writel(reg_data & (~XEL_RSR_RECV_IE_MASK),
+ drvdata->base_addr + XEL_RSR_OFFSET);
}
/**
@@ -323,7 +331,7 @@
byte_count = ETH_FRAME_LEN;
/* Check if the expected buffer is available */
- reg_data = __raw_readl(addr + XEL_TSR_OFFSET);
+ reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET);
if ((reg_data & (XEL_TSR_XMIT_BUSY_MASK |
XEL_TSR_XMIT_ACTIVE_MASK)) == 0) {
@@ -336,7 +344,7 @@
addr = (void __iomem __force *)((u32 __force)addr ^
XEL_BUFFER_OFFSET);
- reg_data = __raw_readl(addr + XEL_TSR_OFFSET);
+ reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET);
if ((reg_data & (XEL_TSR_XMIT_BUSY_MASK |
XEL_TSR_XMIT_ACTIVE_MASK)) != 0)
@@ -347,16 +355,16 @@
/* Write the frame to the buffer */
xemaclite_aligned_write(data, (u32 __force *) addr, byte_count);
- __raw_writel((byte_count & XEL_TPLR_LENGTH_MASK),
- addr + XEL_TPLR_OFFSET);
+ xemaclite_writel((byte_count & XEL_TPLR_LENGTH_MASK),
+ addr + XEL_TPLR_OFFSET);
/* Update the Tx Status Register to indicate that there is a
* frame to send. Set the XEL_TSR_XMIT_ACTIVE_MASK flag which
* is used by the interrupt handler to check whether a frame
* has been transmitted */
- reg_data = __raw_readl(addr + XEL_TSR_OFFSET);
+ reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET);
reg_data |= (XEL_TSR_XMIT_BUSY_MASK | XEL_TSR_XMIT_ACTIVE_MASK);
- __raw_writel(reg_data, addr + XEL_TSR_OFFSET);
+ xemaclite_writel(reg_data, addr + XEL_TSR_OFFSET);
return 0;
}
@@ -371,7 +379,7 @@
*
* Return: Total number of bytes received
*/
-static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data)
+static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen)
{
void __iomem *addr;
u16 length, proto_type;
@@ -381,7 +389,7 @@
addr = (drvdata->base_addr + drvdata->next_rx_buf_to_use);
/* Verify which buffer has valid data */
- reg_data = __raw_readl(addr + XEL_RSR_OFFSET);
+ reg_data = xemaclite_readl(addr + XEL_RSR_OFFSET);
if ((reg_data & XEL_RSR_RECV_DONE_MASK) == XEL_RSR_RECV_DONE_MASK) {
if (drvdata->rx_ping_pong != 0)
@@ -398,27 +406,28 @@
return 0; /* No data was available */
/* Verify that buffer has valid data */
- reg_data = __raw_readl(addr + XEL_RSR_OFFSET);
+ reg_data = xemaclite_readl(addr + XEL_RSR_OFFSET);
if ((reg_data & XEL_RSR_RECV_DONE_MASK) !=
XEL_RSR_RECV_DONE_MASK)
return 0; /* No data was available */
}
/* Get the protocol type of the ethernet frame that arrived */
- proto_type = ((ntohl(__raw_readl(addr + XEL_HEADER_OFFSET +
+ proto_type = ((ntohl(xemaclite_readl(addr + XEL_HEADER_OFFSET +
XEL_RXBUFF_OFFSET)) >> XEL_HEADER_SHIFT) &
XEL_RPLR_LENGTH_MASK);
/* Check if received ethernet frame is a raw ethernet frame
* or an IP packet or an ARP packet */
- if (proto_type > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
+ if (proto_type > ETH_DATA_LEN) {
if (proto_type == ETH_P_IP) {
- length = ((ntohl(__raw_readl(addr +
+ length = ((ntohl(xemaclite_readl(addr +
XEL_HEADER_IP_LENGTH_OFFSET +
XEL_RXBUFF_OFFSET)) >>
XEL_HEADER_SHIFT) &
XEL_RPLR_LENGTH_MASK);
+ length = min_t(u16, length, ETH_DATA_LEN);
length += ETH_HLEN + ETH_FCS_LEN;
} else if (proto_type == ETH_P_ARP)
@@ -431,14 +440,17 @@
/* Use the length in the frame, plus the header and trailer */
length = proto_type + ETH_HLEN + ETH_FCS_LEN;
+ if (WARN_ON(length > maxlen))
+ length = maxlen;
+
/* Read from the EmacLite device */
xemaclite_aligned_read((u32 __force *) (addr + XEL_RXBUFF_OFFSET),
data, length);
/* Acknowledge the frame */
- reg_data = __raw_readl(addr + XEL_RSR_OFFSET);
+ reg_data = xemaclite_readl(addr + XEL_RSR_OFFSET);
reg_data &= ~XEL_RSR_RECV_DONE_MASK;
- __raw_writel(reg_data, addr + XEL_RSR_OFFSET);
+ xemaclite_writel(reg_data, addr + XEL_RSR_OFFSET);
return length;
}
@@ -465,14 +477,14 @@
xemaclite_aligned_write(address_ptr, (u32 __force *) addr, ETH_ALEN);
- __raw_writel(ETH_ALEN, addr + XEL_TPLR_OFFSET);
+ xemaclite_writel(ETH_ALEN, addr + XEL_TPLR_OFFSET);
/* Update the MAC address in the EmacLite */
- reg_data = __raw_readl(addr + XEL_TSR_OFFSET);
- __raw_writel(reg_data | XEL_TSR_PROG_MAC_ADDR, addr + XEL_TSR_OFFSET);
+ reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET);
+ xemaclite_writel(reg_data | XEL_TSR_PROG_MAC_ADDR, addr + XEL_TSR_OFFSET);
/* Wait for EmacLite to finish with the MAC address update */
- while ((__raw_readl(addr + XEL_TSR_OFFSET) &
+ while ((xemaclite_readl(addr + XEL_TSR_OFFSET) &
XEL_TSR_PROG_MAC_ADDR) != 0)
;
}
@@ -605,7 +617,7 @@
skb_reserve(skb, 2);
- len = xemaclite_recv_data(lp, (u8 *) skb->data);
+ len = xemaclite_recv_data(lp, (u8 *) skb->data, len);
if (!len) {
dev->stats.rx_errors++;
@@ -642,32 +654,32 @@
u32 tx_status;
/* Check if there is Rx Data available */
- if ((__raw_readl(base_addr + XEL_RSR_OFFSET) &
+ if ((xemaclite_readl(base_addr + XEL_RSR_OFFSET) &
XEL_RSR_RECV_DONE_MASK) ||
- (__raw_readl(base_addr + XEL_BUFFER_OFFSET + XEL_RSR_OFFSET)
+ (xemaclite_readl(base_addr + XEL_BUFFER_OFFSET + XEL_RSR_OFFSET)
& XEL_RSR_RECV_DONE_MASK))
xemaclite_rx_handler(dev);
/* Check if the Transmission for the first buffer is completed */
- tx_status = __raw_readl(base_addr + XEL_TSR_OFFSET);
+ tx_status = xemaclite_readl(base_addr + XEL_TSR_OFFSET);
if (((tx_status & XEL_TSR_XMIT_BUSY_MASK) == 0) &&
(tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) {
tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK;
- __raw_writel(tx_status, base_addr + XEL_TSR_OFFSET);
+ xemaclite_writel(tx_status, base_addr + XEL_TSR_OFFSET);
tx_complete = true;
}
/* Check if the Transmission for the second buffer is completed */
- tx_status = __raw_readl(base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET);
+ tx_status = xemaclite_readl(base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET);
if (((tx_status & XEL_TSR_XMIT_BUSY_MASK) == 0) &&
(tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) {
tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK;
- __raw_writel(tx_status, base_addr + XEL_BUFFER_OFFSET +
- XEL_TSR_OFFSET);
+ xemaclite_writel(tx_status, base_addr + XEL_BUFFER_OFFSET +
+ XEL_TSR_OFFSET);
tx_complete = true;
}
@@ -700,7 +712,7 @@
/* wait for the MDIO interface to not be busy or timeout
after some time.
*/
- while (__raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET) &
+ while (xemaclite_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET) &
XEL_MDIOCTRL_MDIOSTS_MASK) {
if (time_before_eq(end, jiffies)) {
WARN_ON(1);
@@ -736,17 +748,17 @@
* MDIO Address register. Set the Status bit in the MDIO Control
* register to start a MDIO read transaction.
*/
- ctrl_reg = __raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET);
- __raw_writel(XEL_MDIOADDR_OP_MASK |
- ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg),
- lp->base_addr + XEL_MDIOADDR_OFFSET);
- __raw_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK,
- lp->base_addr + XEL_MDIOCTRL_OFFSET);
+ ctrl_reg = xemaclite_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET);
+ xemaclite_writel(XEL_MDIOADDR_OP_MASK |
+ ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg),
+ lp->base_addr + XEL_MDIOADDR_OFFSET);
+ xemaclite_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK,
+ lp->base_addr + XEL_MDIOCTRL_OFFSET);
if (xemaclite_mdio_wait(lp))
return -ETIMEDOUT;
- rc = __raw_readl(lp->base_addr + XEL_MDIORD_OFFSET);
+ rc = xemaclite_readl(lp->base_addr + XEL_MDIORD_OFFSET);
dev_dbg(&lp->ndev->dev,
"xemaclite_mdio_read(phy_id=%i, reg=%x) == %x\n",
@@ -783,13 +795,13 @@
* Data register. Finally, set the Status bit in the MDIO Control
* register to start a MDIO write transaction.
*/
- ctrl_reg = __raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET);
- __raw_writel(~XEL_MDIOADDR_OP_MASK &
- ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg),
- lp->base_addr + XEL_MDIOADDR_OFFSET);
- __raw_writel(val, lp->base_addr + XEL_MDIOWR_OFFSET);
- __raw_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK,
- lp->base_addr + XEL_MDIOCTRL_OFFSET);
+ ctrl_reg = xemaclite_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET);
+ xemaclite_writel(~XEL_MDIOADDR_OP_MASK &
+ ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg),
+ lp->base_addr + XEL_MDIOADDR_OFFSET);
+ xemaclite_writel(val, lp->base_addr + XEL_MDIOWR_OFFSET);
+ xemaclite_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK,
+ lp->base_addr + XEL_MDIOCTRL_OFFSET);
return 0;
}
@@ -836,8 +848,8 @@
/* Enable the MDIO bus by asserting the enable bit in MDIO Control
* register.
*/
- __raw_writel(XEL_MDIOCTRL_MDIOEN_MASK,
- lp->base_addr + XEL_MDIOCTRL_OFFSET);
+ xemaclite_writel(XEL_MDIOCTRL_MDIOEN_MASK,
+ lp->base_addr + XEL_MDIOCTRL_OFFSET);
bus = mdiobus_alloc();
if (!bus) {
@@ -1141,8 +1153,8 @@
dev_warn(dev, "No MAC address found\n");
/* Clear the Tx CSR's in case this is a restart */
- __raw_writel(0, lp->base_addr + XEL_TSR_OFFSET);
- __raw_writel(0, lp->base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET);
+ xemaclite_writel(0, lp->base_addr + XEL_TSR_OFFSET);
+ xemaclite_writel(0, lp->base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET);
/* Set the MAC address in the EmacLite device */
xemaclite_update_address(lp, ndev->dev_addr);
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index 85828f1..0758d08 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -648,8 +648,8 @@
{
/* Finish setting up the DEVICE info. */
dev->mtu = AX_MTU;
- dev->hard_header_len = 0;
- dev->addr_len = 0;
+ dev->hard_header_len = AX25_MAX_HEADER_LEN;
+ dev->addr_len = AX25_ADDR_LEN;
dev->type = ARPHRD_AX25;
dev->tx_queue_len = 10;
dev->header_ops = &ax25_header_ops;
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index e8a09ff..c8a7802 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -197,65 +197,6 @@
return ppi;
}
-union sub_key {
- u64 k;
- struct {
- u8 pad[3];
- u8 kb;
- u32 ka;
- };
-};
-
-/* Toeplitz hash function
- * data: network byte order
- * return: host byte order
- */
-static u32 comp_hash(u8 *key, int klen, void *data, int dlen)
-{
- union sub_key subk;
- int k_next = 4;
- u8 dt;
- int i, j;
- u32 ret = 0;
-
- subk.k = 0;
- subk.ka = ntohl(*(u32 *)key);
-
- for (i = 0; i < dlen; i++) {
- subk.kb = key[k_next];
- k_next = (k_next + 1) % klen;
- dt = ((u8 *)data)[i];
- for (j = 0; j < 8; j++) {
- if (dt & 0x80)
- ret ^= subk.ka;
- dt <<= 1;
- subk.k <<= 1;
- }
- }
-
- return ret;
-}
-
-static bool netvsc_set_hash(u32 *hash, struct sk_buff *skb)
-{
- struct flow_keys flow;
- int data_len;
-
- if (!skb_flow_dissect_flow_keys(skb, &flow, 0) ||
- !(flow.basic.n_proto == htons(ETH_P_IP) ||
- flow.basic.n_proto == htons(ETH_P_IPV6)))
- return false;
-
- if (flow.basic.ip_proto == IPPROTO_TCP)
- data_len = 12;
- else
- data_len = 8;
-
- *hash = comp_hash(netvsc_hash_key, HASH_KEYLEN, &flow, data_len);
-
- return true;
-}
-
static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
void *accel_priv, select_queue_fallback_t fallback)
{
@@ -268,11 +209,9 @@
if (nvsc_dev == NULL || ndev->real_num_tx_queues <= 1)
return 0;
- if (netvsc_set_hash(&hash, skb)) {
- q_idx = nvsc_dev->send_table[hash % VRSS_SEND_TAB_SIZE] %
- ndev->real_num_tx_queues;
- skb_set_hash(skb, hash, PKT_HASH_TYPE_L3);
- }
+ hash = skb_get_hash(skb);
+ q_idx = nvsc_dev->send_table[hash % VRSS_SEND_TAB_SIZE] %
+ ndev->real_num_tx_queues;
return q_idx;
}
diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c
index 25f2196..de2ea9f 100644
--- a/drivers/net/irda/irda-usb.c
+++ b/drivers/net/irda/irda-usb.c
@@ -1077,7 +1077,7 @@
* are "42101001.sb" or "42101002.sb"
*/
sprintf(stir421x_fw_name, "4210%4X.sb",
- self->usbdev->descriptor.bcdDevice);
+ le16_to_cpu(self->usbdev->descriptor.bcdDevice));
ret = request_firmware(&fw, stir421x_fw_name, &self->usbdev->dev);
if (ret < 0)
return ret;
diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c
index bca6a1e..e1bb802 100644
--- a/drivers/net/irda/mcs7780.c
+++ b/drivers/net/irda/mcs7780.c
@@ -141,9 +141,19 @@
static int mcs_get_reg(struct mcs_cb *mcs, __u16 reg, __u16 * val)
{
struct usb_device *dev = mcs->usbdev;
- int ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ,
- MCS_RD_RTYPE, 0, reg, val, 2,
- msecs_to_jiffies(MCS_CTRL_TIMEOUT));
+ void *dmabuf;
+ int ret;
+
+ dmabuf = kmalloc(sizeof(__u16), GFP_KERNEL);
+ if (!dmabuf)
+ return -ENOMEM;
+
+ ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ,
+ MCS_RD_RTYPE, 0, reg, dmabuf, 2,
+ msecs_to_jiffies(MCS_CTRL_TIMEOUT));
+
+ memcpy(val, dmabuf, sizeof(__u16));
+ kfree(dmabuf);
return ret;
}
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 06c8bfe..40cd866 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -1110,6 +1110,7 @@
static void macvlan_port_destroy(struct net_device *dev)
{
struct macvlan_port *port = macvlan_port_get_rtnl(dev);
+ struct sk_buff *skb;
dev->priv_flags &= ~IFF_MACVLAN_PORT;
netdev_rx_handler_unregister(dev);
@@ -1118,7 +1119,15 @@
* but we need to cancel it and purge left skbs if any.
*/
cancel_work_sync(&port->bc_work);
- __skb_queue_purge(&port->bc_queue);
+
+ while ((skb = __skb_dequeue(&port->bc_queue))) {
+ const struct macvlan_dev *src = MACVLAN_SKB_CB(skb)->src;
+
+ if (src)
+ dev_put(src->dev);
+
+ kfree_skb(skb);
+ }
kfree_rcu(port, rcu);
}
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index e6cefd0..e83acc6 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -907,7 +907,7 @@
if (overflow) {
pr_debug("tx timestamp queue overflow, count %d\n", overflow);
while (skb) {
- skb_complete_tx_timestamp(skb, NULL);
+ kfree_skb(skb);
skb = skb_dequeue(&dp83640->tx_queue);
}
return;
@@ -1436,8 +1436,6 @@
skb_info->tmo = jiffies + SKB_TIMESTAMP_TIMEOUT;
skb_queue_tail(&dp83640->rx_queue, skb);
schedule_delayed_work(&dp83640->ts_work, SKB_TIMESTAMP_TIMEOUT);
- } else {
- netif_rx_ni(skb);
}
return true;
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index 32f1066..7242dd4 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -29,6 +29,7 @@
#define MII_DP83867_MICR 0x12
#define MII_DP83867_ISR 0x13
#define DP83867_CTRL 0x1f
+#define DP83867_CFG3 0x1e
/* Extended Registers */
#define DP83867_RGMIICTL 0x0032
@@ -89,6 +90,8 @@
micr_status |=
(MII_DP83867_MICR_AN_ERR_INT_EN |
MII_DP83867_MICR_SPEED_CHNG_INT_EN |
+ MII_DP83867_MICR_AUTONEG_COMP_INT_EN |
+ MII_DP83867_MICR_LINK_STS_CHNG_INT_EN |
MII_DP83867_MICR_DUP_MODE_CHNG_INT_EN |
MII_DP83867_MICR_SLEEP_MODE_CHNG_INT_EN);
@@ -184,6 +187,13 @@
DP83867_DEVADDR, phydev->addr, delay);
}
+ /* Enable Interrupt output INT_OE in CFG3 register */
+ if (phy_interrupt_is_valid(phydev)) {
+ val = phy_read(phydev, DP83867_CFG3);
+ val |= BIT(7);
+ phy_write(phydev, DP83867_CFG3, val);
+ }
+
return 0;
}
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 0240552..ebec2dc 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -203,34 +203,6 @@
{
int err;
- /* The Marvell PHY has an errata which requires
- * that certain registers get written in order
- * to restart autonegotiation */
- err = phy_write(phydev, MII_BMCR, BMCR_RESET);
-
- if (err < 0)
- return err;
-
- err = phy_write(phydev, 0x1d, 0x1f);
- if (err < 0)
- return err;
-
- err = phy_write(phydev, 0x1e, 0x200c);
- if (err < 0)
- return err;
-
- err = phy_write(phydev, 0x1d, 0x5);
- if (err < 0)
- return err;
-
- err = phy_write(phydev, 0x1e, 0);
- if (err < 0)
- return err;
-
- err = phy_write(phydev, 0x1e, 0x100);
- if (err < 0)
- return err;
-
err = marvell_set_polarity(phydev, phydev->mdix);
if (err < 0)
return err;
@@ -264,6 +236,42 @@
return 0;
}
+static int m88e1101_config_aneg(struct phy_device *phydev)
+{
+ int err;
+
+ /* This Marvell PHY has an errata which requires
+ * that certain registers get written in order
+ * to restart autonegotiation
+ */
+ err = phy_write(phydev, MII_BMCR, BMCR_RESET);
+
+ if (err < 0)
+ return err;
+
+ err = phy_write(phydev, 0x1d, 0x1f);
+ if (err < 0)
+ return err;
+
+ err = phy_write(phydev, 0x1e, 0x200c);
+ if (err < 0)
+ return err;
+
+ err = phy_write(phydev, 0x1d, 0x5);
+ if (err < 0)
+ return err;
+
+ err = phy_write(phydev, 0x1e, 0);
+ if (err < 0)
+ return err;
+
+ err = phy_write(phydev, 0x1e, 0x100);
+ if (err < 0)
+ return err;
+
+ return marvell_config_aneg(phydev);
+}
+
#ifdef CONFIG_OF_MDIO
/*
* Set and/or override some configuration registers based on the
@@ -814,8 +822,6 @@
phydev->lp_advertising = mii_stat1000_to_ethtool_lpa_t(lpagb) |
mii_lpa_to_ethtool_lpa_t(lpa);
- lpa &= adv;
-
if (status & MII_M1011_PHY_STATUS_FULLDUPLEX)
phydev->duplex = DUPLEX_FULL;
else
@@ -993,7 +999,7 @@
.name = "Marvell 88E1101",
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
- .config_aneg = &marvell_config_aneg,
+ .config_aneg = &m88e1101_config_aneg,
.read_status = &genphy_read_status,
.ack_interrupt = &marvell_ack_interrupt,
.config_intr = &marvell_config_intr,
diff --git a/drivers/net/phy/mdio-bcm-iproc.c b/drivers/net/phy/mdio-bcm-iproc.c
index c0b4e65..46fe1ae 100644
--- a/drivers/net/phy/mdio-bcm-iproc.c
+++ b/drivers/net/phy/mdio-bcm-iproc.c
@@ -81,8 +81,6 @@
if (rc)
return rc;
- iproc_mdio_config_clk(priv->base);
-
/* Prepare the read operation */
cmd = (MII_DATA_TA_VAL << MII_DATA_TA_SHIFT) |
(reg << MII_DATA_RA_SHIFT) |
@@ -112,8 +110,6 @@
if (rc)
return rc;
- iproc_mdio_config_clk(priv->base);
-
/* Prepare the write operation */
cmd = (MII_DATA_TA_VAL << MII_DATA_TA_SHIFT) |
(reg << MII_DATA_RA_SHIFT) |
@@ -163,6 +159,8 @@
bus->read = iproc_mdio_read;
bus->write = iproc_mdio_write;
+ iproc_mdio_config_clk(priv->base);
+
rc = of_mdiobus_register(bus, pdev->dev.of_node);
if (rc) {
dev_err(&pdev->dev, "MDIO bus registration failed\n");
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index e13ad6c..c8b85f1 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -539,6 +539,8 @@
if ((regval & 0xFF) == 0xFF) {
phy_init_hw(phydev);
phydev->link = 0;
+ if (phydev->drv->config_intr && phy_interrupt_is_valid(phydev))
+ phydev->drv->config_intr(phydev);
}
return 0;
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index bba0ca7..49d9f0a 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -538,9 +538,12 @@
cancel_delayed_work_sync(&phydev->state_queue);
mutex_lock(&phydev->lock);
- if (phydev->state > PHY_UP)
+ if (phydev->state > PHY_UP && phydev->state != PHY_HALTED)
phydev->state = PHY_UP;
mutex_unlock(&phydev->lock);
+
+ /* Now we can run the state machine synchronously */
+ phy_state_machine(&phydev->state_queue.work);
}
/**
@@ -918,6 +921,15 @@
if (old_link != phydev->link)
phydev->state = PHY_CHANGELINK;
}
+ /*
+ * Failsafe: check that nobody set phydev->link=0 between two
+ * poll cycles, otherwise we won't leave RUNNING state as long
+ * as link remains down.
+ */
+ if (!phydev->link && phydev->state == PHY_RUNNING) {
+ phydev->state = PHY_CHANGELINK;
+ dev_err(&phydev->dev, "no link in PHY_RUNNING\n");
+ }
break;
case PHY_CHANGELINK:
err = phy_read_status(phydev);
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 0bfbaba..8179727 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -1368,6 +1368,8 @@
{
struct phy_device *phydev = to_phy_device(dev);
+ cancel_delayed_work_sync(&phydev->state_queue);
+
mutex_lock(&phydev->lock);
phydev->state = PHY_DOWN;
mutex_unlock(&phydev->lock);
@@ -1442,7 +1444,7 @@
.phy_id = 0xffffffff,
.phy_id_mask = 0xffffffff,
.name = "Generic PHY",
- .soft_reset = genphy_soft_reset,
+ .soft_reset = genphy_no_soft_reset,
.config_init = genphy_config_init,
.features = PHY_GBIT_FEATURES | SUPPORTED_MII |
SUPPORTED_AUI | SUPPORTED_FIBRE |
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
index 4e2b26a..2aa1a1d 100644
--- a/drivers/net/usb/catc.c
+++ b/drivers/net/usb/catc.c
@@ -777,7 +777,7 @@
struct net_device *netdev;
struct catc *catc;
u8 broadcast[ETH_ALEN];
- int i, pktsz;
+ int pktsz, ret;
if (usb_set_interface(usbdev,
intf->altsetting->desc.bInterfaceNumber, 1)) {
@@ -812,12 +812,8 @@
if ((!catc->ctrl_urb) || (!catc->tx_urb) ||
(!catc->rx_urb) || (!catc->irq_urb)) {
dev_err(&intf->dev, "No free urbs available.\n");
- usb_free_urb(catc->ctrl_urb);
- usb_free_urb(catc->tx_urb);
- usb_free_urb(catc->rx_urb);
- usb_free_urb(catc->irq_urb);
- free_netdev(netdev);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto fail_free;
}
/* The F5U011 has the same vendor/product as the netmate but a device version of 0x130 */
@@ -845,15 +841,24 @@
catc->irq_buf, 2, catc_irq_done, catc, 1);
if (!catc->is_f5u011) {
+ u32 *buf;
+ int i;
+
dev_dbg(dev, "Checking memory size\n");
- i = 0x12345678;
- catc_write_mem(catc, 0x7a80, &i, 4);
- i = 0x87654321;
- catc_write_mem(catc, 0xfa80, &i, 4);
- catc_read_mem(catc, 0x7a80, &i, 4);
+ buf = kmalloc(4, GFP_KERNEL);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto fail_free;
+ }
+
+ *buf = 0x12345678;
+ catc_write_mem(catc, 0x7a80, buf, 4);
+ *buf = 0x87654321;
+ catc_write_mem(catc, 0xfa80, buf, 4);
+ catc_read_mem(catc, 0x7a80, buf, 4);
- switch (i) {
+ switch (*buf) {
case 0x12345678:
catc_set_reg(catc, TxBufCount, 8);
catc_set_reg(catc, RxBufCount, 32);
@@ -868,6 +873,8 @@
dev_dbg(dev, "32k Memory\n");
break;
}
+
+ kfree(buf);
dev_dbg(dev, "Getting MAC from SEEROM.\n");
@@ -914,16 +921,21 @@
usb_set_intfdata(intf, catc);
SET_NETDEV_DEV(netdev, &intf->dev);
- if (register_netdev(netdev) != 0) {
- usb_set_intfdata(intf, NULL);
- usb_free_urb(catc->ctrl_urb);
- usb_free_urb(catc->tx_urb);
- usb_free_urb(catc->rx_urb);
- usb_free_urb(catc->irq_urb);
- free_netdev(netdev);
- return -EIO;
- }
+ ret = register_netdev(netdev);
+ if (ret)
+ goto fail_clear_intfdata;
+
return 0;
+
+fail_clear_intfdata:
+ usb_set_intfdata(intf, NULL);
+fail_free:
+ usb_free_urb(catc->ctrl_urb);
+ usb_free_urb(catc->tx_urb);
+ usb_free_urb(catc->rx_urb);
+ usb_free_urb(catc->irq_urb);
+ free_netdev(netdev);
+ return ret;
}
static void catc_disconnect(struct usb_interface *intf)
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index f64b25c..cd93220 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -1009,6 +1009,7 @@
struct net_device *netdev;
const eth_addr_t bcast_addr = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
int result = 0;
+ int rv = -EIO;
dev_dbg(dev,
"Kawasaki Device Probe (Device number:%d): 0x%4.4x:0x%4.4x:0x%4.4x\n",
@@ -1029,6 +1030,7 @@
kaweth = netdev_priv(netdev);
kaweth->dev = udev;
kaweth->net = netdev;
+ kaweth->intf = intf;
spin_lock_init(&kaweth->device_lock);
init_waitqueue_head(&kaweth->term_wait);
@@ -1048,6 +1050,10 @@
/* Download the firmware */
dev_info(dev, "Downloading firmware...\n");
kaweth->firmware_buf = (__u8 *)__get_free_page(GFP_KERNEL);
+ if (!kaweth->firmware_buf) {
+ rv = -ENOMEM;
+ goto err_free_netdev;
+ }
if ((result = kaweth_download_firmware(kaweth,
"kaweth/new_code.bin",
100,
@@ -1139,8 +1145,6 @@
dev_dbg(dev, "Initializing net device.\n");
- kaweth->intf = intf;
-
kaweth->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!kaweth->tx_urb)
goto err_free_netdev;
@@ -1204,7 +1208,7 @@
err_free_netdev:
free_netdev(netdev);
- return -EIO;
+ return rv;
}
/****************************************************************
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index f840802..17fac01 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -126,40 +126,61 @@
static int get_registers(pegasus_t *pegasus, __u16 indx, __u16 size, void *data)
{
+ u8 *buf;
int ret;
+ buf = kmalloc(size, GFP_NOIO);
+ if (!buf)
+ return -ENOMEM;
+
ret = usb_control_msg(pegasus->usb, usb_rcvctrlpipe(pegasus->usb, 0),
PEGASUS_REQ_GET_REGS, PEGASUS_REQT_READ, 0,
- indx, data, size, 1000);
+ indx, buf, size, 1000);
if (ret < 0)
netif_dbg(pegasus, drv, pegasus->net,
"%s returned %d\n", __func__, ret);
+ else if (ret <= size)
+ memcpy(data, buf, ret);
+ kfree(buf);
return ret;
}
-static int set_registers(pegasus_t *pegasus, __u16 indx, __u16 size, void *data)
+static int set_registers(pegasus_t *pegasus, __u16 indx, __u16 size,
+ const void *data)
{
+ u8 *buf;
int ret;
+ buf = kmemdup(data, size, GFP_NOIO);
+ if (!buf)
+ return -ENOMEM;
+
ret = usb_control_msg(pegasus->usb, usb_sndctrlpipe(pegasus->usb, 0),
PEGASUS_REQ_SET_REGS, PEGASUS_REQT_WRITE, 0,
- indx, data, size, 100);
+ indx, buf, size, 100);
if (ret < 0)
netif_dbg(pegasus, drv, pegasus->net,
"%s returned %d\n", __func__, ret);
+ kfree(buf);
return ret;
}
static int set_register(pegasus_t *pegasus, __u16 indx, __u8 data)
{
+ u8 *buf;
int ret;
+ buf = kmemdup(&data, 1, GFP_NOIO);
+ if (!buf)
+ return -ENOMEM;
+
ret = usb_control_msg(pegasus->usb, usb_sndctrlpipe(pegasus->usb, 0),
PEGASUS_REQ_SET_REG, PEGASUS_REQT_WRITE, data,
- indx, &data, 1, 1000);
+ indx, buf, 1, 1000);
if (ret < 0)
netif_dbg(pegasus, drv, pegasus->net,
"%s returned %d\n", __func__, ret);
+ kfree(buf);
return ret;
}
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 09052f9..958af3b 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -707,6 +707,7 @@
{QMI_FIXED_INTF(0x19d2, 0x1428, 2)}, /* Telewell TW-LTE 4G v2 */
{QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */
{QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */
+ {QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */
{QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */
{QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
{QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */
@@ -730,6 +731,8 @@
{QMI_FIXED_INTF(0x1199, 0x9071, 10)}, /* Sierra Wireless MC74xx */
{QMI_FIXED_INTF(0x1199, 0x9079, 8)}, /* Sierra Wireless EM74xx */
{QMI_FIXED_INTF(0x1199, 0x9079, 10)}, /* Sierra Wireless EM74xx */
+ {QMI_FIXED_INTF(0x1199, 0x907b, 8)}, /* Sierra Wireless EM74xx */
+ {QMI_FIXED_INTF(0x1199, 0x907b, 10)}, /* Sierra Wireless EM74xx */
{QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
{QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */
{QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
@@ -754,6 +757,7 @@
{QMI_FIXED_INTF(0x413c, 0x81b1, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */
{QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
{QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */
+ {QMI_FIXED_INTF(0x1e0e, 0x9001, 5)}, /* SIMCom 7230E */
/* 4. Gobi 1000 devices */
{QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index fbb1867..1c27e6f 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -1851,6 +1851,9 @@
napi_complete(napi);
if (!list_empty(&tp->rx_done))
napi_schedule(napi);
+ else if (!skb_queue_empty(&tp->tx_queue) &&
+ !list_empty(&tp->tx_free))
+ napi_schedule(napi);
}
return work_done;
@@ -2990,10 +2993,13 @@
if (!netif_carrier_ok(netdev)) {
tp->rtl_ops.enable(tp);
set_bit(RTL8152_SET_RX_MODE, &tp->flags);
+ netif_stop_queue(netdev);
napi_disable(&tp->napi);
netif_carrier_on(netdev);
rtl_start_rx(tp);
napi_enable(&tp->napi);
+ netif_wake_queue(netdev);
+ netif_info(tp, link, netdev, "carrier on\n");
}
} else {
if (netif_carrier_ok(netdev)) {
@@ -3001,6 +3007,7 @@
napi_disable(&tp->napi);
tp->rtl_ops.disable(tp);
napi_enable(&tp->napi);
+ netif_info(tp, link, netdev, "carrier off\n");
}
}
}
@@ -3385,12 +3392,12 @@
if (!netif_running(netdev))
return 0;
+ netif_stop_queue(netdev);
napi_disable(&tp->napi);
clear_bit(WORK_ENABLE, &tp->flags);
usb_kill_urb(tp->intr_urb);
cancel_delayed_work_sync(&tp->schedule);
if (netif_carrier_ok(netdev)) {
- netif_stop_queue(netdev);
mutex_lock(&tp->control);
tp->rtl_ops.disable(tp);
mutex_unlock(&tp->control);
@@ -3415,12 +3422,14 @@
if (netif_carrier_ok(netdev)) {
mutex_lock(&tp->control);
tp->rtl_ops.enable(tp);
+ rtl_start_rx(tp);
rtl8152_set_rx_mode(netdev);
mutex_unlock(&tp->control);
- netif_wake_queue(netdev);
}
napi_enable(&tp->napi);
+ netif_wake_queue(netdev);
+ usb_submit_urb(tp->intr_urb, GFP_KERNEL);
return 0;
}
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index d37b7dc..3967298 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -155,16 +155,36 @@
*/
static int get_registers(rtl8150_t * dev, u16 indx, u16 size, void *data)
{
- return usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
- RTL8150_REQ_GET_REGS, RTL8150_REQT_READ,
- indx, 0, data, size, 500);
+ void *buf;
+ int ret;
+
+ buf = kmalloc(size, GFP_NOIO);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
+ RTL8150_REQ_GET_REGS, RTL8150_REQT_READ,
+ indx, 0, buf, size, 500);
+ if (ret > 0 && ret <= size)
+ memcpy(data, buf, ret);
+ kfree(buf);
+ return ret;
}
-static int set_registers(rtl8150_t * dev, u16 indx, u16 size, void *data)
+static int set_registers(rtl8150_t * dev, u16 indx, u16 size, const void *data)
{
- return usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
- RTL8150_REQ_SET_REGS, RTL8150_REQT_WRITE,
- indx, 0, data, size, 500);
+ void *buf;
+ int ret;
+
+ buf = kmemdup(data, size, GFP_NOIO);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
+ RTL8150_REQ_SET_REGS, RTL8150_REQT_WRITE,
+ indx, 0, buf, size, 500);
+ kfree(buf);
+ return ret;
}
static void async_set_reg_cb(struct urb *urb)
diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
index a2515887..0b5a84c 100644
--- a/drivers/net/usb/sierra_net.c
+++ b/drivers/net/usb/sierra_net.c
@@ -73,8 +73,6 @@
/* Private data structure */
struct sierra_net_data {
- u8 ethr_hdr_tmpl[ETH_HLEN]; /* ethernet header template for rx'd pkts */
-
u16 link_up; /* air link up or down */
u8 tx_hdr_template[4]; /* part of HIP hdr for tx'd packets */
@@ -122,6 +120,7 @@
/* LSI Protocol types */
#define SIERRA_NET_PROTOCOL_UMTS 0x01
+#define SIERRA_NET_PROTOCOL_UMTS_DS 0x04
/* LSI Coverage */
#define SIERRA_NET_COVERAGE_NONE 0x00
#define SIERRA_NET_COVERAGE_NOPACKET 0x01
@@ -129,7 +128,8 @@
/* LSI Session */
#define SIERRA_NET_SESSION_IDLE 0x00
/* LSI Link types */
-#define SIERRA_NET_AS_LINK_TYPE_IPv4 0x00
+#define SIERRA_NET_AS_LINK_TYPE_IPV4 0x00
+#define SIERRA_NET_AS_LINK_TYPE_IPV6 0x02
struct lsi_umts {
u8 protocol;
@@ -137,9 +137,14 @@
__be16 length;
/* eventually use a union for the rest - assume umts for now */
u8 coverage;
- u8 unused2[41];
+ u8 network_len; /* network name len */
+ u8 network[40]; /* network name (UCS2, bigendian) */
u8 session_state;
u8 unused3[33];
+} __packed;
+
+struct lsi_umts_single {
+ struct lsi_umts lsi;
u8 link_type;
u8 pdp_addr_len; /* NW-supplied PDP address len */
u8 pdp_addr[16]; /* NW-supplied PDP address (bigendian)) */
@@ -158,10 +163,31 @@
u8 reserved[8];
} __packed;
+struct lsi_umts_dual {
+ struct lsi_umts lsi;
+ u8 pdp_addr4_len; /* NW-supplied PDP IPv4 address len */
+ u8 pdp_addr4[4]; /* NW-supplied PDP IPv4 address (bigendian)) */
+ u8 pdp_addr6_len; /* NW-supplied PDP IPv6 address len */
+ u8 pdp_addr6[16]; /* NW-supplied PDP IPv6 address (bigendian)) */
+ u8 unused4[23];
+ u8 dns1_addr4_len; /* NW-supplied 1st DNS v4 address len (bigendian) */
+ u8 dns1_addr4[4]; /* NW-supplied 1st DNS v4 address */
+ u8 dns1_addr6_len; /* NW-supplied 1st DNS v6 address len */
+ u8 dns1_addr6[16]; /* NW-supplied 1st DNS v6 address (bigendian)*/
+ u8 dns2_addr4_len; /* NW-supplied 2nd DNS v4 address len (bigendian) */
+ u8 dns2_addr4[4]; /* NW-supplied 2nd DNS v4 address */
+ u8 dns2_addr6_len; /* NW-supplied 2nd DNS v6 address len */
+ u8 dns2_addr6[16]; /* NW-supplied 2nd DNS v6 address (bigendian)*/
+ u8 unused5[68];
+} __packed;
+
#define SIERRA_NET_LSI_COMMON_LEN 4
-#define SIERRA_NET_LSI_UMTS_LEN (sizeof(struct lsi_umts))
+#define SIERRA_NET_LSI_UMTS_LEN (sizeof(struct lsi_umts_single))
#define SIERRA_NET_LSI_UMTS_STATUS_LEN \
(SIERRA_NET_LSI_UMTS_LEN - SIERRA_NET_LSI_COMMON_LEN)
+#define SIERRA_NET_LSI_UMTS_DS_LEN (sizeof(struct lsi_umts_dual))
+#define SIERRA_NET_LSI_UMTS_DS_STATUS_LEN \
+ (SIERRA_NET_LSI_UMTS_DS_LEN - SIERRA_NET_LSI_COMMON_LEN)
/* Forward definitions */
static void sierra_sync_timer(unsigned long syncdata);
@@ -191,10 +217,11 @@
dev->data[0] = (unsigned long)priv;
}
-/* is packet IPv4 */
+/* is packet IPv4/IPv6 */
static inline int is_ip(struct sk_buff *skb)
{
- return skb->protocol == cpu_to_be16(ETH_P_IP);
+ return skb->protocol == cpu_to_be16(ETH_P_IP) ||
+ skb->protocol == cpu_to_be16(ETH_P_IPV6);
}
/*
@@ -350,46 +377,51 @@
static int sierra_net_parse_lsi(struct usbnet *dev, char *data, int datalen)
{
struct lsi_umts *lsi = (struct lsi_umts *)data;
+ u32 expected_length;
- if (datalen < sizeof(struct lsi_umts)) {
- netdev_err(dev->net, "%s: Data length %d, exp %Zu\n",
- __func__, datalen,
- sizeof(struct lsi_umts));
+ if (datalen < sizeof(struct lsi_umts_single)) {
+ netdev_err(dev->net, "%s: Data length %d, exp >= %Zu\n",
+ __func__, datalen, sizeof(struct lsi_umts_single));
return -1;
}
- if (lsi->length != cpu_to_be16(SIERRA_NET_LSI_UMTS_STATUS_LEN)) {
- netdev_err(dev->net, "%s: LSI_UMTS_STATUS_LEN %d, exp %u\n",
- __func__, be16_to_cpu(lsi->length),
- (u32)SIERRA_NET_LSI_UMTS_STATUS_LEN);
- return -1;
- }
-
- /* Validate the protocol - only support UMTS for now */
- if (lsi->protocol != SIERRA_NET_PROTOCOL_UMTS) {
- netdev_err(dev->net, "Protocol unsupported, 0x%02x\n",
- lsi->protocol);
- return -1;
- }
-
- /* Validate the link type */
- if (lsi->link_type != SIERRA_NET_AS_LINK_TYPE_IPv4) {
- netdev_err(dev->net, "Link type unsupported: 0x%02x\n",
- lsi->link_type);
- return -1;
- }
-
- /* Validate the coverage */
- if (lsi->coverage == SIERRA_NET_COVERAGE_NONE
- || lsi->coverage == SIERRA_NET_COVERAGE_NOPACKET) {
- netdev_err(dev->net, "No coverage, 0x%02x\n", lsi->coverage);
- return 0;
- }
-
/* Validate the session state */
if (lsi->session_state == SIERRA_NET_SESSION_IDLE) {
netdev_err(dev->net, "Session idle, 0x%02x\n",
- lsi->session_state);
+ lsi->session_state);
+ return 0;
+ }
+
+ /* Validate the protocol - only support UMTS for now */
+ if (lsi->protocol == SIERRA_NET_PROTOCOL_UMTS) {
+ struct lsi_umts_single *single = (struct lsi_umts_single *)lsi;
+
+ /* Validate the link type */
+ if (single->link_type != SIERRA_NET_AS_LINK_TYPE_IPV4 &&
+ single->link_type != SIERRA_NET_AS_LINK_TYPE_IPV6) {
+ netdev_err(dev->net, "Link type unsupported: 0x%02x\n",
+ single->link_type);
+ return -1;
+ }
+ expected_length = SIERRA_NET_LSI_UMTS_STATUS_LEN;
+ } else if (lsi->protocol == SIERRA_NET_PROTOCOL_UMTS_DS) {
+ expected_length = SIERRA_NET_LSI_UMTS_DS_STATUS_LEN;
+ } else {
+ netdev_err(dev->net, "Protocol unsupported, 0x%02x\n",
+ lsi->protocol);
+ return -1;
+ }
+
+ if (be16_to_cpu(lsi->length) != expected_length) {
+ netdev_err(dev->net, "%s: LSI_UMTS_STATUS_LEN %d, exp %u\n",
+ __func__, be16_to_cpu(lsi->length), expected_length);
+ return -1;
+ }
+
+ /* Validate the coverage */
+ if (lsi->coverage == SIERRA_NET_COVERAGE_NONE ||
+ lsi->coverage == SIERRA_NET_COVERAGE_NOPACKET) {
+ netdev_err(dev->net, "No coverage, 0x%02x\n", lsi->coverage);
return 0;
}
@@ -662,7 +694,6 @@
u8 numendpoints;
u16 fwattr = 0;
int status;
- struct ethhdr *eth;
struct sierra_net_data *priv;
static const u8 sync_tmplate[sizeof(priv->sync_msg)] = {
0x00, 0x00, SIERRA_NET_HIP_MSYNC_ID, 0x00};
@@ -700,11 +731,6 @@
dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return(&iface_counter);
dev->net->dev_addr[ETH_ALEN-1] = ifacenum;
- /* we will have to manufacture ethernet headers, prepare template */
- eth = (struct ethhdr *)priv->ethr_hdr_tmpl;
- memcpy(ð->h_dest, dev->net->dev_addr, ETH_ALEN);
- eth->h_proto = cpu_to_be16(ETH_P_IP);
-
/* prepare shutdown message template */
memcpy(priv->shdwn_msg, shdwn_tmplate, sizeof(priv->shdwn_msg));
/* set context index initially to 0 - prepares tx hdr template */
@@ -833,9 +859,14 @@
skb_pull(skb, hh.hdrlen);
- /* We are going to accept this packet, prepare it */
- memcpy(skb->data, sierra_net_get_private(dev)->ethr_hdr_tmpl,
- ETH_HLEN);
+ /* We are going to accept this packet, prepare it.
+ * In case protocol is IPv6, keep it, otherwise force IPv4.
+ */
+ skb_reset_mac_header(skb);
+ if (eth_hdr(skb)->h_proto != cpu_to_be16(ETH_P_IPV6))
+ eth_hdr(skb)->h_proto = cpu_to_be16(ETH_P_IP);
+ eth_zero_addr(eth_hdr(skb)->h_source);
+ memcpy(eth_hdr(skb)->h_dest, dev->net->dev_addr, ETH_ALEN);
/* Last packet in batch handled by usbnet */
if (hh.payload_len.word == skb->len)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 0e2a19e..8dfc752 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -47,8 +47,16 @@
*/
DECLARE_EWMA(pkt_len, 1, 64)
+/* With mergeable buffers we align buffer address and use the low bits to
+ * encode its true size. Buffer size is up to 1 page so we need to align to
+ * square root of page size to ensure we reserve enough bits to encode the true
+ * size.
+ */
+#define MERGEABLE_BUFFER_MIN_ALIGN_SHIFT ((PAGE_SHIFT + 1) / 2)
+
/* Minimum alignment for mergeable packet buffers. */
-#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256)
+#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, \
+ 1 << MERGEABLE_BUFFER_MIN_ALIGN_SHIFT)
#define VIRTNET_DRIVER_VERSION "1.0.0"
@@ -1415,6 +1423,7 @@
#ifdef CONFIG_NET_RX_BUSY_POLL
.ndo_busy_poll = virtnet_busy_poll,
#endif
+ .ndo_features_check = passthru_features_check,
};
static void virtnet_config_changed_work(struct work_struct *work)
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 349aecb..ac945f8 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -733,15 +733,15 @@
static void vrf_dev_uninit(struct net_device *dev)
{
struct net_vrf *vrf = netdev_priv(dev);
- struct slave_queue *queue = &vrf->queue;
- struct list_head *head = &queue->all_slaves;
- struct slave *slave, *next;
+// struct slave_queue *queue = &vrf->queue;
+// struct list_head *head = &queue->all_slaves;
+// struct slave *slave, *next;
vrf_rtable_destroy(vrf);
vrf_rt6_destroy(vrf);
- list_for_each_entry_safe(slave, next, head, list)
- vrf_del_slave(dev, slave->dev);
+// list_for_each_entry_safe(slave, next, head, list)
+// vrf_del_slave(dev, slave->dev);
free_percpu(dev->dstats);
dev->dstats = NULL;
@@ -914,6 +914,14 @@
static void vrf_dellink(struct net_device *dev, struct list_head *head)
{
+ struct net_vrf *vrf = netdev_priv(dev);
+ struct slave_queue *queue = &vrf->queue;
+ struct list_head *all_slaves = &queue->all_slaves;
+ struct slave *slave, *next;
+
+ list_for_each_entry_safe(slave, next, all_slaves, list)
+ vrf_del_slave(dev, slave->dev);
+
unregister_netdevice_queue(dev, head);
}
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 590750a..dab3bf6 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -77,6 +77,8 @@
static int vxlan_sock_add(struct vxlan_dev *vxlan);
+static void vxlan_vs_del_dev(struct vxlan_dev *vxlan);
+
/* per-network namespace private data for this module */
struct vxlan_net {
struct list_head vxlan_list;
@@ -1052,6 +1054,8 @@
static void vxlan_sock_release(struct vxlan_dev *vxlan)
{
+ vxlan_vs_del_dev(vxlan);
+
__vxlan_sock_release(vxlan->vn4_sock);
#if IS_ENABLED(CONFIG_IPV6)
__vxlan_sock_release(vxlan->vn6_sock);
@@ -2236,7 +2240,7 @@
= container_of(p, struct vxlan_fdb, hlist);
unsigned long timeout;
- if (f->state & NUD_PERMANENT)
+ if (f->state & (NUD_PERMANENT | NUD_NOARP))
continue;
timeout = f->used + vxlan->cfg.age_interval * HZ;
@@ -2255,6 +2259,15 @@
mod_timer(&vxlan->age_timer, next_timer);
}
+static void vxlan_vs_del_dev(struct vxlan_dev *vxlan)
+{
+ struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
+
+ spin_lock(&vn->sock_lock);
+ hlist_del_init_rcu(&vxlan->hlist);
+ spin_unlock(&vn->sock_lock);
+}
+
static void vxlan_vs_add_dev(struct vxlan_sock *vs, struct vxlan_dev *vxlan)
{
struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
@@ -3028,12 +3041,6 @@
static void vxlan_dellink(struct net_device *dev, struct list_head *head)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
- struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
-
- spin_lock(&vn->sock_lock);
- if (!hlist_unhashed(&vxlan->hlist))
- hlist_del_rcu(&vxlan->hlist);
- spin_unlock(&vn->sock_lock);
gro_cells_destroy(&vxlan->gro_cells);
list_del(&vxlan->next);
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 9acaffa..8565e06 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -1885,6 +1885,12 @@
goto err_wmi_detach;
}
+ /* If firmware indicates Full Rx Reorder support it must be used in a
+ * slightly different manner. Let HTT code know.
+ */
+ ar->htt.rx_ring.in_ord_rx = !!(test_bit(WMI_SERVICE_RX_FULL_REORDER,
+ ar->wmi.svc_map));
+
status = ath10k_htt_rx_alloc(&ar->htt);
if (status) {
ath10k_err(ar, "failed to alloc htt rx: %d\n", status);
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index 9e607b2..a9fc19f 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -3271,6 +3271,9 @@
goto err_free_pipes;
}
+ if (QCA_REV_6174(ar) || QCA_REV_9377(ar))
+ ath10k_pci_override_ce_config(ar);
+
ret = ath10k_pci_force_wake(ar);
if (ret) {
ath10k_warn(ar, "failed to wake up device : %d\n", ret);
diff --git a/drivers/net/wireless/ath/ath10k/wmi-ops.h b/drivers/net/wireless/ath/ath10k/wmi-ops.h
index 1298592..8a21e77 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-ops.h
+++ b/drivers/net/wireless/ath/ath10k/wmi-ops.h
@@ -669,6 +669,9 @@
struct sk_buff *skb;
u32 cmd_id;
+ if (!ar->wmi.ops->gen_vdev_spectral_conf)
+ return -EOPNOTSUPP;
+
skb = ar->wmi.ops->gen_vdev_spectral_conf(ar, arg);
if (IS_ERR(skb))
return PTR_ERR(skb);
@@ -684,6 +687,9 @@
struct sk_buff *skb;
u32 cmd_id;
+ if (!ar->wmi.ops->gen_vdev_spectral_enable)
+ return -EOPNOTSUPP;
+
skb = ar->wmi.ops->gen_vdev_spectral_enable(ar, vdev_id, trigger,
enable);
if (IS_ERR(skb))
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index 201425e..fbc8c9a 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -1815,8 +1815,6 @@
static void ar9003_hw_tx99_start(struct ath_hw *ah, u32 qnum)
{
REG_SET_BIT(ah, AR_PHY_TEST, PHY_AGC_CLR);
- REG_SET_BIT(ah, 0x9864, 0x7f000);
- REG_SET_BIT(ah, 0x9924, 0x7f00fe);
REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_DIS);
REG_WRITE(ah, AR_CR, AR_CR_RXD);
REG_WRITE(ah, AR_DLCL_IFS(qnum), 0);
diff --git a/drivers/net/wireless/ath/ath9k/common-spectral.c b/drivers/net/wireless/ath/ath9k/common-spectral.c
index a876271..0394573 100644
--- a/drivers/net/wireless/ath/ath9k/common-spectral.c
+++ b/drivers/net/wireless/ath/ath9k/common-spectral.c
@@ -528,6 +528,9 @@
if (!(radar_info->pulse_bw_info & SPECTRAL_SCAN_BITMASK))
return 0;
+ if (!spec_priv->rfs_chan_spec_scan)
+ return 1;
+
/* Output buffers are full, no need to process anything
* since there is no space to put the result anyway
*/
@@ -1072,7 +1075,7 @@
void ath9k_cmn_spectral_deinit_debug(struct ath_spec_scan_priv *spec_priv)
{
- if (config_enabled(CONFIG_ATH9K_DEBUGFS)) {
+ if (config_enabled(CONFIG_ATH9K_DEBUGFS) && spec_priv->rfs_chan_spec_scan) {
relay_close(spec_priv->rfs_chan_spec_scan);
spec_priv->rfs_chan_spec_scan = NULL;
}
@@ -1086,6 +1089,9 @@
debugfs_phy,
1024, 256, &rfs_spec_scan_cb,
NULL);
+ if (!spec_priv->rfs_chan_spec_scan)
+ return;
+
debugfs_create_file("spectral_scan_ctl",
S_IRUSR | S_IWUSR,
debugfs_phy, spec_priv,
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index 165dd20..c92564b 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -37,6 +37,7 @@
{ USB_DEVICE(0x0cf3, 0xb002) }, /* Ubiquiti WifiStation */
{ USB_DEVICE(0x057c, 0x8403) }, /* AVM FRITZ!WLAN 11N v2 USB */
{ USB_DEVICE(0x0471, 0x209e) }, /* Philips (or NXP) PTA01 */
+ { USB_DEVICE(0x1eda, 0x2315) }, /* AirTies */
{ USB_DEVICE(0x0cf3, 0x7015),
.driver_info = AR9287_USB }, /* Atheros */
@@ -1216,6 +1217,9 @@
u8 bulk_out_ep;
int r;
+ if (iface_desc->desc.bNumEndpoints < 2)
+ return -ENODEV;
+
/* Find bulk out endpoint */
for (r = 1; r >= 0; r--) {
endpoint = &iface_desc->endpoint[r].desc;
diff --git a/drivers/net/wireless/ath/ath9k/tx99.c b/drivers/net/wireless/ath/ath9k/tx99.c
index ac4781f..b4e6304 100644
--- a/drivers/net/wireless/ath/ath9k/tx99.c
+++ b/drivers/net/wireless/ath/ath9k/tx99.c
@@ -190,22 +190,27 @@
if (strtobool(buf, &start))
return -EINVAL;
+ mutex_lock(&sc->mutex);
+
if (start == sc->tx99_state) {
if (!start)
- return count;
+ goto out;
ath_dbg(common, XMIT, "Resetting TX99\n");
ath9k_tx99_deinit(sc);
}
if (!start) {
ath9k_tx99_deinit(sc);
- return count;
+ goto out;
}
r = ath9k_tx99_init(sc);
- if (r)
+ if (r) {
+ mutex_unlock(&sc->mutex);
return r;
-
+ }
+out:
+ mutex_unlock(&sc->mutex);
return count;
}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/core.c b/drivers/net/wireless/brcm80211/brcmfmac/core.c
index b5ab98e..82753e7 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/core.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/core.c
@@ -211,7 +211,7 @@
int ret;
struct brcmf_if *ifp = netdev_priv(ndev);
struct brcmf_pub *drvr = ifp->drvr;
- struct ethhdr *eh = (struct ethhdr *)(skb->data);
+ struct ethhdr *eh;
brcmf_dbg(DATA, "Enter, idx=%d\n", ifp->bssidx);
@@ -232,22 +232,13 @@
goto done;
}
- /* Make sure there's enough room for any header */
- if (skb_headroom(skb) < drvr->hdrlen) {
- struct sk_buff *skb2;
-
- brcmf_dbg(INFO, "%s: insufficient headroom\n",
+ /* Make sure there's enough writable headroom*/
+ ret = skb_cow_head(skb, drvr->hdrlen);
+ if (ret < 0) {
+ brcmf_err("%s: skb_cow_head failed\n",
brcmf_ifname(drvr, ifp->bssidx));
- drvr->bus_if->tx_realloc++;
- skb2 = skb_realloc_headroom(skb, drvr->hdrlen);
dev_kfree_skb(skb);
- skb = skb2;
- if (skb == NULL) {
- brcmf_err("%s: skb_realloc_headroom failed\n",
- brcmf_ifname(drvr, ifp->bssidx));
- ret = -ENOMEM;
- goto done;
- }
+ goto done;
}
/* validate length for ether packet */
@@ -257,6 +248,8 @@
goto done;
}
+ eh = (struct ethhdr *)(skb->data);
+
if (eh->h_proto == htons(ETH_P_PAE))
atomic_inc(&ifp->pend_8021x_cnt);
diff --git a/drivers/net/wireless/cw1200/sta.c b/drivers/net/wireless/cw1200/sta.c
index 982a625..74eba16 100644
--- a/drivers/net/wireless/cw1200/sta.c
+++ b/drivers/net/wireless/cw1200/sta.c
@@ -2135,9 +2135,7 @@
int cw1200_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
- enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid, u16 *ssn,
- u8 buf_size, bool amsdu)
+ struct ieee80211_ampdu_params *params)
{
/* Aggregation is implemented fully in firmware,
* including block ack negotiation. Do not allow
diff --git a/drivers/net/wireless/hostap/hostap_hw.c b/drivers/net/wireless/hostap/hostap_hw.c
index 6df3ee5..515aa3f 100644
--- a/drivers/net/wireless/hostap/hostap_hw.c
+++ b/drivers/net/wireless/hostap/hostap_hw.c
@@ -836,25 +836,30 @@
spin_lock_bh(&local->baplock);
res = hfa384x_setup_bap(dev, BAP0, rid, 0);
- if (!res)
- res = hfa384x_from_bap(dev, BAP0, &rec, sizeof(rec));
+ if (res)
+ goto unlock;
+
+ res = hfa384x_from_bap(dev, BAP0, &rec, sizeof(rec));
+ if (res)
+ goto unlock;
if (le16_to_cpu(rec.len) == 0) {
/* RID not available */
res = -ENODATA;
+ goto unlock;
}
rlen = (le16_to_cpu(rec.len) - 1) * 2;
- if (!res && exact_len && rlen != len) {
+ if (exact_len && rlen != len) {
printk(KERN_DEBUG "%s: hfa384x_get_rid - RID len mismatch: "
"rid=0x%04x, len=%d (expected %d)\n",
dev->name, rid, rlen, len);
res = -ENODATA;
}
- if (!res)
- res = hfa384x_from_bap(dev, BAP0, buf, len);
+ res = hfa384x_from_bap(dev, BAP0, buf, len);
+unlock:
spin_unlock_bh(&local->baplock);
mutex_unlock(&local->rid_bap_mtx);
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
index 201ce5a..7f82a81 100644
--- a/drivers/net/wireless/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@ -5982,12 +5982,14 @@
int
il4965_mac_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
- enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid, u16 * ssn,
- u8 buf_size, bool amsdu)
+ struct ieee80211_ampdu_params *params)
{
struct il_priv *il = hw->priv;
int ret = -EINVAL;
+ struct ieee80211_sta *sta = params->sta;
+ enum ieee80211_ampdu_mlme_action action = params->action;
+ u16 tid = params->tid;
+ u16 *ssn = ¶ms->ssn;
D_HT("A-MPDU action on addr %pM tid %d\n", sta->addr, tid);
diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
index 57c9a2af..2dda8e7 100644
--- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
@@ -729,12 +729,15 @@
static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
- enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid, u16 *ssn,
- u8 buf_size, bool amsdu)
+ struct ieee80211_ampdu_params *params)
{
struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
int ret = -EINVAL;
+ struct ieee80211_sta *sta = params->sta;
+ enum ieee80211_ampdu_mlme_action action = params->action;
+ u16 tid = params->tid;
+ u16 *ssn = ¶ms->ssn;
+ u8 buf_size = params->buf_size;
struct iwl_station_priv *sta_priv = (void *) sta->drv_priv;
IWL_DEBUG_HT(priv, "A-MPDU action on addr %pM tid %d\n",
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index f895886..8eda638 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -826,13 +826,16 @@
static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
- enum ieee80211_ampdu_mlme_action action,
- struct ieee80211_sta *sta, u16 tid,
- u16 *ssn, u8 buf_size, bool amsdu)
+ struct ieee80211_ampdu_params *params)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
int ret;
bool tx_agg_ref = false;
+ struct ieee80211_sta *sta = params->sta;
+ enum ieee80211_ampdu_mlme_action action = params->action;
+ u16 tid = params->tid;
+ u16 *ssn = ¶ms->ssn;
+ u8 buf_size = params->buf_size;
IWL_DEBUG_HT(mvm, "A-MPDU action on addr %pM tid %d: action %d\n",
sta->addr, tid, action);
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index c5d72a0..0ad8984 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -2539,7 +2539,7 @@
tasklet_hrtimer_init(&data->beacon_timer,
mac80211_hwsim_beacon,
- CLOCK_MONOTONIC_RAW, HRTIMER_MODE_ABS);
+ CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
spin_lock_bh(&hwsim_radio_lock);
list_add_tail(&data->list, &hwsim_radios);
diff --git a/drivers/net/wireless/mwifiex/11n_aggr.c b/drivers/net/wireless/mwifiex/11n_aggr.c
index aa498e0..49f3e17 100644
--- a/drivers/net/wireless/mwifiex/11n_aggr.c
+++ b/drivers/net/wireless/mwifiex/11n_aggr.c
@@ -101,13 +101,6 @@
{
struct txpd *local_tx_pd;
struct mwifiex_txinfo *tx_info = MWIFIEX_SKB_TXCB(skb);
- unsigned int pad;
- int headroom = (priv->adapter->iface_type ==
- MWIFIEX_USB) ? 0 : INTF_HEADER_LEN;
-
- pad = ((void *)skb->data - sizeof(*local_tx_pd) -
- headroom - NULL) & (MWIFIEX_DMA_ALIGN_SZ - 1);
- skb_push(skb, pad);
skb_push(skb, sizeof(*local_tx_pd));
@@ -121,12 +114,10 @@
local_tx_pd->bss_num = priv->bss_num;
local_tx_pd->bss_type = priv->bss_type;
/* Always zero as the data is followed by struct txpd */
- local_tx_pd->tx_pkt_offset = cpu_to_le16(sizeof(struct txpd) +
- pad);
+ local_tx_pd->tx_pkt_offset = cpu_to_le16(sizeof(struct txpd));
local_tx_pd->tx_pkt_type = cpu_to_le16(PKT_TYPE_AMSDU);
local_tx_pd->tx_pkt_length = cpu_to_le16(skb->len -
- sizeof(*local_tx_pd) -
- pad);
+ sizeof(*local_tx_pd));
if (tx_info->flags & MWIFIEX_BUF_FLAG_TDLS_PKT)
local_tx_pd->flags |= MWIFIEX_TXPD_FLAGS_TDLS_PACKET;
@@ -190,7 +181,11 @@
ra_list_flags);
return -1;
}
- skb_reserve(skb_aggr, MWIFIEX_MIN_DATA_HEADER_LEN);
+
+ /* skb_aggr->data already 64 byte align, just reserve bus interface
+ * header and txpd.
+ */
+ skb_reserve(skb_aggr, headroom + sizeof(struct txpd));
tx_info_aggr = MWIFIEX_SKB_TXCB(skb_aggr);
memset(tx_info_aggr, 0, sizeof(*tx_info_aggr));
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index 8df23f60..02ef32a 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -3740,7 +3740,7 @@
if (adapter->config_bands & BAND_A)
n_channels_a = mwifiex_band_5ghz.n_channels;
- adapter->num_in_chan_stats = max_t(u32, n_channels_bg, n_channels_a);
+ adapter->num_in_chan_stats = n_channels_bg + n_channels_a;
adapter->chan_stats = vmalloc(sizeof(*adapter->chan_stats) *
adapter->num_in_chan_stats);
diff --git a/drivers/net/wireless/mwifiex/debugfs.c b/drivers/net/wireless/mwifiex/debugfs.c
index 9824d8d..45d97b6 100644
--- a/drivers/net/wireless/mwifiex/debugfs.c
+++ b/drivers/net/wireless/mwifiex/debugfs.c
@@ -115,7 +115,8 @@
if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) {
p += sprintf(p, "multicast_count=\"%d\"\n",
netdev_mc_count(netdev));
- p += sprintf(p, "essid=\"%s\"\n", info.ssid.ssid);
+ p += sprintf(p, "essid=\"%.*s\"\n", info.ssid.ssid_len,
+ info.ssid.ssid);
p += sprintf(p, "bssid=\"%pM\"\n", info.bssid);
p += sprintf(p, "channel=\"%d\"\n", (int) info.bss_chan);
p += sprintf(p, "country_code = \"%s\"\n", info.country_code);
diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c
index 21192b6..268e50b 100644
--- a/drivers/net/wireless/mwifiex/pcie.c
+++ b/drivers/net/wireless/mwifiex/pcie.c
@@ -947,6 +947,7 @@
if (card && card->cmd_buf) {
mwifiex_unmap_pci_memory(adapter, card->cmd_buf,
PCI_DMA_TODEVICE);
+ dev_kfree_skb_any(card->cmd_buf);
}
return 0;
}
@@ -1513,6 +1514,11 @@
return -1;
card->cmd_buf = skb;
+ /*
+ * Need to keep a reference, since core driver might free up this
+ * buffer before we've unmapped it.
+ */
+ skb_get(skb);
/* To send a command, the driver will:
1. Write the 64bit physical address of the data buffer to
@@ -1610,6 +1616,7 @@
if (card->cmd_buf) {
mwifiex_unmap_pci_memory(adapter, card->cmd_buf,
PCI_DMA_TODEVICE);
+ dev_kfree_skb_any(card->cmd_buf);
card->cmd_buf = NULL;
}
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
index 682bcd5..7822fdb 100644
--- a/drivers/net/wireless/mwifiex/scan.c
+++ b/drivers/net/wireless/mwifiex/scan.c
@@ -2170,6 +2170,12 @@
sizeof(struct mwifiex_chan_stats);
for (i = 0 ; i < num_chan; i++) {
+ if (adapter->survey_idx >= adapter->num_in_chan_stats) {
+ mwifiex_dbg(adapter, WARN,
+ "FW reported too many channel results (max %d)\n",
+ adapter->num_in_chan_stats);
+ return;
+ }
chan_stats.chan_num = fw_chan_stats->chan_num;
chan_stats.bandcfg = fw_chan_stats->bandcfg;
chan_stats.flags = fw_chan_stats->flags;
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
index d6c4f0f..6cfa296 100644
--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
@@ -1098,8 +1098,6 @@
encrypt_key.is_rx_seq_valid = true;
}
} else {
- if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP)
- return 0;
encrypt_key.key_disable = true;
if (mac_addr)
memcpy(encrypt_key.mac_addr, mac_addr, ETH_ALEN);
diff --git a/drivers/net/wireless/p54/fwio.c b/drivers/net/wireless/p54/fwio.c
index 257a9ead..4ac6764 100644
--- a/drivers/net/wireless/p54/fwio.c
+++ b/drivers/net/wireless/p54/fwio.c
@@ -488,7 +488,7 @@
entry += sizeof(__le16);
chan->pa_points_per_curve = 8;
- memset(chan->curve_data, 0, sizeof(*chan->curve_data));
+ memset(chan->curve_data, 0, sizeof(chan->curve_data));
memcpy(chan->curve_data, entry,
sizeof(struct p54_pa_curve_data_sample) *
min((u8)8, curve_data->points_per_channel));
diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c
index a5223037..c48b7e8e 100644
--- a/drivers/net/wireless/realtek/rtlwifi/pci.c
+++ b/drivers/net/wireless/realtek/rtlwifi/pci.c
@@ -2269,7 +2269,7 @@
/* find adapter */
if (!_rtl_pci_find_adapter(pdev, hw)) {
err = -ENODEV;
- goto fail3;
+ goto fail2;
}
/* Init IO handler */
@@ -2339,10 +2339,10 @@
pci_set_drvdata(pdev, NULL);
rtl_deinit_core(hw);
+fail2:
if (rtlpriv->io.pci_mem_start != 0)
pci_iounmap(pdev, (void __iomem *)rtlpriv->io.pci_mem_start);
-fail2:
pci_release_regions(pdev);
complete(&rtlpriv->firmware_loading_complete);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
index 9b4d8a6..4b35491 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c
@@ -359,6 +359,107 @@
return rtl8821ae_phy_rf6052_config(hw);
}
+static void _rtl8812ae_phy_set_rfe_reg_24g(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u8 tmp;
+
+ switch (rtlhal->rfe_type) {
+ case 3:
+ rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, 0x54337770);
+ rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x54337770);
+ rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x010);
+ rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x010);
+ rtl_set_bbreg(hw, 0x900, 0x00000303, 0x1);
+ break;
+ case 4:
+ rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, 0x77777777);
+ rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x77777777);
+ rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x001);
+ rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x001);
+ break;
+ case 5:
+ rtl_write_byte(rtlpriv, RA_RFE_PINMUX + 2, 0x77);
+ rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x77777777);
+ tmp = rtl_read_byte(rtlpriv, RA_RFE_INV + 3);
+ rtl_write_byte(rtlpriv, RA_RFE_INV + 3, tmp & ~0x1);
+ rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x000);
+ break;
+ case 1:
+ if (rtlpriv->btcoexist.bt_coexistence) {
+ rtl_set_bbreg(hw, RA_RFE_PINMUX, 0xffffff, 0x777777);
+ rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD,
+ 0x77777777);
+ rtl_set_bbreg(hw, RA_RFE_INV, 0x33f00000, 0x000);
+ rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x000);
+ break;
+ }
+ case 0:
+ case 2:
+ default:
+ rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, 0x77777777);
+ rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x77777777);
+ rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x000);
+ rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x000);
+ break;
+ }
+}
+
+static void _rtl8812ae_phy_set_rfe_reg_5g(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ u8 tmp;
+
+ switch (rtlhal->rfe_type) {
+ case 0:
+ rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, 0x77337717);
+ rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x77337717);
+ rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x010);
+ rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x010);
+ break;
+ case 1:
+ if (rtlpriv->btcoexist.bt_coexistence) {
+ rtl_set_bbreg(hw, RA_RFE_PINMUX, 0xffffff, 0x337717);
+ rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD,
+ 0x77337717);
+ rtl_set_bbreg(hw, RA_RFE_INV, 0x33f00000, 0x000);
+ rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x000);
+ } else {
+ rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD,
+ 0x77337717);
+ rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD,
+ 0x77337717);
+ rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x000);
+ rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x000);
+ }
+ break;
+ case 3:
+ rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, 0x54337717);
+ rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x54337717);
+ rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x010);
+ rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x010);
+ rtl_set_bbreg(hw, 0x900, 0x00000303, 0x1);
+ break;
+ case 5:
+ rtl_write_byte(rtlpriv, RA_RFE_PINMUX + 2, 0x33);
+ rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x77337777);
+ tmp = rtl_read_byte(rtlpriv, RA_RFE_INV + 3);
+ rtl_write_byte(rtlpriv, RA_RFE_INV + 3, tmp | 0x1);
+ rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x010);
+ break;
+ case 2:
+ case 4:
+ default:
+ rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD, 0x77337777);
+ rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD, 0x77337777);
+ rtl_set_bbreg(hw, RA_RFE_INV, BMASKRFEINV, 0x010);
+ rtl_set_bbreg(hw, RB_RFE_INV, BMASKRFEINV, 0x010);
+ break;
+ }
+}
+
u32 phy_get_tx_swing_8812A(struct ieee80211_hw *hw, u8 band,
u8 rf_path)
{
@@ -553,14 +654,9 @@
/* 0x82C[1:0] = 2b'00 */
rtl_set_bbreg(hw, 0x82c, 0x3, 0);
}
- if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) {
- rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD,
- 0x77777777);
- rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD,
- 0x77777777);
- rtl_set_bbreg(hw, RA_RFE_INV, 0x3ff00000, 0x000);
- rtl_set_bbreg(hw, RB_RFE_INV, 0x3ff00000, 0x000);
- }
+
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
+ _rtl8812ae_phy_set_rfe_reg_24g(hw);
rtl_set_bbreg(hw, RTXPATH, 0xf0, 0x1);
rtl_set_bbreg(hw, RCCK_RX, 0x0f000000, 0x1);
@@ -615,14 +711,8 @@
/* 0x82C[1:0] = 2'b00 */
rtl_set_bbreg(hw, 0x82c, 0x3, 1);
- if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE) {
- rtl_set_bbreg(hw, RA_RFE_PINMUX, BMASKDWORD,
- 0x77337777);
- rtl_set_bbreg(hw, RB_RFE_PINMUX, BMASKDWORD,
- 0x77337777);
- rtl_set_bbreg(hw, RA_RFE_INV, 0x3ff00000, 0x010);
- rtl_set_bbreg(hw, RB_RFE_INV, 0x3ff00000, 0x010);
- }
+ if (rtlhal->hw_type == HARDWARE_TYPE_RTL8812AE)
+ _rtl8812ae_phy_set_rfe_reg_5g(hw);
rtl_set_bbreg(hw, RTXPATH, 0xf0, 0);
rtl_set_bbreg(hw, RCCK_RX, 0x0f000000, 0xf);
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/reg.h b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/reg.h
index 1d6110f..ed69dbe 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/reg.h
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/reg.h
@@ -2424,6 +2424,7 @@
#define BMASKH4BITS 0xf0000000
#define BMASKOFDM_D 0xffc00000
#define BMASKCCK 0x3f3f3f3f
+#define BMASKRFEINV 0x3ff00000
#define BRFREGOFFSETMASK 0xfffff
diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c
index 56384a4e..d1f42ce 100644
--- a/drivers/net/wireless/ti/wl1251/main.c
+++ b/drivers/net/wireless/ti/wl1251/main.c
@@ -1567,6 +1567,7 @@
wl->state = WL1251_STATE_OFF;
mutex_init(&wl->mutex);
+ spin_lock_init(&wl->wl_lock);
wl->tx_mgmt_frm_rate = DEFAULT_HW_GEN_TX_RATE;
wl->tx_mgmt_frm_mod = DEFAULT_HW_GEN_MODULATION_TYPE;
diff --git a/drivers/net/wireless/ti/wl18xx/event.c b/drivers/net/wireless/ti/wl18xx/event.c
index 614150c..cabcd01 100644
--- a/drivers/net/wireless/ti/wl18xx/event.c
+++ b/drivers/net/wireless/ti/wl18xx/event.c
@@ -206,5 +206,33 @@
mbox->sc_pwd_len,
mbox->sc_pwd);
+ if (vector & RX_BA_WIN_SIZE_CHANGE_EVENT_ID) {
+ struct wl12xx_vif *wlvif;
+ struct ieee80211_vif *vif;
+ struct ieee80211_sta *sta;
+ u8 link_id = mbox->rx_ba_link_id;
+ u8 win_size = mbox->rx_ba_win_size;
+ const u8 *addr;
+
+ wlvif = wl->links[link_id].wlvif;
+ vif = wl12xx_wlvif_to_vif(wlvif);
+
+ /* Update RX aggregation window size and call
+ * MAC routine to stop active RX aggregations for this link
+ */
+ if (wlvif->bss_type != BSS_TYPE_AP_BSS)
+ addr = vif->bss_conf.bssid;
+ else
+ addr = wl->links[link_id].addr;
+
+ sta = ieee80211_find_sta(vif, addr);
+ if (sta) {
+ sta->max_rx_aggregation_subframes = win_size;
+ ieee80211_stop_rx_ba_session(vif,
+ wl->links[link_id].ba_bitmap,
+ addr);
+ }
+ }
+
return 0;
}
diff --git a/drivers/net/wireless/ti/wl18xx/event.h b/drivers/net/wireless/ti/wl18xx/event.h
index f3d4f13..9495fad 100644
--- a/drivers/net/wireless/ti/wl18xx/event.h
+++ b/drivers/net/wireless/ti/wl18xx/event.h
@@ -38,6 +38,7 @@
REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID = BIT(18),
DFS_CHANNELS_CONFIG_COMPLETE_EVENT = BIT(19),
PERIODIC_SCAN_REPORT_EVENT_ID = BIT(20),
+ RX_BA_WIN_SIZE_CHANGE_EVENT_ID = BIT(21),
SMART_CONFIG_SYNC_EVENT_ID = BIT(22),
SMART_CONFIG_DECODE_EVENT_ID = BIT(23),
TIME_SYNC_EVENT_ID = BIT(24),
diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
index ea78cbe..c3227cf 100644
--- a/drivers/net/wireless/ti/wl18xx/main.c
+++ b/drivers/net/wireless/ti/wl18xx/main.c
@@ -1029,7 +1029,8 @@
DFS_CHANNELS_CONFIG_COMPLETE_EVENT |
SMART_CONFIG_SYNC_EVENT_ID |
SMART_CONFIG_DECODE_EVENT_ID |
- TIME_SYNC_EVENT_ID;
+ TIME_SYNC_EVENT_ID |
+ RX_BA_WIN_SIZE_CHANGE_EVENT_ID;
wl->ap_event_mask = MAX_TX_FAILURE_EVENT_ID;
diff --git a/drivers/net/wireless/ti/wlcore/acx.c b/drivers/net/wireless/ti/wlcore/acx.c
index f28fa3b..0646c9b 100644
--- a/drivers/net/wireless/ti/wlcore/acx.c
+++ b/drivers/net/wireless/ti/wlcore/acx.c
@@ -1419,7 +1419,8 @@
/* setup BA session receiver setting in the FW. */
int wl12xx_acx_set_ba_receiver_session(struct wl1271 *wl, u8 tid_index,
- u16 ssn, bool enable, u8 peer_hlid)
+ u16 ssn, bool enable, u8 peer_hlid,
+ u8 win_size)
{
struct wl1271_acx_ba_receiver_setup *acx;
int ret;
@@ -1435,7 +1436,7 @@
acx->hlid = peer_hlid;
acx->tid = tid_index;
acx->enable = enable;
- acx->win_size = wl->conf.ht.rx_ba_win_size;
+ acx->win_size = win_size;
acx->ssn = ssn;
ret = wlcore_cmd_configure_failsafe(wl, ACX_BA_SESSION_RX_SETUP, acx,
diff --git a/drivers/net/wireless/ti/wlcore/acx.h b/drivers/net/wireless/ti/wlcore/acx.h
index 954d57e..524aea4 100644
--- a/drivers/net/wireless/ti/wlcore/acx.h
+++ b/drivers/net/wireless/ti/wlcore/acx.h
@@ -1112,7 +1112,8 @@
int wl12xx_acx_set_ba_initiator_policy(struct wl1271 *wl,
struct wl12xx_vif *wlvif);
int wl12xx_acx_set_ba_receiver_session(struct wl1271 *wl, u8 tid_index,
- u16 ssn, bool enable, u8 peer_hlid);
+ u16 ssn, bool enable, u8 peer_hlid,
+ u8 win_size);
int wl12xx_acx_tsf_info(struct wl1271 *wl, struct wl12xx_vif *wlvif,
u64 *mactime);
int wl1271_acx_ps_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index b3eada9..13bce2d 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -5328,7 +5328,9 @@
}
ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true,
- hlid);
+ hlid,
+ params->buf_size);
+
if (!ret) {
*ba_bitmap |= BIT(tid);
wl->ba_rx_session_count++;
@@ -5349,7 +5351,7 @@
}
ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false,
- hlid);
+ hlid, 0);
if (!ret) {
*ba_bitmap &= ~BIT(tid);
wl->ba_rx_session_count--;
diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c
index 44f059f..9ebe00e 100644
--- a/drivers/net/wireless/ti/wlcore/spi.c
+++ b/drivers/net/wireless/ti/wlcore/spi.c
@@ -71,7 +71,7 @@
* only support SPI for 12xx - this code should be reworked when 18xx
* support is introduced
*/
-#define SPI_AGGR_BUFFER_SIZE (4 * PAGE_SIZE)
+#define SPI_AGGR_BUFFER_SIZE (4 * SZ_4K)
/* Maximum number of SPI write chunks */
#define WSPI_MAX_NUM_OF_CHUNKS \
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 0333ab0..34173b5 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -201,6 +201,7 @@
unsigned long remaining_credit;
struct timer_list credit_timeout;
u64 credit_window_start;
+ bool rate_limited;
/* Statistics */
struct xenvif_stats stats;
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index e7bd63e..60b26f3 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -105,7 +105,11 @@
if (work_done < budget) {
napi_complete(napi);
- xenvif_napi_schedule_or_enable_events(queue);
+ /* If the queue is rate-limited, it shall be
+ * rescheduled in the timer callback.
+ */
+ if (likely(!queue->rate_limited))
+ xenvif_napi_schedule_or_enable_events(queue);
}
return work_done;
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 1049c34..72ee1c3 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -687,6 +687,7 @@
max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */
queue->remaining_credit = min(max_credit, max_burst);
+ queue->rate_limited = false;
}
void xenvif_tx_credit_callback(unsigned long data)
@@ -1184,8 +1185,10 @@
msecs_to_jiffies(queue->credit_usec / 1000);
/* Timer could already be pending in rare cases. */
- if (timer_pending(&queue->credit_timeout))
+ if (timer_pending(&queue->credit_timeout)) {
+ queue->rate_limited = true;
return true;
+ }
/* Passed the point where we can replenish credit? */
if (time_after_eq64(now, next_credit)) {
@@ -1200,6 +1203,7 @@
mod_timer(&queue->credit_timeout,
next_credit);
queue->credit_window_start = next_credit;
+ queue->rate_limited = true;
return true;
}
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 1f445f3..34a062c 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -304,7 +304,7 @@
queue->rx_skbs[id] = skb;
ref = gnttab_claim_grant_reference(&queue->gref_rx_head);
- BUG_ON((signed short)ref < 0);
+ WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));
queue->grant_rx_ref[id] = ref;
page = skb_frag_page(&skb_shinfo(skb)->frags[0]);
@@ -321,7 +321,7 @@
queue->rx.req_prod_pvt = req_prod;
/* Not enough requests? Try again later. */
- if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN) {
+ if (req_prod - queue->rx.sring->req_prod < NET_RX_SLOTS_MIN) {
mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10));
return;
}
@@ -437,7 +437,7 @@
id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs);
tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++);
ref = gnttab_claim_grant_reference(&queue->gref_tx_head);
- BUG_ON((signed short)ref < 0);
+ WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref));
gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id,
gfn, GNTMAP_readonly);
diff --git a/drivers/nfc/fdp/i2c.c b/drivers/nfc/fdp/i2c.c
index 532db28..a5d7332 100644
--- a/drivers/nfc/fdp/i2c.c
+++ b/drivers/nfc/fdp/i2c.c
@@ -210,14 +210,14 @@
struct sk_buff *skb;
int r;
- client = phy->i2c_dev;
- dev_dbg(&client->dev, "%s\n", __func__);
-
if (!phy || irq != phy->i2c_dev->irq) {
WARN_ON_ONCE(1);
return IRQ_NONE;
}
+ client = phy->i2c_dev;
+ dev_dbg(&client->dev, "%s\n", __func__);
+
r = fdp_nci_i2c_read(phy, &skb);
if (r == -EREMOTEIO)
diff --git a/drivers/nfc/nfcmrvl/fw_dnld.c b/drivers/nfc/nfcmrvl/fw_dnld.c
index f8dcdf4..af62c4c 100644
--- a/drivers/nfc/nfcmrvl/fw_dnld.c
+++ b/drivers/nfc/nfcmrvl/fw_dnld.c
@@ -459,7 +459,7 @@
INIT_WORK(&priv->fw_dnld.rx_work, fw_dnld_rx_work);
snprintf(name, sizeof(name), "%s_nfcmrvl_fw_dnld_rx_wq",
- dev_name(priv->dev));
+ dev_name(&priv->ndev->nfc_dev->dev));
priv->fw_dnld.rx_wq = create_singlethread_workqueue(name);
if (!priv->fw_dnld.rx_wq)
return -ENOMEM;
@@ -496,6 +496,7 @@
{
struct nfcmrvl_private *priv = nci_get_drvdata(ndev);
struct nfcmrvl_fw_dnld *fw_dnld = &priv->fw_dnld;
+ int res;
if (!priv->support_fw_dnld)
return -ENOTSUPP;
@@ -511,7 +512,9 @@
*/
/* Retrieve FW binary */
- if (request_firmware(&fw_dnld->fw, firmware_name, priv->dev) < 0) {
+ res = request_firmware(&fw_dnld->fw, firmware_name,
+ &ndev->nfc_dev->dev);
+ if (res < 0) {
nfc_err(priv->dev, "failed to retrieve FW %s", firmware_name);
return -ENOENT;
}
diff --git a/drivers/nfc/nfcmrvl/main.c b/drivers/nfc/nfcmrvl/main.c
index 51c8240..a446590 100644
--- a/drivers/nfc/nfcmrvl/main.c
+++ b/drivers/nfc/nfcmrvl/main.c
@@ -124,12 +124,13 @@
memcpy(&priv->config, pdata, sizeof(*pdata));
if (priv->config.reset_n_io) {
- rc = devm_gpio_request_one(dev,
- priv->config.reset_n_io,
- GPIOF_OUT_INIT_LOW,
- "nfcmrvl_reset_n");
- if (rc < 0)
+ rc = gpio_request_one(priv->config.reset_n_io,
+ GPIOF_OUT_INIT_LOW,
+ "nfcmrvl_reset_n");
+ if (rc < 0) {
+ priv->config.reset_n_io = 0;
nfc_err(dev, "failed to request reset_n io\n");
+ }
}
if (phy == NFCMRVL_PHY_SPI) {
@@ -154,32 +155,36 @@
if (!priv->ndev) {
nfc_err(dev, "nci_allocate_device failed\n");
rc = -ENOMEM;
- goto error;
+ goto error_free_gpio;
}
- nci_set_drvdata(priv->ndev, priv);
-
- rc = nci_register_device(priv->ndev);
- if (rc) {
- nfc_err(dev, "nci_register_device failed %d\n", rc);
- goto error_free_dev;
- }
-
- /* Ensure that controller is powered off */
- nfcmrvl_chip_halt(priv);
-
rc = nfcmrvl_fw_dnld_init(priv);
if (rc) {
nfc_err(dev, "failed to initialize FW download %d\n", rc);
goto error_free_dev;
}
+ nci_set_drvdata(priv->ndev, priv);
+
+ rc = nci_register_device(priv->ndev);
+ if (rc) {
+ nfc_err(dev, "nci_register_device failed %d\n", rc);
+ goto error_fw_dnld_deinit;
+ }
+
+ /* Ensure that controller is powered off */
+ nfcmrvl_chip_halt(priv);
+
nfc_info(dev, "registered with nci successfully\n");
return priv;
+error_fw_dnld_deinit:
+ nfcmrvl_fw_dnld_deinit(priv);
error_free_dev:
nci_free_device(priv->ndev);
-error:
+error_free_gpio:
+ if (priv->config.reset_n_io)
+ gpio_free(priv->config.reset_n_io);
kfree(priv);
return ERR_PTR(rc);
}
@@ -195,7 +200,7 @@
nfcmrvl_fw_dnld_deinit(priv);
if (priv->config.reset_n_io)
- devm_gpio_free(priv->dev, priv->config.reset_n_io);
+ gpio_free(priv->config.reset_n_io);
nci_unregister_device(ndev);
nci_free_device(ndev);
diff --git a/drivers/nfc/nfcmrvl/uart.c b/drivers/nfc/nfcmrvl/uart.c
index 83a99e3..6c0c301 100644
--- a/drivers/nfc/nfcmrvl/uart.c
+++ b/drivers/nfc/nfcmrvl/uart.c
@@ -109,6 +109,7 @@
struct nfcmrvl_private *priv;
struct nfcmrvl_platform_data *pdata = NULL;
struct nfcmrvl_platform_data config;
+ struct device *dev = nu->tty->dev;
/*
* Platform data cannot be used here since usually it is already used
@@ -116,9 +117,8 @@
* and check if DT entries were added.
*/
- if (nu->tty->dev->parent && nu->tty->dev->parent->of_node)
- if (nfcmrvl_uart_parse_dt(nu->tty->dev->parent->of_node,
- &config) == 0)
+ if (dev && dev->parent && dev->parent->of_node)
+ if (nfcmrvl_uart_parse_dt(dev->parent->of_node, &config) == 0)
pdata = &config;
if (!pdata) {
@@ -131,7 +131,7 @@
}
priv = nfcmrvl_nci_register_dev(NFCMRVL_PHY_UART, nu, &uart_ops,
- nu->tty->dev, pdata);
+ dev, pdata);
if (IS_ERR(priv))
return PTR_ERR(priv);
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index ecc6fb9..3bbdf60 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -599,7 +599,7 @@
if (!mw->virt_addr)
return -ENOMEM;
- if (qp_count % mw_count && mw_num + 1 < qp_count / mw_count)
+ if (mw_num < qp_count % mw_count)
num_qps_mw = qp_count / mw_count + 1;
else
num_qps_mw = qp_count / mw_count;
@@ -947,7 +947,7 @@
qp->event_handler = NULL;
ntb_qp_link_down_reset(qp);
- if (qp_count % mw_count && mw_num + 1 < qp_count / mw_count)
+ if (mw_num < qp_count % mw_count)
num_qps_mw = qp_count / mw_count + 1;
else
num_qps_mw = qp_count / mw_count;
@@ -1065,8 +1065,8 @@
qp_count = ilog2(qp_bitmap);
if (max_num_clients && max_num_clients < qp_count)
qp_count = max_num_clients;
- else if (mw_count < qp_count)
- qp_count = mw_count;
+ else if (nt->mw_count < qp_count)
+ qp_count = nt->mw_count;
qp_bitmap &= BIT_ULL(qp_count) - 1;
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index efb2c1c..9572342 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -1205,10 +1205,13 @@
struct page *page, int rw)
{
struct btt *btt = bdev->bd_disk->private_data;
+ int rc;
- btt_do_bvec(btt, NULL, page, PAGE_CACHE_SIZE, 0, rw, sector);
- page_endio(page, rw & WRITE, 0);
- return 0;
+ rc = btt_do_bvec(btt, NULL, page, PAGE_CACHE_SIZE, 0, rw, sector);
+ if (rc == 0)
+ page_endio(page, rw & WRITE, 0);
+
+ return rc;
}
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 5f47356..254b0ee 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -590,8 +590,14 @@
rc = nd_desc->ndctl(nd_desc, nvdimm, cmd, buf, buf_len);
if (rc < 0)
goto out_unlock;
+ nvdimm_bus_unlock(&nvdimm_bus->dev);
+
if (copy_to_user(p, buf, buf_len))
rc = -EFAULT;
+
+ vfree(buf);
+ return rc;
+
out_unlock:
nvdimm_bus_unlock(&nvdimm_bus->dev);
out:
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 044253d..b8a5a8e 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -27,6 +27,13 @@
NVME_NS_LIGHTNVM = 1,
};
+/* The below value is the specific amount of delay needed before checking
+ * readiness in case of the PCI_DEVICE(0x1c58, 0x0003), which needs the
+ * NVME_QUIRK_DELAY_BEFORE_CHK_RDY quirk enabled. The value (in ms) was
+ * found empirically.
+ */
+#define NVME_QUIRK_DELAY_AMOUNT 2000
+
/*
* Represents an NVM Express device. Each nvme_dev is a PCI function.
*/
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index c851bc5..4c673d4 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1633,10 +1633,15 @@
*/
static int nvme_disable_ctrl(struct nvme_dev *dev, u64 cap)
{
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+
dev->ctrl_config &= ~NVME_CC_SHN_MASK;
dev->ctrl_config &= ~NVME_CC_ENABLE;
writel(dev->ctrl_config, &dev->bar->cc);
+ if (pdev->vendor == 0x1c58 && pdev->device == 0x0003)
+ msleep(NVME_QUIRK_DELAY_AMOUNT);
+
return nvme_wait_ready(dev, cap, false);
}
diff --git a/drivers/nvmem/imx-ocotp.c b/drivers/nvmem/imx-ocotp.c
index b7971d4..74e5360 100644
--- a/drivers/nvmem/imx-ocotp.c
+++ b/drivers/nvmem/imx-ocotp.c
@@ -88,7 +88,7 @@
static const struct of_device_id imx_ocotp_dt_ids[] = {
{ .compatible = "fsl,imx6q-ocotp", (void *)128 },
- { .compatible = "fsl,imx6sl-ocotp", (void *)32 },
+ { .compatible = "fsl,imx6sl-ocotp", (void *)64 },
{ .compatible = "fsl,imx6sx-ocotp", (void *)128 },
{ },
};
diff --git a/drivers/of/address.c b/drivers/of/address.c
index ec5eb17..5393be7 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -260,7 +260,7 @@
if (!parser->range || parser->range + parser->np > parser->end)
return NULL;
- range->pci_space = parser->range[0];
+ range->pci_space = be32_to_cpup(parser->range);
range->flags = of_bus_pci_get_flags(parser->range);
range->pci_addr = of_read_number(parser->range + 1, ns);
range->cpu_addr = of_translate_address(parser->node,
diff --git a/drivers/of/device.c b/drivers/of/device.c
index e5f47ce..97a280d 100644
--- a/drivers/of/device.c
+++ b/drivers/of/device.c
@@ -225,6 +225,7 @@
return tsize;
}
+EXPORT_SYMBOL_GPL(of_device_get_modalias);
/**
* of_device_uevent - Display OF related uevent information
@@ -287,3 +288,4 @@
return 0;
}
+EXPORT_SYMBOL_GPL(of_device_uevent_modalias);
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 0438512..ca17571 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -632,9 +632,12 @@
const char *pathp;
int offset, rc = 0, depth = -1;
- for (offset = fdt_next_node(blob, -1, &depth);
- offset >= 0 && depth >= 0 && !rc;
- offset = fdt_next_node(blob, offset, &depth)) {
+ if (!blob)
+ return 0;
+
+ for (offset = fdt_next_node(blob, -1, &depth);
+ offset >= 0 && depth >= 0 && !rc;
+ offset = fdt_next_node(blob, offset, &depth)) {
pathp = fdt_get_name(blob, offset, NULL);
if (*pathp == '/')
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c
index 8e11fb2..34f1d6b 100644
--- a/drivers/parisc/ccio-dma.c
+++ b/drivers/parisc/ccio-dma.c
@@ -741,6 +741,8 @@
BUG_ON(!dev);
ioc = GET_IOC(dev);
+ if (!ioc)
+ return DMA_ERROR_CODE;
BUG_ON(size <= 0);
@@ -805,6 +807,10 @@
BUG_ON(!dev);
ioc = GET_IOC(dev);
+ if (!ioc) {
+ WARN_ON(!ioc);
+ return;
+ }
DBG_RUN("%s() iovp 0x%lx/%x\n",
__func__, (long)iova, size);
@@ -908,6 +914,8 @@
BUG_ON(!dev);
ioc = GET_IOC(dev);
+ if (!ioc)
+ return 0;
DBG_RUN_SG("%s() START %d entries\n", __func__, nents);
@@ -980,6 +988,10 @@
BUG_ON(!dev);
ioc = GET_IOC(dev);
+ if (!ioc) {
+ WARN_ON(!ioc);
+ return;
+ }
DBG_RUN_SG("%s() START %d entries, %p,%x\n",
__func__, nents, sg_virt(sglist), sglist->length);
diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c
index a0580af..005ea63 100644
--- a/drivers/parisc/dino.c
+++ b/drivers/parisc/dino.c
@@ -154,7 +154,10 @@
};
/* Looks nice and keeps the compiler happy */
-#define DINO_DEV(d) ((struct dino_device *) d)
+#define DINO_DEV(d) ({ \
+ void *__pdata = d; \
+ BUG_ON(!__pdata); \
+ (struct dino_device *)__pdata; })
/*
@@ -951,7 +954,7 @@
dino_dev->hba.dev = dev;
dino_dev->hba.base_addr = ioremap_nocache(hpa, 4096);
- dino_dev->hba.lmmio_space_offset = 0; /* CPU addrs == bus addrs */
+ dino_dev->hba.lmmio_space_offset = PCI_F_EXTEND;
spin_lock_init(&dino_dev->dinosaur_pen);
dino_dev->hba.iommu = ccio_get_iommu(dev);
diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c
index 42844c2..d0c2759 100644
--- a/drivers/parisc/lba_pci.c
+++ b/drivers/parisc/lba_pci.c
@@ -111,8 +111,10 @@
/* Looks nice and keeps the compiler happy */
-#define LBA_DEV(d) ((struct lba_device *) (d))
-
+#define LBA_DEV(d) ({ \
+ void *__pdata = d; \
+ BUG_ON(!__pdata); \
+ (struct lba_device *)__pdata; })
/*
** Only allow 8 subsidiary busses per LBA
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c
index 225049b..d632614 100644
--- a/drivers/parisc/sba_iommu.c
+++ b/drivers/parisc/sba_iommu.c
@@ -691,6 +691,8 @@
return 0;
ioc = GET_IOC(dev);
+ if (!ioc)
+ return 0;
/*
* check if mask is >= than the current max IO Virt Address
@@ -722,6 +724,8 @@
int pide;
ioc = GET_IOC(dev);
+ if (!ioc)
+ return DMA_ERROR_CODE;
/* save offset bits */
offset = ((dma_addr_t) (long) addr) & ~IOVP_MASK;
@@ -803,6 +807,10 @@
DBG_RUN("%s() iovp 0x%lx/%x\n", __func__, (long) iova, size);
ioc = GET_IOC(dev);
+ if (!ioc) {
+ WARN_ON(!ioc);
+ return;
+ }
offset = iova & ~IOVP_MASK;
iova ^= offset; /* clear offset bits */
size += offset;
@@ -942,6 +950,8 @@
DBG_RUN_SG("%s() START %d entries\n", __func__, nents);
ioc = GET_IOC(dev);
+ if (!ioc)
+ return 0;
/* Fast path single entry scatterlists. */
if (nents == 1) {
@@ -1027,6 +1037,10 @@
__func__, nents, sg_virt(sglist), sglist->length);
ioc = GET_IOC(dev);
+ if (!ioc) {
+ WARN_ON(!ioc);
+ return;
+ }
#ifdef SBA_COLLECT_STATS
ioc->usg_calls++;
diff --git a/drivers/parport/parport_gsc.c b/drivers/parport/parport_gsc.c
index 6e3a60c..50f3bb0 100644
--- a/drivers/parport/parport_gsc.c
+++ b/drivers/parport/parport_gsc.c
@@ -293,7 +293,7 @@
p->irq = PARPORT_IRQ_NONE;
}
if (p->irq != PARPORT_IRQ_NONE) {
- printk(", irq %d", p->irq);
+ pr_cont(", irq %d", p->irq);
if (p->dma == PARPORT_DMA_AUTO) {
p->dma = PARPORT_DMA_NONE;
@@ -303,8 +303,8 @@
is mandatory (see above) */
p->dma = PARPORT_DMA_NONE;
- printk(" [");
-#define printmode(x) {if(p->modes&PARPORT_MODE_##x){printk("%s%s",f?",":"",#x);f++;}}
+ pr_cont(" [");
+#define printmode(x) {if(p->modes&PARPORT_MODE_##x){pr_cont("%s%s",f?",":"",#x);f++;}}
{
int f = 0;
printmode(PCSPP);
@@ -315,7 +315,7 @@
// printmode(DMA);
}
#undef printmode
- printk("]\n");
+ pr_cont("]\n");
if (p->irq != PARPORT_IRQ_NONE) {
if (request_irq (p->irq, parport_irq_handler,
diff --git a/drivers/parport/share.c b/drivers/parport/share.c
index 5ce5ef21..754f21f 100644
--- a/drivers/parport/share.c
+++ b/drivers/parport/share.c
@@ -936,8 +936,10 @@
* pardevice fields. -arca
*/
port->ops->init_state(par_dev, par_dev->state);
- port->proc_device = par_dev;
- parport_device_proc_register(par_dev);
+ if (!test_and_set_bit(PARPORT_DEVPROC_REGISTERED, &port->devflags)) {
+ port->proc_device = par_dev;
+ parport_device_proc_register(par_dev);
+ }
return par_dev;
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index 31f31d4..3575277 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -303,13 +303,6 @@
return rc;
}
- pci_iov_set_numvfs(dev, nr_virtfn);
- iov->ctrl |= PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE;
- pci_cfg_access_lock(dev);
- pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
- msleep(100);
- pci_cfg_access_unlock(dev);
-
iov->initial_VFs = initial;
if (nr_virtfn < initial)
initial = nr_virtfn;
@@ -320,6 +313,13 @@
goto err_pcibios;
}
+ pci_iov_set_numvfs(dev, nr_virtfn);
+ iov->ctrl |= PCI_SRIOV_CTRL_VFE | PCI_SRIOV_CTRL_MSE;
+ pci_cfg_access_lock(dev);
+ pci_write_config_word(dev, iov->pos + PCI_SRIOV_CTRL, iov->ctrl);
+ msleep(100);
+ pci_cfg_access_unlock(dev);
+
for (i = 0; i < initial; i++) {
rc = virtfn_add(dev, i, 0);
if (rc)
@@ -555,21 +555,61 @@
}
/**
- * pci_iov_resource_bar - get position of the SR-IOV BAR
+ * pci_iov_update_resource - update a VF BAR
* @dev: the PCI device
* @resno: the resource number
*
- * Returns position of the BAR encapsulated in the SR-IOV capability.
+ * Update a VF BAR in the SR-IOV capability of a PF.
*/
-int pci_iov_resource_bar(struct pci_dev *dev, int resno)
+void pci_iov_update_resource(struct pci_dev *dev, int resno)
{
- if (resno < PCI_IOV_RESOURCES || resno > PCI_IOV_RESOURCE_END)
- return 0;
+ struct pci_sriov *iov = dev->is_physfn ? dev->sriov : NULL;
+ struct resource *res = dev->resource + resno;
+ int vf_bar = resno - PCI_IOV_RESOURCES;
+ struct pci_bus_region region;
+ u16 cmd;
+ u32 new;
+ int reg;
- BUG_ON(!dev->is_physfn);
+ /*
+ * The generic pci_restore_bars() path calls this for all devices,
+ * including VFs and non-SR-IOV devices. If this is not a PF, we
+ * have nothing to do.
+ */
+ if (!iov)
+ return;
- return dev->sriov->pos + PCI_SRIOV_BAR +
- 4 * (resno - PCI_IOV_RESOURCES);
+ pci_read_config_word(dev, iov->pos + PCI_SRIOV_CTRL, &cmd);
+ if ((cmd & PCI_SRIOV_CTRL_VFE) && (cmd & PCI_SRIOV_CTRL_MSE)) {
+ dev_WARN(&dev->dev, "can't update enabled VF BAR%d %pR\n",
+ vf_bar, res);
+ return;
+ }
+
+ /*
+ * Ignore unimplemented BARs, unused resource slots for 64-bit
+ * BARs, and non-movable resources, e.g., those described via
+ * Enhanced Allocation.
+ */
+ if (!res->flags)
+ return;
+
+ if (res->flags & IORESOURCE_UNSET)
+ return;
+
+ if (res->flags & IORESOURCE_PCI_FIXED)
+ return;
+
+ pcibios_resource_to_bus(dev->bus, &region, res);
+ new = region.start;
+ new |= res->flags & ~PCI_BASE_ADDRESS_MEM_MASK;
+
+ reg = iov->pos + PCI_SRIOV_BAR + 4 * vf_bar;
+ pci_write_config_dword(dev, reg, new);
+ if (res->flags & IORESOURCE_MEM_64) {
+ new = region.start >> 16 >> 16;
+ pci_write_config_dword(dev, reg + 4, new);
+ }
}
resource_size_t __weak pcibios_iov_resource_alignment(struct pci_dev *dev,
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index d7ffd66..fca9255 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -945,6 +945,7 @@
return pci_legacy_resume_early(dev);
pci_update_current_state(pci_dev, PCI_D0);
+ pci_restore_state(pci_dev);
if (drv && drv->pm && drv->pm->thaw_noirq)
error = drv->pm->thaw_noirq(dev);
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index d750870..f8b2b59 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -973,15 +973,19 @@
int pci_mmap_fits(struct pci_dev *pdev, int resno, struct vm_area_struct *vma,
enum pci_mmap_api mmap_api)
{
- unsigned long nr, start, size, pci_start;
+ unsigned long nr, start, size;
+ resource_size_t pci_start = 0, pci_end;
if (pci_resource_len(pdev, resno) == 0)
return 0;
nr = vma_pages(vma);
start = vma->vm_pgoff;
size = ((pci_resource_len(pdev, resno) - 1) >> PAGE_SHIFT) + 1;
- pci_start = (mmap_api == PCI_MMAP_PROCFS) ?
- pci_resource_start(pdev, resno) >> PAGE_SHIFT : 0;
+ if (mmap_api == PCI_MMAP_PROCFS) {
+ pci_resource_to_user(pdev, resno, &pdev->resource[resno],
+ &pci_start, &pci_end);
+ pci_start >>= PAGE_SHIFT;
+ }
if (start >= pci_start && start < pci_start + size &&
start + nr <= pci_start + size)
return 1;
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index e311a9b..1a14ca8 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -519,10 +519,6 @@
{
int i;
- /* Per SR-IOV spec 3.4.1.11, VF BARs are RO zero */
- if (dev->is_virtfn)
- return;
-
for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
pci_update_resource(dev, i);
}
@@ -1736,8 +1732,8 @@
}
}
if (!list_empty(&pci_pme_list))
- schedule_delayed_work(&pci_pme_work,
- msecs_to_jiffies(PME_TIMEOUT));
+ queue_delayed_work(system_freezable_wq, &pci_pme_work,
+ msecs_to_jiffies(PME_TIMEOUT));
mutex_unlock(&pci_pme_list_mutex);
}
@@ -1802,8 +1798,9 @@
mutex_lock(&pci_pme_list_mutex);
list_add(&pme_dev->list, &pci_pme_list);
if (list_is_singular(&pci_pme_list))
- schedule_delayed_work(&pci_pme_work,
- msecs_to_jiffies(PME_TIMEOUT));
+ queue_delayed_work(system_freezable_wq,
+ &pci_pme_work,
+ msecs_to_jiffies(PME_TIMEOUT));
mutex_unlock(&pci_pme_list_mutex);
} else {
mutex_lock(&pci_pme_list_mutex);
@@ -4472,36 +4469,6 @@
}
EXPORT_SYMBOL(pci_select_bars);
-/**
- * pci_resource_bar - get position of the BAR associated with a resource
- * @dev: the PCI device
- * @resno: the resource number
- * @type: the BAR type to be filled in
- *
- * Returns BAR position in config space, or 0 if the BAR is invalid.
- */
-int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
-{
- int reg;
-
- if (resno < PCI_ROM_RESOURCE) {
- *type = pci_bar_unknown;
- return PCI_BASE_ADDRESS_0 + 4 * resno;
- } else if (resno == PCI_ROM_RESOURCE) {
- *type = pci_bar_mem32;
- return dev->rom_base_reg;
- } else if (resno < PCI_BRIDGE_RESOURCES) {
- /* device specific resource */
- *type = pci_bar_unknown;
- reg = pci_iov_resource_bar(dev, resno);
- if (reg)
- return reg;
- }
-
- dev_err(&dev->dev, "BAR %d: invalid resource\n", resno);
- return 0;
-}
-
/* Some architectures require additional programming to enable VGA */
static arch_set_vga_state_t arch_set_vga_state;
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index d390fc1..c43e448 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -232,7 +232,6 @@
int pci_setup_device(struct pci_dev *dev);
int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
struct resource *res, unsigned int reg);
-int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type);
void pci_configure_ari(struct pci_dev *dev);
void __pci_bus_size_bridges(struct pci_bus *bus,
struct list_head *realloc_head);
@@ -276,7 +275,7 @@
#ifdef CONFIG_PCI_IOV
int pci_iov_init(struct pci_dev *dev);
void pci_iov_release(struct pci_dev *dev);
-int pci_iov_resource_bar(struct pci_dev *dev, int resno);
+void pci_iov_update_resource(struct pci_dev *dev, int resno);
resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno);
void pci_restore_iov_state(struct pci_dev *dev);
int pci_iov_bus_range(struct pci_bus *bus);
@@ -290,10 +289,6 @@
{
}
-static inline int pci_iov_resource_bar(struct pci_dev *dev, int resno)
-{
- return 0;
-}
static inline void pci_restore_iov_state(struct pci_dev *dev)
{
}
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index bc8620b..f047d6d 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -226,7 +226,8 @@
mask64 = (u32)PCI_BASE_ADDRESS_MEM_MASK;
}
} else {
- res->flags |= (l & IORESOURCE_ROM_ENABLE);
+ if (l & PCI_ROM_ADDRESS_ENABLE)
+ res->flags |= IORESOURCE_ROM_ENABLE;
l64 = l & PCI_ROM_ADDRESS_MASK;
sz64 = sz & PCI_ROM_ADDRESS_MASK;
mask64 = PCI_ROM_ADDRESS_MASK;
diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c
index eb0ad53..3eea7fc 100644
--- a/drivers/pci/rom.c
+++ b/drivers/pci/rom.c
@@ -31,6 +31,11 @@
if (!res->flags)
return -1;
+ /*
+ * Ideally pci_update_resource() would update the ROM BAR address,
+ * and we would only set the enable bit here. But apparently some
+ * devices have buggy ROM BARs that read as zero when disabled.
+ */
pcibios_resource_to_bus(pdev->bus, &region, res);
pci_read_config_dword(pdev, pdev->rom_base_reg, &rom_addr);
rom_addr &= ~PCI_ROM_ADDRESS_MASK;
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index 604011e..2506296 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -25,21 +25,18 @@
#include <linux/slab.h>
#include "pci.h"
-
-void pci_update_resource(struct pci_dev *dev, int resno)
+static void pci_std_update_resource(struct pci_dev *dev, int resno)
{
struct pci_bus_region region;
bool disable;
u16 cmd;
u32 new, check, mask;
int reg;
- enum pci_bar_type type;
struct resource *res = dev->resource + resno;
- if (dev->is_virtfn) {
- dev_warn(&dev->dev, "can't update VF BAR%d\n", resno);
+ /* Per SR-IOV spec 3.4.1.11, VF BARs are RO zero */
+ if (dev->is_virtfn)
return;
- }
/*
* Ignore resources for unimplemented BARs and unused resource slots
@@ -60,21 +57,34 @@
return;
pcibios_resource_to_bus(dev->bus, &region, res);
+ new = region.start;
- new = region.start | (res->flags & PCI_REGION_FLAG_MASK);
- if (res->flags & IORESOURCE_IO)
+ if (res->flags & IORESOURCE_IO) {
mask = (u32)PCI_BASE_ADDRESS_IO_MASK;
- else
+ new |= res->flags & ~PCI_BASE_ADDRESS_IO_MASK;
+ } else if (resno == PCI_ROM_RESOURCE) {
+ mask = (u32)PCI_ROM_ADDRESS_MASK;
+ } else {
mask = (u32)PCI_BASE_ADDRESS_MEM_MASK;
+ new |= res->flags & ~PCI_BASE_ADDRESS_MEM_MASK;
+ }
- reg = pci_resource_bar(dev, resno, &type);
- if (!reg)
- return;
- if (type != pci_bar_unknown) {
+ if (resno < PCI_ROM_RESOURCE) {
+ reg = PCI_BASE_ADDRESS_0 + 4 * resno;
+ } else if (resno == PCI_ROM_RESOURCE) {
+
+ /*
+ * Apparently some Matrox devices have ROM BARs that read
+ * as zero when disabled, so don't update ROM BARs unless
+ * they're enabled. See https://lkml.org/lkml/2005/8/30/138.
+ */
if (!(res->flags & IORESOURCE_ROM_ENABLE))
return;
+
+ reg = dev->rom_base_reg;
new |= PCI_ROM_ADDRESS_ENABLE;
- }
+ } else
+ return;
/*
* We can't update a 64-bit BAR atomically, so when possible,
@@ -110,6 +120,16 @@
pci_write_config_word(dev, PCI_COMMAND, cmd);
}
+void pci_update_resource(struct pci_dev *dev, int resno)
+{
+ if (resno <= PCI_ROM_RESOURCE)
+ pci_std_update_resource(dev, resno);
+#ifdef CONFIG_PCI_IOV
+ else if (resno >= PCI_IOV_RESOURCES && resno <= PCI_IOV_RESOURCE_END)
+ pci_iov_update_resource(dev, resno);
+#endif
+}
+
int pci_claim_resource(struct pci_dev *dev, int resource)
{
struct resource *res = &dev->resource[resource];
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index 03cb3ea..b5679fb 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -368,6 +368,7 @@
config PHY_TUSB1210
tristate "TI TUSB1210 ULPI PHY module"
depends on USB_ULPI_BUS
+ depends on EXTCON || !EXTCON # if EXTCON=m, this cannot be built-in
select GENERIC_PHY
help
Support for TI TUSB1210 USB ULPI PHY.
diff --git a/drivers/pinctrl/berlin/berlin-bg4ct.c b/drivers/pinctrl/berlin/berlin-bg4ct.c
index 0917204..c617ec4 100644
--- a/drivers/pinctrl/berlin/berlin-bg4ct.c
+++ b/drivers/pinctrl/berlin/berlin-bg4ct.c
@@ -217,7 +217,7 @@
BERLIN_PINCTRL_GROUP("SCRD0_CRD_PRES", 0xc, 0x3, 0x15,
BERLIN_PINCTRL_FUNCTION(0x0, "gpio"), /* GPIO20 */
BERLIN_PINCTRL_FUNCTION(0x1, "scrd0"), /* crd pres */
- BERLIN_PINCTRL_FUNCTION(0x1, "sd1a")), /* DAT3 */
+ BERLIN_PINCTRL_FUNCTION(0x3, "sd1a")), /* DAT3 */
BERLIN_PINCTRL_GROUP("SPI1_SS0n", 0xc, 0x3, 0x18,
BERLIN_PINCTRL_FUNCTION(0x0, "spi1"), /* SS0n */
BERLIN_PINCTRL_FUNCTION(0x1, "gpio"), /* GPIO37 */
diff --git a/drivers/pinctrl/freescale/pinctrl-mxs.c b/drivers/pinctrl/freescale/pinctrl-mxs.c
index 6bbda6b..5da9c95dc 100644
--- a/drivers/pinctrl/freescale/pinctrl-mxs.c
+++ b/drivers/pinctrl/freescale/pinctrl-mxs.c
@@ -195,6 +195,16 @@
return 0;
}
+static void mxs_pinctrl_rmwl(u32 value, u32 mask, u8 shift, void __iomem *reg)
+{
+ u32 tmp;
+
+ tmp = readl(reg);
+ tmp &= ~(mask << shift);
+ tmp |= value << shift;
+ writel(tmp, reg);
+}
+
static int mxs_pinctrl_set_mux(struct pinctrl_dev *pctldev, unsigned selector,
unsigned group)
{
@@ -212,8 +222,7 @@
reg += bank * 0x20 + pin / 16 * 0x10;
shift = pin % 16 * 2;
- writel(0x3 << shift, reg + CLR);
- writel(g->muxsel[i] << shift, reg + SET);
+ mxs_pinctrl_rmwl(g->muxsel[i], 0x3, shift, reg);
}
return 0;
@@ -280,8 +289,7 @@
/* mA */
if (config & MA_PRESENT) {
shift = pin % 8 * 4;
- writel(0x3 << shift, reg + CLR);
- writel(ma << shift, reg + SET);
+ mxs_pinctrl_rmwl(ma, 0x3, shift, reg);
}
/* vol */
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
index a009ae3..930f0f2 100644
--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -1466,12 +1466,11 @@
offset += range->npins;
}
- /* Mask and clear all interrupts */
- chv_writel(0, pctrl->regs + CHV_INTMASK);
+ /* Clear all interrupts */
chv_writel(0xffff, pctrl->regs + CHV_INTSTAT);
ret = gpiochip_irqchip_add(chip, &chv_gpio_irqchip, 0,
- handle_simple_irq, IRQ_TYPE_NONE);
+ handle_bad_irq, IRQ_TYPE_NONE);
if (ret) {
dev_err(pctrl->dev, "failed to add IRQ chip\n");
goto fail;
diff --git a/drivers/pinctrl/meson/pinctrl-meson8b.c b/drivers/pinctrl/meson/pinctrl-meson8b.c
index 9677807..b505b87 100644
--- a/drivers/pinctrl/meson/pinctrl-meson8b.c
+++ b/drivers/pinctrl/meson/pinctrl-meson8b.c
@@ -732,8 +732,8 @@
static const char * const nand_groups[] = {
"nand_io", "nand_io_ce0", "nand_io_ce1",
"nand_io_rb0", "nand_ale", "nand_cle",
- "nand_wen_clk", "nand_ren_clk", "nand_dqs0",
- "nand_dqs1"
+ "nand_wen_clk", "nand_ren_clk", "nand_dqs_0",
+ "nand_dqs_1"
};
static const char * const nor_groups[] = {
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index d6bd494..a9c27a3 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -609,10 +609,6 @@
spin_lock_irqsave(&pctrl->lock, flags);
- val = readl(pctrl->regs + g->intr_status_reg);
- val &= ~BIT(g->intr_status_bit);
- writel(val, pctrl->regs + g->intr_status_reg);
-
val = readl(pctrl->regs + g->intr_cfg_reg);
val |= BIT(g->intr_enable_bit);
writel(val, pctrl->regs + g->intr_cfg_reg);
diff --git a/drivers/pinctrl/samsung/pinctrl-exynos.c b/drivers/pinctrl/samsung/pinctrl-exynos.c
index 71ccf6a..2551e4a 100644
--- a/drivers/pinctrl/samsung/pinctrl-exynos.c
+++ b/drivers/pinctrl/samsung/pinctrl-exynos.c
@@ -194,8 +194,6 @@
spin_unlock_irqrestore(&bank->slock, flags);
- exynos_irq_unmask(irqd);
-
return 0;
}
@@ -216,8 +214,6 @@
shift = irqd->hwirq * bank_type->fld_width[PINCFG_TYPE_FUNC];
mask = (1 << bank_type->fld_width[PINCFG_TYPE_FUNC]) - 1;
- exynos_irq_mask(irqd);
-
spin_lock_irqsave(&bank->slock, flags);
con = readl(d->virt_base + reg_con);
diff --git a/drivers/pinctrl/sh-pfc/core.c b/drivers/pinctrl/sh-pfc/core.c
index 2b0d702..699efb1 100644
--- a/drivers/pinctrl/sh-pfc/core.c
+++ b/drivers/pinctrl/sh-pfc/core.c
@@ -543,6 +543,9 @@
ret = info->ops->init(pfc);
if (ret < 0)
return ret;
+
+ /* .init() may have overridden pfc->info */
+ info = pfc->info;
}
/* Enable dummy states for those platforms without pinctrl support */
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
index 87a4f44..42ffa87 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
@@ -1102,7 +1102,7 @@
PINMUX_IPSR_MSEL(IP6_5_3, FMIN_E, SEL_FM_4),
PINMUX_IPSR_DATA(IP6_7_6, AUDIO_CLKOUT),
PINMUX_IPSR_MSEL(IP6_7_6, MSIOF1_SS1_B, SEL_SOF1_1),
- PINMUX_IPSR_MSEL(IP6_5_3, TX2, SEL_SCIF2_0),
+ PINMUX_IPSR_MSEL(IP6_7_6, TX2, SEL_SCIF2_0),
PINMUX_IPSR_MSEL(IP6_7_6, SCIFA2_TXD, SEL_SCIFA2_0),
PINMUX_IPSR_DATA(IP6_9_8, IRQ0),
PINMUX_IPSR_MSEL(IP6_9_8, SCIFB1_RXD_D, SEL_SCIFB1_3),
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c b/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c
index 862a096..be5c71d 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun4i-a10.c
@@ -811,6 +811,7 @@
SUNXI_FUNCTION(0x2, "lcd1"), /* D16 */
SUNXI_FUNCTION(0x3, "pata"), /* ATAD12 */
SUNXI_FUNCTION(0x4, "keypad"), /* IN6 */
+ SUNXI_FUNCTION(0x5, "sim"), /* DET */
SUNXI_FUNCTION_IRQ(0x6, 16), /* EINT16 */
SUNXI_FUNCTION(0x7, "csi1")), /* D16 */
SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 17),
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c
index 90b973e..a7c81e9 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-a83t.c
@@ -394,7 +394,7 @@
SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 18),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out"),
- SUNXI_FUNCTION(0x3, "owa")), /* DOUT */
+ SUNXI_FUNCTION(0x3, "spdif")), /* DOUT */
SUNXI_PIN(SUNXI_PINCTRL_PIN(E, 19),
SUNXI_FUNCTION(0x0, "gpio_in"),
SUNXI_FUNCTION(0x1, "gpio_out")),
diff --git a/drivers/platform/msm/sps/bam.c b/drivers/platform/msm/sps/bam.c
index c5e8de0..5e9e991 100644
--- a/drivers/platform/msm/sps/bam.c
+++ b/drivers/platform/msm/sps/bam.c
@@ -834,7 +834,7 @@
return;
}
iowrite32(val, dev->base + offset);
- SPS_DBG(dev, "sps:bam 0x%p(va) write reg 0x%x w_val 0x%x.\n",
+ SPS_DBG(dev, "sps:bam 0x%pP(va) write reg 0x%x w_val 0x%x.\n",
dev->base, offset, val);
}
@@ -870,7 +870,7 @@
tmp &= ~mask; /* clear written bits */
val = tmp | (val << shift);
iowrite32(val, dev->base + offset);
- SPS_DBG(dev, "sps:bam 0x%p(va) write reg 0x%x w_val 0x%x.\n",
+ SPS_DBG(dev, "sps:bam 0x%pP(va) write reg 0x%x w_val 0x%x.\n",
dev->base, offset, val);
}
diff --git a/drivers/platform/msm/sps/sps.c b/drivers/platform/msm/sps/sps.c
index ddb2388..ae9f746 100644
--- a/drivers/platform/msm/sps/sps.c
+++ b/drivers/platform/msm/sps/sps.c
@@ -303,8 +303,7 @@
} else {
vir_addr = &bam->base;
num_pipes = bam->props.num_pipes;
- if (log_level_sel <= SPS_IPC_MAX_LOGLEVEL)
- bam->ipc_loglevel = log_level_sel;
+ bam->ipc_loglevel = log_level_sel;
}
switch (reg_dump_option) {
@@ -515,7 +514,7 @@
debugfs_buf_size = 0;
debugfs_buf_used = 0;
wraparound = false;
- log_level_sel = SPS_IPC_MAX_LOGLEVEL + 1;
+ log_level_sel = 0;
dent = debugfs_create_dir("sps", 0);
if (IS_ERR(dent)) {
@@ -2224,8 +2223,6 @@
if (bam_props->ipc_loglevel)
bam->ipc_loglevel = bam_props->ipc_loglevel;
- else
- bam->ipc_loglevel = SPS_IPC_DEFAULT_LOGLEVEL;
ok = sps_bam_device_init(bam);
mutex_unlock(&bam->lock);
diff --git a/drivers/platform/msm/sps/sps_bam.c b/drivers/platform/msm/sps/sps_bam.c
index 38a6474..ff4c9c8 100644
--- a/drivers/platform/msm/sps/sps_bam.c
+++ b/drivers/platform/msm/sps/sps_bam.c
@@ -249,7 +249,7 @@
&ready);
} else {
SPS_DBG1(dev,
- "sps:bam_isr: BAM is not ready and thus skip IRQ for bam:%pa IRQ #:%d.\n",
+ "sps:bam_isr: BAM is not ready and thus skip IRQ for bam:%paP IRQ #:%d.\n",
BAM_ID(dev), irq);
}
} else {
diff --git a/drivers/platform/msm/sps/spsi.h b/drivers/platform/msm/sps/spsi.h
index 9e80381..3de72c4 100644
--- a/drivers/platform/msm/sps/spsi.h
+++ b/drivers/platform/msm/sps/spsi.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -52,8 +52,6 @@
#define MAX_MSG_LEN 80
#define SPS_IPC_LOGPAGES 10
#define SPS_IPC_REG_DUMP_FACTOR 3
-#define SPS_IPC_DEFAULT_LOGLEVEL 3
-#define SPS_IPC_MAX_LOGLEVEL 4
/* Connection mapping control struct */
struct sps_rm {
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index 1062fa4..b2cdc1a 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -1816,11 +1816,24 @@
return status;
}
+#define ACER_WMID_ACCEL_HID "BST0001"
+
static acpi_status __init acer_wmi_get_handle_cb(acpi_handle ah, u32 level,
void *ctx, void **retval)
{
+ struct acpi_device *dev;
+
+ if (!strcmp(ctx, "SENR")) {
+ if (acpi_bus_get_device(ah, &dev))
+ return AE_OK;
+ if (!strcmp(ACER_WMID_ACCEL_HID, acpi_device_hid(dev)))
+ return AE_OK;
+ } else
+ return AE_OK;
+
*(acpi_handle *)retval = ah;
- return AE_OK;
+
+ return AE_CTRL_TERMINATE;
}
static int __init acer_wmi_get_handle(const char *name, const char *prop,
@@ -1847,7 +1860,7 @@
{
int err;
- err = acer_wmi_get_handle("SENR", "BST0001", &gsensor_handle);
+ err = acer_wmi_get_handle("SENR", ACER_WMID_ACCEL_HID, &gsensor_handle);
if (err)
return err;
@@ -2185,10 +2198,11 @@
err = acer_wmi_input_setup();
if (err)
return err;
+ err = acer_wmi_accel_setup();
+ if (err)
+ return err;
}
- acer_wmi_accel_setup();
-
err = platform_driver_register(&acer_platform_driver);
if (err) {
pr_err("Unable to register platform driver\n");
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index be3bc2f..09cc64b 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -807,6 +807,7 @@
case 11:
case 7:
case 6:
+ case 1:
ideapad_input_report(priv, vpc_bit);
break;
case 5:
diff --git a/drivers/power/bq24190_charger.c b/drivers/power/bq24190_charger.c
index f5746b9..f05d277 100644
--- a/drivers/power/bq24190_charger.c
+++ b/drivers/power/bq24190_charger.c
@@ -144,10 +144,7 @@
* so the first read after a fault returns the latched value and subsequent
* reads return the current value. In order to return the fault status
* to the user, have the interrupt handler save the reg's value and retrieve
- * it in the appropriate health/status routine. Each routine has its own
- * flag indicating whether it should use the value stored by the last run
- * of the interrupt handler or do an actual reg read. That way each routine
- * can report back whatever fault may have occured.
+ * it in the appropriate health/status routine.
*/
struct bq24190_dev_info {
struct i2c_client *client;
@@ -159,10 +156,6 @@
unsigned int gpio_int;
unsigned int irq;
struct mutex f_reg_lock;
- bool first_time;
- bool charger_health_valid;
- bool battery_health_valid;
- bool battery_status_valid;
u8 f_reg;
u8 ss_reg;
u8 watchdog;
@@ -636,21 +629,11 @@
union power_supply_propval *val)
{
u8 v;
- int health, ret;
+ int health;
mutex_lock(&bdi->f_reg_lock);
-
- if (bdi->charger_health_valid) {
- v = bdi->f_reg;
- bdi->charger_health_valid = false;
- mutex_unlock(&bdi->f_reg_lock);
- } else {
- mutex_unlock(&bdi->f_reg_lock);
-
- ret = bq24190_read(bdi, BQ24190_REG_F, &v);
- if (ret < 0)
- return ret;
- }
+ v = bdi->f_reg;
+ mutex_unlock(&bdi->f_reg_lock);
if (v & BQ24190_REG_F_BOOST_FAULT_MASK) {
/*
@@ -937,18 +920,8 @@
int status, ret;
mutex_lock(&bdi->f_reg_lock);
-
- if (bdi->battery_status_valid) {
- chrg_fault = bdi->f_reg;
- bdi->battery_status_valid = false;
- mutex_unlock(&bdi->f_reg_lock);
- } else {
- mutex_unlock(&bdi->f_reg_lock);
-
- ret = bq24190_read(bdi, BQ24190_REG_F, &chrg_fault);
- if (ret < 0)
- return ret;
- }
+ chrg_fault = bdi->f_reg;
+ mutex_unlock(&bdi->f_reg_lock);
chrg_fault &= BQ24190_REG_F_CHRG_FAULT_MASK;
chrg_fault >>= BQ24190_REG_F_CHRG_FAULT_SHIFT;
@@ -996,21 +969,11 @@
union power_supply_propval *val)
{
u8 v;
- int health, ret;
+ int health;
mutex_lock(&bdi->f_reg_lock);
-
- if (bdi->battery_health_valid) {
- v = bdi->f_reg;
- bdi->battery_health_valid = false;
- mutex_unlock(&bdi->f_reg_lock);
- } else {
- mutex_unlock(&bdi->f_reg_lock);
-
- ret = bq24190_read(bdi, BQ24190_REG_F, &v);
- if (ret < 0)
- return ret;
- }
+ v = bdi->f_reg;
+ mutex_unlock(&bdi->f_reg_lock);
if (v & BQ24190_REG_F_BAT_FAULT_MASK) {
health = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
@@ -1197,9 +1160,12 @@
static irqreturn_t bq24190_irq_handler_thread(int irq, void *data)
{
struct bq24190_dev_info *bdi = data;
- bool alert_userspace = false;
+ const u8 battery_mask_ss = BQ24190_REG_SS_CHRG_STAT_MASK;
+ const u8 battery_mask_f = BQ24190_REG_F_BAT_FAULT_MASK
+ | BQ24190_REG_F_NTC_FAULT_MASK;
+ bool alert_charger = false, alert_battery = false;
u8 ss_reg = 0, f_reg = 0;
- int ret;
+ int i, ret;
pm_runtime_get_sync(bdi->dev);
@@ -1209,6 +1175,32 @@
goto out;
}
+ i = 0;
+ do {
+ ret = bq24190_read(bdi, BQ24190_REG_F, &f_reg);
+ if (ret < 0) {
+ dev_err(bdi->dev, "Can't read F reg: %d\n", ret);
+ goto out;
+ }
+ } while (f_reg && ++i < 2);
+
+ if (f_reg != bdi->f_reg) {
+ dev_info(bdi->dev,
+ "Fault: boost %d, charge %d, battery %d, ntc %d\n",
+ !!(f_reg & BQ24190_REG_F_BOOST_FAULT_MASK),
+ !!(f_reg & BQ24190_REG_F_CHRG_FAULT_MASK),
+ !!(f_reg & BQ24190_REG_F_BAT_FAULT_MASK),
+ !!(f_reg & BQ24190_REG_F_NTC_FAULT_MASK));
+
+ mutex_lock(&bdi->f_reg_lock);
+ if ((bdi->f_reg & battery_mask_f) != (f_reg & battery_mask_f))
+ alert_battery = true;
+ if ((bdi->f_reg & ~battery_mask_f) != (f_reg & ~battery_mask_f))
+ alert_charger = true;
+ bdi->f_reg = f_reg;
+ mutex_unlock(&bdi->f_reg_lock);
+ }
+
if (ss_reg != bdi->ss_reg) {
/*
* The device is in host mode so when PG_STAT goes from 1->0
@@ -1225,47 +1217,17 @@
ret);
}
+ if ((bdi->ss_reg & battery_mask_ss) != (ss_reg & battery_mask_ss))
+ alert_battery = true;
+ if ((bdi->ss_reg & ~battery_mask_ss) != (ss_reg & ~battery_mask_ss))
+ alert_charger = true;
bdi->ss_reg = ss_reg;
- alert_userspace = true;
}
- mutex_lock(&bdi->f_reg_lock);
-
- ret = bq24190_read(bdi, BQ24190_REG_F, &f_reg);
- if (ret < 0) {
- mutex_unlock(&bdi->f_reg_lock);
- dev_err(bdi->dev, "Can't read F reg: %d\n", ret);
- goto out;
- }
-
- if (f_reg != bdi->f_reg) {
- bdi->f_reg = f_reg;
- bdi->charger_health_valid = true;
- bdi->battery_health_valid = true;
- bdi->battery_status_valid = true;
-
- alert_userspace = true;
- }
-
- mutex_unlock(&bdi->f_reg_lock);
-
- /*
- * Sometimes bq24190 gives a steady trickle of interrupts even
- * though the watchdog timer is turned off and neither the STATUS
- * nor FAULT registers have changed. Weed out these sprurious
- * interrupts so userspace isn't alerted for no reason.
- * In addition, the chip always generates an interrupt after
- * register reset so we should ignore that one (the very first
- * interrupt received).
- */
- if (alert_userspace) {
- if (!bdi->first_time) {
- power_supply_changed(bdi->charger);
- power_supply_changed(bdi->battery);
- } else {
- bdi->first_time = false;
- }
- }
+ if (alert_charger)
+ power_supply_changed(bdi->charger);
+ if (alert_battery)
+ power_supply_changed(bdi->battery);
out:
pm_runtime_put_sync(bdi->dev);
@@ -1300,6 +1262,10 @@
goto out;
ret = bq24190_set_mode_host(bdi);
+ if (ret < 0)
+ goto out;
+
+ ret = bq24190_read(bdi, BQ24190_REG_SS, &bdi->ss_reg);
out:
pm_runtime_put_sync(bdi->dev);
return ret;
@@ -1375,10 +1341,8 @@
bdi->model = id->driver_data;
strncpy(bdi->model_name, id->name, I2C_NAME_SIZE);
mutex_init(&bdi->f_reg_lock);
- bdi->first_time = true;
- bdi->charger_health_valid = false;
- bdi->battery_health_valid = false;
- bdi->battery_status_valid = false;
+ bdi->f_reg = 0;
+ bdi->ss_reg = BQ24190_REG_SS_VBUS_STAT_MASK; /* impossible state */
i2c_set_clientdata(client, bdi);
@@ -1392,22 +1356,13 @@
return -EINVAL;
}
- ret = devm_request_threaded_irq(dev, bdi->irq, NULL,
- bq24190_irq_handler_thread,
- IRQF_TRIGGER_RISING | IRQF_ONESHOT,
- "bq24190-charger", bdi);
- if (ret < 0) {
- dev_err(dev, "Can't set up irq handler\n");
- goto out1;
- }
-
pm_runtime_enable(dev);
pm_runtime_resume(dev);
ret = bq24190_hw_init(bdi);
if (ret < 0) {
dev_err(dev, "Hardware init failed\n");
- goto out2;
+ goto out1;
}
charger_cfg.drv_data = bdi;
@@ -1418,7 +1373,7 @@
if (IS_ERR(bdi->charger)) {
dev_err(dev, "Can't register charger\n");
ret = PTR_ERR(bdi->charger);
- goto out2;
+ goto out1;
}
battery_cfg.drv_data = bdi;
@@ -1427,24 +1382,34 @@
if (IS_ERR(bdi->battery)) {
dev_err(dev, "Can't register battery\n");
ret = PTR_ERR(bdi->battery);
- goto out3;
+ goto out2;
}
ret = bq24190_sysfs_create_group(bdi);
if (ret) {
dev_err(dev, "Can't create sysfs entries\n");
+ goto out3;
+ }
+
+ ret = devm_request_threaded_irq(dev, bdi->irq, NULL,
+ bq24190_irq_handler_thread,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ "bq24190-charger", bdi);
+ if (ret < 0) {
+ dev_err(dev, "Can't set up irq handler\n");
goto out4;
}
return 0;
out4:
- power_supply_unregister(bdi->battery);
+ bq24190_sysfs_remove_group(bdi);
out3:
- power_supply_unregister(bdi->charger);
+ power_supply_unregister(bdi->battery);
out2:
- pm_runtime_disable(dev);
+ power_supply_unregister(bdi->charger);
out1:
+ pm_runtime_disable(dev);
if (bdi->gpio_int)
gpio_free(bdi->gpio_int);
@@ -1488,12 +1453,13 @@
struct i2c_client *client = to_i2c_client(dev);
struct bq24190_dev_info *bdi = i2c_get_clientdata(client);
- bdi->charger_health_valid = false;
- bdi->battery_health_valid = false;
- bdi->battery_status_valid = false;
+ bdi->f_reg = 0;
+ bdi->ss_reg = BQ24190_REG_SS_VBUS_STAT_MASK; /* impossible state */
pm_runtime_get_sync(bdi->dev);
bq24190_register_reset(bdi);
+ bq24190_set_mode_host(bdi);
+ bq24190_read(bdi, BQ24190_REG_SS, &bdi->ss_reg);
pm_runtime_put_sync(bdi->dev);
/* Things may have changed while suspended so alert upper layer */
diff --git a/drivers/power/htc_battery.c b/drivers/power/htc_battery.c
index af5fc2a..cf24526 100644
--- a/drivers/power/htc_battery.c
+++ b/drivers/power/htc_battery.c
@@ -9,7 +9,6 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
-#include <linux/alarmtimer.h>
#include <linux/delay.h>
#include <linux/fb.h>
#include <linux/gpio.h>
@@ -28,8 +27,14 @@
#define HTC_BATT_NAME "htc_battery"
-/* mutex for static data htc_batt_info and htc_batt_timer */
-static struct mutex htc_battery_lock;
+/*
+ * Protects access to the following static data:
+ *
+ * htc_batt_info
+ * htc_batt_timer
+ * g_htc_battery_probe_done
+ */
+DEFINE_MUTEX(htc_battery_lock);
static int full_level_dis_chg = 100;
module_param_named(
@@ -134,7 +139,6 @@
struct timer_list batt_timer;
struct workqueue_struct *batt_wq;
unsigned int time_out;
- struct alarm batt_check_wakeup_alarm;
};
static struct htc_battery_info htc_batt_info;
@@ -470,6 +474,11 @@
return ibat_ma * 1000;
}
+/*
+ * Accesses htc_batt_timer and g_htc_battery_probe_done.
+ *
+ * Caller must hold htc_battery_lock.
+ */
int htc_batt_schedule_batt_info_update(void)
{
if (!g_htc_battery_probe_done)
@@ -786,13 +795,6 @@
if (!htc_batt_info.usb_psy)
return -EPROBE_DEFER;
- htc_batt_info.htc_batt_cb.notifier_call = htc_notifier_batt_callback;
- rc = power_supply_reg_notifier(&htc_batt_info.htc_batt_cb);
- if (rc < 0) {
- BATT_ERR("Couldn't register psy notifier rc = %d\n", rc);
- return rc;
- }
-
rc = power_supply_get_property(htc_batt_info.bms_psy,
POWER_SUPPLY_PROP_BATTERY_TYPE, &ret);
if (rc < 0) {
@@ -906,18 +908,19 @@
return 0;
}
+/*
+ * htc_batt_timer.batt_timer can only be started after htc_batt_timer.batt_wq
+ * is initialized and g_htc_battery_probe_done is set to true, so the timer
+ * callback is properly synchronized with code that acquires htc_battery_lock.
+ *
+ * This timer callback *cannot* acquire htc_battery_lock because otherwise it
+ * will deadlock with the del_timer_sync() in batt_worker().
+ */
static void batt_regular_timer_handler(unsigned long data)
{
htc_batt_schedule_batt_info_update();
}
-static enum alarmtimer_restart
-batt_check_alarm_handler(struct alarm *alarm, ktime_t time)
-{
- /* BATT_LOG("alarm handler, but do nothing."); */
- return 0;
-}
-
/* accesses htc_batt_info, needs htc_battery_lock */
static int htc_battery_fb_register(void)
{
@@ -937,31 +940,46 @@
{
int rc = 0;
- mutex_init(&htc_battery_lock);
mutex_lock(&htc_battery_lock);
- INIT_WORK(&htc_batt_timer.batt_work, batt_worker);
- INIT_WORK(&htc_batt_info.batt_update_work, htc_battery_update_work);
- init_timer(&htc_batt_timer.batt_timer);
- htc_batt_timer.batt_timer.function = batt_regular_timer_handler;
- alarm_init(&htc_batt_timer.batt_check_wakeup_alarm, ALARM_REALTIME,
- batt_check_alarm_handler);
- htc_batt_timer.batt_wq = create_singlethread_workqueue("batt_timer");
-
- htc_batt_timer.time_out = BATT_TIMER_UPDATE_TIME;
- batt_set_check_timer(htc_batt_timer.time_out);
-
rc = htc_battery_probe_process();
if (rc < 0)
- goto done;
+ goto err_unlock;
+
+ INIT_WORK(&htc_batt_timer.batt_work, batt_worker);
+ INIT_WORK(&htc_batt_info.batt_update_work, htc_battery_update_work);
+ setup_timer(&htc_batt_timer.batt_timer, batt_regular_timer_handler, 0);
+ htc_batt_timer.batt_wq = create_singlethread_workqueue("batt_timer");
+ if (!htc_batt_timer.batt_wq) {
+ BATT_LOG("%s: create_singlethread_workqueue failed.\n",
+ __func__);
+ goto err_unlock;
+ }
+
+ htc_batt_timer.time_out = BATT_TIMER_UPDATE_TIME;
+
+ htc_batt_info.htc_batt_cb.notifier_call = htc_notifier_batt_callback;
+ rc = power_supply_reg_notifier(&htc_batt_info.htc_batt_cb);
+ if (rc < 0) {
+ BATT_LOG("%s: power_supply_reg_notifier failed.\n", __func__);
+ goto err_destroy_workqueue;
+ }
rc = htc_battery_fb_register();
- if (rc < 0)
- goto done;
+ if (rc < 0) {
+ BATT_LOG("%s: htc_battery_fb_register failed.\n", __func__);
+ goto err_power_supply_unreg_notifier;
+ }
g_htc_battery_probe_done = true;
+ batt_set_check_timer(htc_batt_timer.time_out);
+ goto err_unlock;
-done:
+err_power_supply_unreg_notifier:
+ power_supply_unreg_notifier(&htc_batt_info.htc_batt_cb);
+err_destroy_workqueue:
+ destroy_workqueue(htc_batt_timer.batt_wq);
+err_unlock:
mutex_unlock(&htc_battery_lock);
return rc;
}
@@ -981,6 +999,8 @@
static int __init htc_battery_init(void)
{
+ int ret;
+
wake_lock_init(&htc_batt_info.charger_exist_lock,
WAKE_LOCK_SUSPEND, "charger_exist_lock");
@@ -1010,8 +1030,17 @@
htc_batt_info.batt_thermal_limit_vol = 4200;
htc_batt_info.vbus = 0;
- platform_device_register(&htc_battery_pdev);
- platform_driver_register(&htc_battery_driver);
+ ret = platform_device_register(&htc_battery_pdev);
+ if (ret < 0) {
+ BATT_LOG("%s: device registration failed.\n", __func__);
+ return ret;
+ }
+
+ ret = platform_driver_register(&htc_battery_driver);
+ if (ret < 0) {
+ BATT_LOG("%s: driver registration failed.\n", __func__);
+ return ret;
+ }
BATT_LOG("%s done.\n", __func__);
@@ -1022,6 +1051,7 @@
{
platform_device_unregister(&htc_battery_pdev);
platform_driver_unregister(&htc_battery_driver);
+ BATT_LOG("%s done.\n", __func__);
}
module_init(htc_battery_init);
diff --git a/drivers/power/lge_battery.c b/drivers/power/lge_battery.c
index ab43355..137efc6 100644
--- a/drivers/power/lge_battery.c
+++ b/drivers/power/lge_battery.c
@@ -33,9 +33,7 @@
#define NORM_VOLT 4400000
#define LIM_VOLT 4100000
#define PARALLEL_VOLT 4450000
-#define SC_VOLT 4200000
#define CHG_CURRENT_MAX 3550000
-#define SC_CURRENT 2400000
#define LCD_ON_CURRENT 1000000
#define WATCH_DELAY 30000
#define DEMO_MODE_MAX 35
@@ -69,9 +67,16 @@
BM_HEALTH_MAX,
};
+enum bm_batt_maker {
+ BM_BATT_LGC,
+ BM_BATT_TOCAD,
+ BM_BATT_MAX,
+};
+
struct battery_manager {
struct device *dev;
struct power_supply *batt_psy;
+ struct power_supply *bms_psy;
struct power_supply *usb_psy;
struct power_supply *pl_psy;
struct notifier_block ps_nb;
@@ -84,6 +89,7 @@
struct mutex work_lock;
enum bm_therm_states therm_stat;
+ int batt_id;
int chg_present;
int chg_status;
int batt_soc;
@@ -93,8 +99,8 @@
int demo_iusb;
int demo_ibat;
int demo_enable;
+ int sc_status;
bool bm_active;
- bool sc_status;
};
struct bm_therm_table {
@@ -120,6 +126,34 @@
-EINVAL,
};
+struct bm_step_table {
+ int cur;
+ int volt;
+};
+
+static struct bm_step_table LGC_step_table[] = {
+ { -EINVAL, 4200000},
+ { 2400000, INT_MAX},
+};
+
+static struct bm_step_table TOCAD_step_table[] = {
+ { -EINVAL, 4200000},
+ { 3000000, 4350000},
+ { 2400000, INT_MAX},
+};
+
+struct bm_batt_id_table {
+ int min;
+ int max;
+ int step_max;
+ struct bm_step_table *step_table;
+};
+
+static struct bm_batt_id_table valid_batt_id[BM_BATT_MAX] = {
+ { 16000, 24000, 1, LGC_step_table},
+ { 44800, 67200, 2, TOCAD_step_table},
+};
+
static int debug_mask = ERROR | INTERRUPT | MISC | VERBOSE;
static int demo_mode;
@@ -324,28 +358,36 @@
void bm_check_step_charging(struct battery_manager *bm, int volt)
{
- int rc = 0;
+ int rc, stat;
- if (!bm->bm_active) {
- if (bm->sc_status) {
- rc = bm_vote_fcc(bm, BM_REASON_STEP, -EINVAL);
- if (rc < 0) {
- pr_bm(ERROR,
- "Couldn't set ibat curr rc=%d\n", rc);
- return;
- }
- bm->sc_status = false;
+ if (!bm->bm_active && bm->sc_status) {
+ rc = bm_vote_fcc(bm, BM_REASON_STEP, -EINVAL);
+ if (rc < 0) {
+ pr_bm(ERROR,
+ "Couldn't set ibat curr rc=%d\n", rc);
+ return;
}
+ bm->sc_status = 0;
return;
}
- if (!bm->sc_status && volt >= SC_VOLT) {
- rc = bm_vote_fcc(bm, BM_REASON_STEP, SC_CURRENT);
+ for (stat = bm->sc_status;
+ stat < valid_batt_id[bm->batt_id].step_max; stat++) {
+ if (volt < valid_batt_id[bm->batt_id].step_table[stat].volt)
+ break;
+ }
+
+ if (bm->sc_status != stat) {
+ pr_bm(MISC, "STATE[%d->%d] CUR[%d] VOL[%d]\n",
+ bm->sc_status, stat,
+ valid_batt_id[bm->batt_id].step_table[stat].cur, volt);
+ rc = bm_vote_fcc(bm, BM_REASON_STEP,
+ valid_batt_id[bm->batt_id].step_table[stat].cur);
if (rc < 0) {
pr_bm(ERROR, "Couldn't set ibat curr rc=%d\n", rc);
return;
}
- bm->sc_status = true;
+ bm->sc_status = stat;
}
}
@@ -386,6 +428,7 @@
struct battery_manager,
bm_watch.work);
int rc, batt_volt, batt_temp, ibat_max = 0;
+ int batt_ocv, batt_res, charge_counter, charge_raw;
mutex_lock(&bm->work_lock);
@@ -413,10 +456,40 @@
POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
&ibat_max);
+ rc = bm_get_property(bm->bms_psy,
+ POWER_SUPPLY_PROP_VOLTAGE_OCV, &batt_ocv);
+ if (rc < 0) {
+ pr_bm(ERROR, "Couldn't get BATT_OCV = %d\n", rc);
+ goto error;
+ }
+
+ rc = bm_get_property(bm->bms_psy,
+ POWER_SUPPLY_PROP_RESISTANCE, &batt_res);
+ if (rc < 0) {
+ pr_bm(ERROR, "Couldn't get BATT_RES = %d\n", rc);
+ goto error;
+ }
+
+ rc = bm_get_property(bm->bms_psy,
+ POWER_SUPPLY_PROP_CHARGE_COUNTER, &charge_counter);
+ if (rc < 0) {
+ pr_bm(ERROR, "Couldn't get CHARGE_COUNTER = %d\n", rc);
+ goto error;
+ }
+
+ rc = bm_get_property(bm->bms_psy,
+ POWER_SUPPLY_PROP_CHARGE_FULL, &charge_raw);
+ if (rc < 0) {
+ pr_bm(ERROR, "Couldn't get CHARGE_RAW = %d\n", rc);
+ goto error;
+ }
+
pr_bm(VERBOSE, "PRESENT:%d, CHG_STAT:%d, THM_STAT:%d, " \
- "BAT_TEMP:%d, BAT_VOLT:%d, VOTE_CUR:%d, SET_CUR:%d,\n",
- bm->chg_present, bm->chg_status, bm->therm_stat,
- batt_temp, batt_volt, bm_vote_fcc_get(bm), ibat_max);
+ "BAT_TEMP:%d, BAT_VOLT:%d, BAT_OCV:%d, VOTE_CUR:%d, " \
+ "SET_CUR:%d, BATT_RES:%d, BATT_CC:%d, BATT_CAPA:%d \n",
+ bm->chg_present, bm->chg_status, bm->therm_stat,
+ batt_temp, batt_volt, batt_ocv, bm_vote_fcc_get(bm),
+ ibat_max, batt_res, charge_counter, charge_raw);
error:
mutex_unlock(&bm->work_lock);
@@ -586,9 +659,10 @@
static int bm_init(struct battery_manager *bm)
{
- int rc, batt_temp, batt_volt = 0;
+ int i, rc, batt_temp, batt_volt, batt_id = 0;
bm->fb_state = 0;
+ bm->sc_status = 0;
bm->therm_stat = BM_HEALTH_GOOD;
bm->bm_vote_fcc_reason = -EINVAL;
bm->bm_vote_fcc_value = -EINVAL;
@@ -611,6 +685,12 @@
return -ENODEV;
}
+ bm->bms_psy = power_supply_get_by_name("bms");
+ if (!bm->bms_psy) {
+ pr_bm(ERROR, "Couldn't get bms_psy\n");
+ return -ENODEV;
+ }
+
rc = bm_get_property(bm->batt_psy,
POWER_SUPPLY_PROP_STATUS, &bm->chg_status);
if (rc < 0)
@@ -636,6 +716,23 @@
if (rc < 0)
bm->chg_present = 0;
+ rc = bm_get_property(bm->bms_psy,
+ POWER_SUPPLY_PROP_RESISTANCE_ID, &batt_id);
+ if (rc < 0) {
+ bm->batt_id = BM_BATT_TOCAD;
+ } else {
+ for (i = 0; i < BM_BATT_MAX; i++) {
+ if (valid_batt_id[i].min <= batt_id &&
+ valid_batt_id[i].max >= batt_id)
+ break;
+ }
+ if (i == BM_BATT_MAX) {
+ pr_bm(ERROR, "Couldn't get valid battery id\n");
+ return -EINVAL;
+ }
+ bm->batt_id = i;
+ }
+
if (bm->chg_present) {
bm->demo_iusb = 1;
bm->demo_ibat = 1;
diff --git a/drivers/power/reset/at91-poweroff.c b/drivers/power/reset/at91-poweroff.c
index e9e24df..2579f02 100644
--- a/drivers/power/reset/at91-poweroff.c
+++ b/drivers/power/reset/at91-poweroff.c
@@ -14,9 +14,12 @@
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/printk.h>
+#include <soc/at91/at91sam9_ddrsdr.h>
+
#define AT91_SHDW_CR 0x00 /* Shut Down Control Register */
#define AT91_SHDW_SHDW BIT(0) /* Shut Down command */
#define AT91_SHDW_KEY (0xa5 << 24) /* KEY Password */
@@ -50,6 +53,7 @@
static void __iomem *at91_shdwc_base;
static struct clk *sclk;
+static void __iomem *mpddrc_base;
static void __init at91_wakeup_status(void)
{
@@ -73,6 +77,29 @@
writel(AT91_SHDW_KEY | AT91_SHDW_SHDW, at91_shdwc_base + AT91_SHDW_CR);
}
+static void at91_lpddr_poweroff(void)
+{
+ asm volatile(
+ /* Align to cache lines */
+ ".balign 32\n\t"
+
+ /* Ensure AT91_SHDW_CR is in the TLB by reading it */
+ " ldr r6, [%2, #" __stringify(AT91_SHDW_CR) "]\n\t"
+
+ /* Power down SDRAM0 */
+ " str %1, [%0, #" __stringify(AT91_DDRSDRC_LPR) "]\n\t"
+ /* Shutdown CPU */
+ " str %3, [%2, #" __stringify(AT91_SHDW_CR) "]\n\t"
+
+ " b .\n\t"
+ :
+ : "r" (mpddrc_base),
+ "r" cpu_to_le32(AT91_DDRSDRC_LPDDR2_PWOFF),
+ "r" (at91_shdwc_base),
+ "r" cpu_to_le32(AT91_SHDW_KEY | AT91_SHDW_SHDW)
+ : "r0");
+}
+
static int at91_poweroff_get_wakeup_mode(struct device_node *np)
{
const char *pm;
@@ -124,6 +151,8 @@
static int __init at91_poweroff_probe(struct platform_device *pdev)
{
struct resource *res;
+ struct device_node *np;
+ u32 ddr_type;
int ret;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -150,12 +179,30 @@
pm_power_off = at91_poweroff;
+ np = of_find_compatible_node(NULL, NULL, "atmel,sama5d3-ddramc");
+ if (!np)
+ return 0;
+
+ mpddrc_base = of_iomap(np, 0);
+ of_node_put(np);
+
+ if (!mpddrc_base)
+ return 0;
+
+ ddr_type = readl(mpddrc_base + AT91_DDRSDRC_MDR) & AT91_DDRSDRC_MD;
+ if ((ddr_type == AT91_DDRSDRC_MD_LPDDR2) ||
+ (ddr_type == AT91_DDRSDRC_MD_LPDDR3))
+ pm_power_off = at91_lpddr_poweroff;
+ else
+ iounmap(mpddrc_base);
+
return 0;
}
static int __exit at91_poweroff_remove(struct platform_device *pdev)
{
- if (pm_power_off == at91_poweroff)
+ if (pm_power_off == at91_poweroff ||
+ pm_power_off == at91_lpddr_poweroff)
pm_power_off = NULL;
clk_disable_unprepare(sclk);
@@ -163,6 +210,11 @@
return 0;
}
+static const struct of_device_id at91_ramc_of_match[] = {
+ { .compatible = "atmel,sama5d3-ddramc", },
+ { /* sentinel */ }
+};
+
static const struct of_device_id at91_poweroff_of_match[] = {
{ .compatible = "atmel,at91sam9260-shdwc", },
{ .compatible = "atmel,at91sam9rl-shdwc", },
diff --git a/drivers/power/supply/qcom/fg-core.h b/drivers/power/supply/qcom/fg-core.h
index 9f36d4e..5af5f8e 100644
--- a/drivers/power/supply/qcom/fg-core.h
+++ b/drivers/power/supply/qcom/fg-core.h
@@ -232,6 +232,7 @@
struct fg_dt_props {
bool force_load_profile;
bool hold_soc_while_full;
+ bool linearize_soc;
bool auto_recharge_soc;
int cutoff_volt_mv;
int empty_volt_mv;
@@ -266,6 +267,7 @@
int esr_pulse_thresh_ma;
int esr_meas_curr_ma;
int fg_comp_factor;
+ int ki_coeff_full_soc_dischg;
int jeita_thresholds[NUM_JEITA_LEVELS];
int ki_coeff_soc[KI_COEFF_SOC_LEVELS];
int ki_coeff_med_dischg[KI_COEFF_SOC_LEVELS];
diff --git a/drivers/power/supply/qcom/qpnp-fg-gen3.c b/drivers/power/supply/qcom/qpnp-fg-gen3.c
index ff3d97f..9119387 100644
--- a/drivers/power/supply/qcom/qpnp-fg-gen3.c
+++ b/drivers/power/supply/qcom/qpnp-fg-gen3.c
@@ -566,6 +566,21 @@
return 0;
}
+#define BATT_SOC_32BIT GENMASK(31, 0)
+static int fg_get_charge_counter_shadow(struct fg_chip *chip, int *val)
+{
+ int rc, batt_soc;
+
+ rc = fg_get_sram_prop(chip, FG_SRAM_BATT_SOC, &batt_soc);
+ if (rc < 0) {
+ pr_err("Error in getting BATT_SOC, rc=%d\n", rc);
+ return rc;
+ }
+
+ *val = div_u64((u32)batt_soc * chip->cl.learned_cc_uah, BATT_SOC_32BIT);
+ return 0;
+}
+
static int fg_get_charge_counter(struct fg_chip *chip, int *val)
{
int rc, cc_soc;
@@ -965,7 +980,7 @@
if (rc < 0)
return rc;
- if (chip->delta_soc > 0)
+ if (chip->dt.linearize_soc && chip->delta_soc > 0)
*val = chip->maint_soc;
else
*val = msoc;
@@ -1042,6 +1057,7 @@
struct device_node *batt_node, *profile_node;
const char *data;
int rc, len;
+ u32 temp;
batt_node = of_find_node_by_name(node, "qcom,battery-data");
if (!batt_node) {
@@ -1087,6 +1103,14 @@
chip->bp.vbatt_full_mv = -EINVAL;
}
+ rc = of_property_read_u32(profile_node, "qcom,fg-chg-term-current", &temp);
+ if (rc >= 0)
+ chip->dt.chg_term_curr_ma = temp;
+
+ rc = of_property_read_u32(profile_node, "qcom,fg-sys-term-current", &temp);
+ if (rc >= 0)
+ chip->dt.sys_term_curr_ma = temp;
+
data = of_get_property(profile_node, "qcom,fg-profile-data", &len);
if (!data) {
pr_err("No profile data available\n");
@@ -1291,6 +1315,43 @@
return true;
}
+static void fg_prime_cc_soc_sw(struct fg_chip *chip)
+{
+ int rc, batt_soc, cc_soc_sw = 0;
+
+ /* If capacity learning is active, don't do anything */
+ if (chip->cl.active)
+ return;
+
+ /* Do nothing if charge_status doesn't change */
+ if (chip->charge_status == chip->prev_charge_status)
+ return;
+
+ /* Prime cc_soc_sw based off charging status */
+ if (chip->charge_status == POWER_SUPPLY_STATUS_DISCHARGING) {
+ rc = fg_get_sram_prop(chip, FG_SRAM_BATT_SOC, &batt_soc);
+ if (rc < 0) {
+ pr_err("Error in getting BATT_SOC, rc=%d\n", rc);
+ return;
+ }
+ cc_soc_sw = div_u64((u32)batt_soc * CC_SOC_30BIT,
+ BATT_SOC_32BIT);
+ } else if (chip->charge_done) {
+ cc_soc_sw = CC_SOC_30BIT;
+ }
+
+ if (!cc_soc_sw)
+ return;
+
+ rc = fg_sram_write(chip, chip->sp[FG_SRAM_CC_SOC_SW].addr_word,
+ chip->sp[FG_SRAM_CC_SOC_SW].addr_byte, (u8 *)&cc_soc_sw,
+ chip->sp[FG_SRAM_CC_SOC_SW].len, FG_IMA_ATOMIC);
+ if (rc < 0)
+ pr_err("Error in writing cc_soc_sw, rc=%d\n", rc);
+ else
+ fg_dbg(chip, FG_STATUS, "cc_soc_sw: %x\n", cc_soc_sw);
+}
+
static int fg_save_learned_cap_to_sram(struct fg_chip *chip)
{
int16_t cc_mah;
@@ -1467,7 +1528,6 @@
return 0;
}
-#define BATT_SOC_32BIT GENMASK(31, 0)
static int fg_cap_learning_begin(struct fg_chip *chip, u32 batt_soc)
{
int rc, cc_soc_sw, batt_soc_msb;
@@ -1664,6 +1724,8 @@
if (batt_temp < 0)
ki_coeff_full_soc = 0;
+ else if (chip->charge_status == POWER_SUPPLY_STATUS_DISCHARGING)
+ ki_coeff_full_soc = chip->dt.ki_coeff_full_soc_dischg;
else
ki_coeff_full_soc = KI_COEFF_FULL_SOC_DEFAULT;
@@ -1716,12 +1778,38 @@
return 0;
}
+static int fg_configure_full_soc(struct fg_chip *chip, int bsoc)
+{
+ int rc;
+ u8 full_soc[2] = {0xFF, 0xFF};
+
+ /*
+ * Once SOC masking condition is cleared, FULL_SOC and MONOTONIC_SOC
+ * needs to be updated to reflect the same. Write battery SOC to
+ * FULL_SOC and write a full value to MONOTONIC_SOC.
+ */
+ rc = fg_sram_write(chip, FULL_SOC_WORD, FULL_SOC_OFFSET,
+ (u8 *)&bsoc, 2, FG_IMA_ATOMIC);
+ if (rc < 0) {
+ pr_err("failed to write full_soc rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = fg_sram_write(chip, MONOTONIC_SOC_WORD, MONOTONIC_SOC_OFFSET,
+ full_soc, 2, FG_IMA_ATOMIC);
+ if (rc < 0) {
+ pr_err("failed to write monotonic_soc rc=%d\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
#define AUTO_RECHG_VOLT_LOW_LIMIT_MV 3700
static int fg_charge_full_update(struct fg_chip *chip)
{
union power_supply_propval prop = {0, };
int rc, msoc, bsoc, recharge_soc, msoc_raw;
- u8 full_soc[2] = {0xFF, 0xFF};
if (!chip->dt.hold_soc_while_full)
return 0;
@@ -1756,7 +1844,12 @@
pr_err("Error in getting msoc, rc=%d\n", rc);
goto out;
}
- msoc_raw = DIV_ROUND_CLOSEST(msoc * FULL_SOC_RAW, FULL_CAPACITY);
+
+ rc = fg_get_msoc_raw(chip, &msoc_raw);
+ if (rc < 0) {
+ pr_err("Error in getting msoc_raw, rc=%d\n", rc);
+ goto out;
+ }
fg_dbg(chip, FG_STATUS, "msoc: %d bsoc: %x health: %d status: %d full: %d\n",
msoc, bsoc, chip->health, chip->charge_status,
@@ -1780,24 +1873,25 @@
fg_dbg(chip, FG_STATUS, "Terminated charging @ SOC%d\n",
msoc);
}
- } else if (msoc_raw <= recharge_soc && chip->charge_full) {
- chip->delta_soc = FULL_CAPACITY - msoc;
+ } else if ((msoc_raw <= recharge_soc || !chip->charge_done)
+ && chip->charge_full) {
+ if (chip->dt.linearize_soc) {
+ chip->delta_soc = FULL_CAPACITY - msoc;
- /*
- * We're spreading out the delta SOC over every 10% change
- * in monotonic SOC. We cannot spread more than 9% in the
- * range of 0-100 skipping the first 10%.
- */
- if (chip->delta_soc > 9) {
- chip->delta_soc = 0;
- chip->maint_soc = 0;
- } else {
- chip->maint_soc = FULL_CAPACITY;
- chip->last_msoc = msoc;
+ /*
+ * We're spreading out the delta SOC over every 10%
+ * change in monotonic SOC. We cannot spread more than
+ * 9% in the range of 0-100 skipping the first 10%.
+ */
+ if (chip->delta_soc > 9) {
+ chip->delta_soc = 0;
+ chip->maint_soc = 0;
+ } else {
+ chip->maint_soc = FULL_CAPACITY;
+ chip->last_msoc = msoc;
+ }
}
- chip->charge_full = false;
-
/*
* Raise the recharge voltage so that VBAT_LT_RECHG signal
* will be asserted soon as battery SOC had dropped below
@@ -1810,35 +1904,23 @@
rc);
goto out;
}
+
+ /*
+ * If charge_done is still set, wait for recharging or
+ * discharging to happen.
+ */
+ if (chip->charge_done)
+ goto out;
+
+ rc = fg_configure_full_soc(chip, bsoc);
+ if (rc < 0)
+ goto out;
+
+ chip->charge_full = false;
fg_dbg(chip, FG_STATUS, "msoc_raw = %d bsoc: %d recharge_soc: %d delta_soc: %d\n",
msoc_raw, bsoc >> 8, recharge_soc, chip->delta_soc);
- } else {
- goto out;
}
- if (!chip->charge_full)
- goto out;
-
- /*
- * During JEITA conditions, charge_full can happen early. FULL_SOC
- * and MONOTONIC_SOC needs to be updated to reflect the same. Write
- * battery SOC to FULL_SOC and write a full value to MONOTONIC_SOC.
- */
- rc = fg_sram_write(chip, FULL_SOC_WORD, FULL_SOC_OFFSET, (u8 *)&bsoc, 2,
- FG_IMA_ATOMIC);
- if (rc < 0) {
- pr_err("failed to write full_soc rc=%d\n", rc);
- goto out;
- }
-
- rc = fg_sram_write(chip, MONOTONIC_SOC_WORD, MONOTONIC_SOC_OFFSET,
- full_soc, 2, FG_IMA_ATOMIC);
- if (rc < 0) {
- pr_err("failed to write monotonic_soc rc=%d\n", rc);
- goto out;
- }
-
- fg_dbg(chip, FG_STATUS, "Set charge_full to true @ soc %d\n", msoc);
out:
mutex_unlock(&chip->charge_full_lock);
return rc;
@@ -2242,9 +2324,6 @@
static void fg_batt_avg_update(struct fg_chip *chip)
{
- if (chip->charge_status == chip->prev_charge_status)
- return;
-
cancel_delayed_work_sync(&chip->batt_avg_work);
fg_circ_buf_clr(&chip->ibatt_circ_buf);
fg_circ_buf_clr(&chip->vbatt_circ_buf);
@@ -2274,7 +2353,6 @@
goto out;
}
- chip->prev_charge_status = chip->charge_status;
chip->charge_status = prop.intval;
rc = power_supply_get_property(chip->batt_psy,
POWER_SUPPLY_PROP_CHARGE_TYPE, &prop);
@@ -2295,6 +2373,7 @@
if (chip->cyc_ctr.en)
schedule_work(&chip->cycle_count_work);
+ fg_prime_cc_soc_sw(chip);
fg_cap_learning_update(chip);
rc = fg_charge_full_update(chip);
@@ -2331,7 +2410,7 @@
}
fg_batt_avg_update(chip);
-
+ chip->prev_charge_status = chip->charge_status;
out:
fg_dbg(chip, FG_POWER_SUPPLY, "charge_status:%d charge_type:%d charge_done:%d\n",
chip->charge_status, chip->charge_type, chip->charge_done);
@@ -3054,6 +3133,9 @@
{
int rc = 0, msoc;
+ if (!chip->dt.linearize_soc)
+ return 0;
+
mutex_lock(&chip->charge_full_lock);
if (chip->delta_soc <= 0)
goto out;
@@ -3138,6 +3220,9 @@
case POWER_SUPPLY_PROP_CAPACITY:
rc = fg_get_prop_capacity(chip, &pval->intval);
break;
+ case POWER_SUPPLY_PROP_CAPACITY_RAW:
+ rc = fg_get_msoc_raw(chip, &pval->intval);
+ break;
case POWER_SUPPLY_PROP_VOLTAGE_NOW:
if (chip->battery_missing)
pval->intval = 3700000;
@@ -3186,6 +3271,9 @@
case POWER_SUPPLY_PROP_CHARGE_COUNTER:
rc = fg_get_charge_counter(chip, &pval->intval);
break;
+ case POWER_SUPPLY_PROP_CHARGE_COUNTER_SHADOW:
+ rc = fg_get_charge_counter_shadow(chip, &pval->intval);
+ break;
case POWER_SUPPLY_PROP_TIME_TO_FULL_AVG:
rc = fg_get_time_to_full(chip, &pval->intval);
break;
@@ -3286,6 +3374,7 @@
static enum power_supply_property fg_psy_props[] = {
POWER_SUPPLY_PROP_CAPACITY,
+ POWER_SUPPLY_PROP_CAPACITY_RAW,
POWER_SUPPLY_PROP_TEMP,
POWER_SUPPLY_PROP_VOLTAGE_NOW,
POWER_SUPPLY_PROP_VOLTAGE_OCV,
@@ -3301,6 +3390,7 @@
POWER_SUPPLY_PROP_CHARGE_NOW,
POWER_SUPPLY_PROP_CHARGE_FULL,
POWER_SUPPLY_PROP_CHARGE_COUNTER,
+ POWER_SUPPLY_PROP_CHARGE_COUNTER_SHADOW,
POWER_SUPPLY_PROP_TIME_TO_FULL_AVG,
POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG,
POWER_SUPPLY_PROP_SOC_REPORTING_READY,
@@ -4058,7 +4148,11 @@
static int fg_parse_ki_coefficients(struct fg_chip *chip)
{
struct device_node *node = chip->dev->of_node;
- int rc, i;
+ int rc, i, temp;
+
+ rc = of_property_read_u32(node, "qcom,ki-coeff-full-dischg", &temp);
+ if (!rc)
+ chip->dt.ki_coeff_full_soc_dischg = temp;
rc = fg_parse_dt_property_u32_array(node, "qcom,ki-coeff-soc-dischg",
chip->dt.ki_coeff_soc, KI_COEFF_SOC_LEVELS);
@@ -4403,6 +4497,9 @@
chip->dt.hold_soc_while_full = of_property_read_bool(node,
"qcom,hold-soc-while-full");
+ chip->dt.linearize_soc = of_property_read_bool(node,
+ "qcom,linearize-soc");
+
rc = fg_parse_ki_coefficients(chip);
if (rc < 0)
pr_err("Error in parsing Ki coefficients, rc=%d\n", rc);
@@ -4818,6 +4915,29 @@
return 0;
}
+static void fg_gen3_shutdown(struct platform_device *pdev)
+{
+ struct fg_chip *chip = dev_get_drvdata(&pdev->dev);
+ int rc, bsoc;
+
+ if (chip->charge_full) {
+ rc = fg_get_sram_prop(chip, FG_SRAM_BATT_SOC, &bsoc);
+ if (rc < 0) {
+ pr_err("Error in getting BATT_SOC, rc=%d\n", rc);
+ return;
+ }
+
+ /* We need the 2 most significant bytes here */
+ bsoc = (u32)bsoc >> 16;
+
+ rc = fg_configure_full_soc(chip, bsoc);
+ if (rc < 0) {
+ pr_err("Error in configuring full_soc, rc=%d\n", rc);
+ return;
+ }
+ }
+}
+
static const struct of_device_id fg_gen3_match_table[] = {
{.compatible = FG_GEN3_DEV_NAME},
{},
@@ -4832,6 +4952,7 @@
},
.probe = fg_gen3_probe,
.remove = fg_gen3_remove,
+ .shutdown = fg_gen3_shutdown,
};
static int __init fg_gen3_init(void)
diff --git a/drivers/power/supply/qcom/smb138x-charger.c b/drivers/power/supply/qcom/smb138x-charger.c
index 403078b..cd81f9b 100644
--- a/drivers/power/supply/qcom/smb138x-charger.c
+++ b/drivers/power/supply/qcom/smb138x-charger.c
@@ -857,6 +857,13 @@
}
}
+ /* configure to a fixed 700 kHz freq to avoid Tdie errors */
+ rc = smblib_set_charge_param(chg, &chg->param.freq_buck, 700);
+ if (rc < 0) {
+ pr_err("Couldn't configure 700Khz switch freq rc=%d\n", rc);
+ return rc;
+ }
+
/* enable watchdog bark and bite interrupts, and disable the watchdog */
rc = smblib_masked_write(chg, WD_CFG_REG, WDOG_TIMER_EN_BIT
| WDOG_TIMER_EN_ON_PLUGIN_BIT | BITE_WDOG_INT_EN_BIT
diff --git a/drivers/regulator/rpm-smd-regulator.c b/drivers/regulator/rpm-smd-regulator.c
index d26fd3b..dbee7f0 100644
--- a/drivers/regulator/rpm-smd-regulator.c
+++ b/drivers/regulator/rpm-smd-regulator.c
@@ -1691,6 +1691,9 @@
|= REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_DRMS;
init_data->constraints.valid_modes_mask
|= REGULATOR_MODE_NORMAL | REGULATOR_MODE_IDLE;
+ if (regulator_type == RPM_REGULATOR_TYPE_BOB)
+ init_data->constraints.valid_modes_mask
+ |= REGULATOR_MODE_FAST;
}
reg->rdesc.name = init_data->constraints.name;
diff --git a/drivers/regulator/tps65023-regulator.c b/drivers/regulator/tps65023-regulator.c
index d2c3d7c..5ca6d21 100644
--- a/drivers/regulator/tps65023-regulator.c
+++ b/drivers/regulator/tps65023-regulator.c
@@ -311,8 +311,7 @@
/* Enable setting output voltage by I2C */
regmap_update_bits(tps->regmap, TPS65023_REG_CON_CTRL2,
- TPS65023_REG_CTRL2_CORE_ADJ,
- TPS65023_REG_CTRL2_CORE_ADJ);
+ TPS65023_REG_CTRL2_CORE_ADJ, 0);
return 0;
}
diff --git a/drivers/rtc/rtc-s35390a.c b/drivers/rtc/rtc-s35390a.c
index f40afdd0..00662dd 100644
--- a/drivers/rtc/rtc-s35390a.c
+++ b/drivers/rtc/rtc-s35390a.c
@@ -15,6 +15,7 @@
#include <linux/bitrev.h>
#include <linux/bcd.h>
#include <linux/slab.h>
+#include <linux/delay.h>
#define S35390A_CMD_STATUS1 0
#define S35390A_CMD_STATUS2 1
@@ -34,10 +35,14 @@
#define S35390A_ALRM_BYTE_HOURS 1
#define S35390A_ALRM_BYTE_MINS 2
+/* flags for STATUS1 */
#define S35390A_FLAG_POC 0x01
#define S35390A_FLAG_BLD 0x02
+#define S35390A_FLAG_INT2 0x04
#define S35390A_FLAG_24H 0x40
#define S35390A_FLAG_RESET 0x80
+
+/* flag for STATUS2 */
#define S35390A_FLAG_TEST 0x01
#define S35390A_INT2_MODE_MASK 0xF0
@@ -94,19 +99,63 @@
return 0;
}
-static int s35390a_reset(struct s35390a *s35390a)
+/*
+ * Returns <0 on error, 0 if rtc is setup fine and 1 if the chip was reset.
+ * To keep the information if an irq is pending, pass the value read from
+ * STATUS1 to the caller.
+ */
+static int s35390a_reset(struct s35390a *s35390a, char *status1)
{
- char buf[1];
+ char buf;
+ int ret;
+ unsigned initcount = 0;
- if (s35390a_get_reg(s35390a, S35390A_CMD_STATUS1, buf, sizeof(buf)) < 0)
- return -EIO;
+ ret = s35390a_get_reg(s35390a, S35390A_CMD_STATUS1, status1, 1);
+ if (ret < 0)
+ return ret;
- if (!(buf[0] & (S35390A_FLAG_POC | S35390A_FLAG_BLD)))
+ if (*status1 & S35390A_FLAG_POC)
+ /*
+ * Do not communicate for 0.5 seconds since the power-on
+ * detection circuit is in operation.
+ */
+ msleep(500);
+ else if (!(*status1 & S35390A_FLAG_BLD))
+ /*
+ * If both POC and BLD are unset, everything is fine.
+ */
return 0;
- buf[0] |= (S35390A_FLAG_RESET | S35390A_FLAG_24H);
- buf[0] &= 0xf0;
- return s35390a_set_reg(s35390a, S35390A_CMD_STATUS1, buf, sizeof(buf));
+ /*
+ * At least one of POC and BLD is set, so reinitialise chip. Keeping
+ * this information in the hardware to know later that the time isn't
+ * valid is unfortunately not possible because POC and BLD are cleared
+ * on read. So the reset is best done now.
+ *
+ * The 24H bit is kept over reset, so set it already here.
+ */
+initialize:
+ *status1 = S35390A_FLAG_24H;
+ buf = S35390A_FLAG_RESET | S35390A_FLAG_24H;
+ ret = s35390a_set_reg(s35390a, S35390A_CMD_STATUS1, &buf, 1);
+
+ if (ret < 0)
+ return ret;
+
+ ret = s35390a_get_reg(s35390a, S35390A_CMD_STATUS1, &buf, 1);
+ if (ret < 0)
+ return ret;
+
+ if (buf & (S35390A_FLAG_POC | S35390A_FLAG_BLD)) {
+ /* Try up to five times to reset the chip */
+ if (initcount < 5) {
+ ++initcount;
+ goto initialize;
+ } else
+ return -EIO;
+ }
+
+ return 1;
}
static int s35390a_disable_test_mode(struct s35390a *s35390a)
@@ -242,6 +291,8 @@
if (alm->time.tm_wday != -1)
buf[S35390A_ALRM_BYTE_WDAY] = bin2bcd(alm->time.tm_wday) | 0x80;
+ else
+ buf[S35390A_ALRM_BYTE_WDAY] = 0;
buf[S35390A_ALRM_BYTE_HOURS] = s35390a_hr2reg(s35390a,
alm->time.tm_hour) | 0x80;
@@ -265,27 +316,61 @@
char buf[3], sts;
int i, err;
+ /*
+ * initialize all members to -1 to signal the core that they are not
+ * defined by the hardware.
+ */
+ alm->time.tm_sec = -1;
+ alm->time.tm_min = -1;
+ alm->time.tm_hour = -1;
+ alm->time.tm_mday = -1;
+ alm->time.tm_mon = -1;
+ alm->time.tm_year = -1;
+ alm->time.tm_wday = -1;
+ alm->time.tm_yday = -1;
+ alm->time.tm_isdst = -1;
+
err = s35390a_get_reg(s35390a, S35390A_CMD_STATUS2, &sts, sizeof(sts));
if (err < 0)
return err;
- if (bitrev8(sts) != S35390A_INT2_MODE_ALARM)
- return -EINVAL;
+ if ((bitrev8(sts) & S35390A_INT2_MODE_MASK) != S35390A_INT2_MODE_ALARM) {
+ /*
+ * When the alarm isn't enabled, the register to configure
+ * the alarm time isn't accessible.
+ */
+ alm->enabled = 0;
+ return 0;
+ } else {
+ alm->enabled = 1;
+ }
err = s35390a_get_reg(s35390a, S35390A_CMD_INT2_REG1, buf, sizeof(buf));
if (err < 0)
return err;
/* This chip returns the bits of each byte in reverse order */
- for (i = 0; i < 3; ++i) {
+ for (i = 0; i < 3; ++i)
buf[i] = bitrev8(buf[i]);
- buf[i] &= ~0x80;
- }
- alm->time.tm_wday = bcd2bin(buf[S35390A_ALRM_BYTE_WDAY]);
- alm->time.tm_hour = s35390a_reg2hr(s35390a,
- buf[S35390A_ALRM_BYTE_HOURS]);
- alm->time.tm_min = bcd2bin(buf[S35390A_ALRM_BYTE_MINS]);
+ /*
+ * B0 of the three matching registers is an enable flag. Iff it is set
+ * the configured value is used for matching.
+ */
+ if (buf[S35390A_ALRM_BYTE_WDAY] & 0x80)
+ alm->time.tm_wday =
+ bcd2bin(buf[S35390A_ALRM_BYTE_WDAY] & ~0x80);
+
+ if (buf[S35390A_ALRM_BYTE_HOURS] & 0x80)
+ alm->time.tm_hour =
+ s35390a_reg2hr(s35390a,
+ buf[S35390A_ALRM_BYTE_HOURS] & ~0x80);
+
+ if (buf[S35390A_ALRM_BYTE_MINS] & 0x80)
+ alm->time.tm_min = bcd2bin(buf[S35390A_ALRM_BYTE_MINS] & ~0x80);
+
+ /* alarm triggers always at s=0 */
+ alm->time.tm_sec = 0;
dev_dbg(&client->dev, "%s: alm is mins=%d, hours=%d, wday=%d\n",
__func__, alm->time.tm_min, alm->time.tm_hour,
@@ -327,11 +412,11 @@
static int s35390a_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
- int err;
+ int err, err_reset;
unsigned int i;
struct s35390a *s35390a;
struct rtc_time tm;
- char buf[1];
+ char buf, status1;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
err = -ENODEV;
@@ -360,29 +445,35 @@
}
}
- err = s35390a_reset(s35390a);
- if (err < 0) {
+ err_reset = s35390a_reset(s35390a, &status1);
+ if (err_reset < 0) {
+ err = err_reset;
dev_err(&client->dev, "error resetting chip\n");
goto exit_dummy;
}
- err = s35390a_disable_test_mode(s35390a);
- if (err < 0) {
- dev_err(&client->dev, "error disabling test mode\n");
- goto exit_dummy;
- }
-
- err = s35390a_get_reg(s35390a, S35390A_CMD_STATUS1, buf, sizeof(buf));
- if (err < 0) {
- dev_err(&client->dev, "error checking 12/24 hour mode\n");
- goto exit_dummy;
- }
- if (buf[0] & S35390A_FLAG_24H)
+ if (status1 & S35390A_FLAG_24H)
s35390a->twentyfourhour = 1;
else
s35390a->twentyfourhour = 0;
- if (s35390a_get_datetime(client, &tm) < 0)
+ if (status1 & S35390A_FLAG_INT2) {
+ /* disable alarm (and maybe test mode) */
+ buf = 0;
+ err = s35390a_set_reg(s35390a, S35390A_CMD_STATUS2, &buf, 1);
+ if (err < 0) {
+ dev_err(&client->dev, "error disabling alarm");
+ goto exit_dummy;
+ }
+ } else {
+ err = s35390a_disable_test_mode(s35390a);
+ if (err < 0) {
+ dev_err(&client->dev, "error disabling test mode\n");
+ goto exit_dummy;
+ }
+ }
+
+ if (err_reset > 0 || s35390a_get_datetime(client, &tm) < 0)
dev_warn(&client->dev, "clock needs to be set\n");
device_set_wakeup_capable(&client->dev, 1);
@@ -395,6 +486,10 @@
err = PTR_ERR(s35390a->rtc);
goto exit_dummy;
}
+
+ if (status1 & S35390A_FLAG_INT2)
+ rtc_update_irq(s35390a->rtc, 1, RTC_AF);
+
return 0;
exit_dummy:
diff --git a/drivers/rtc/rtc-tegra.c b/drivers/rtc/rtc-tegra.c
index 60232bd..71216aa 100644
--- a/drivers/rtc/rtc-tegra.c
+++ b/drivers/rtc/rtc-tegra.c
@@ -18,6 +18,7 @@
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <linux/kernel.h>
+#include <linux/clk.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -59,6 +60,7 @@
struct platform_device *pdev;
struct rtc_device *rtc_dev;
void __iomem *rtc_base; /* NULL if not initialized. */
+ struct clk *clk;
int tegra_rtc_irq; /* alarm and periodic irq */
spinlock_t tegra_rtc_lock;
};
@@ -332,6 +334,14 @@
if (info->tegra_rtc_irq <= 0)
return -EBUSY;
+ info->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(info->clk))
+ return PTR_ERR(info->clk);
+
+ ret = clk_prepare_enable(info->clk);
+ if (ret < 0)
+ return ret;
+
/* set context info. */
info->pdev = pdev;
spin_lock_init(&info->tegra_rtc_lock);
@@ -352,7 +362,7 @@
ret = PTR_ERR(info->rtc_dev);
dev_err(&pdev->dev, "Unable to register device (err=%d).\n",
ret);
- return ret;
+ goto disable_clk;
}
ret = devm_request_irq(&pdev->dev, info->tegra_rtc_irq,
@@ -362,12 +372,25 @@
dev_err(&pdev->dev,
"Unable to request interrupt for device (err=%d).\n",
ret);
- return ret;
+ goto disable_clk;
}
dev_notice(&pdev->dev, "Tegra internal Real Time Clock\n");
return 0;
+
+disable_clk:
+ clk_disable_unprepare(info->clk);
+ return ret;
+}
+
+static int tegra_rtc_remove(struct platform_device *pdev)
+{
+ struct tegra_rtc_info *info = platform_get_drvdata(pdev);
+
+ clk_disable_unprepare(info->clk);
+
+ return 0;
}
#ifdef CONFIG_PM_SLEEP
@@ -419,6 +442,7 @@
MODULE_ALIAS("platform:tegra_rtc");
static struct platform_driver tegra_rtc_driver = {
+ .remove = tegra_rtc_remove,
.shutdown = tegra_rtc_shutdown,
.driver = {
.name = "tegra_rtc",
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 24ec282..7c3b8d3 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -1651,6 +1651,9 @@
ap_dev->queue_depth = queue_depth;
ap_dev->raw_hwtype = device_type;
ap_dev->device_type = device_type;
+ /* CEX6 toleration: map to CEX5 */
+ if (device_type == AP_DEVICE_TYPE_CEX6)
+ ap_dev->device_type = AP_DEVICE_TYPE_CEX5;
ap_dev->functions = device_functions;
spin_lock_init(&ap_dev->lock);
INIT_LIST_HEAD(&ap_dev->pendingq);
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index 6adcbdf2..cc741e9 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -105,6 +105,7 @@
#define AP_DEVICE_TYPE_CEX3C 9
#define AP_DEVICE_TYPE_CEX4 10
#define AP_DEVICE_TYPE_CEX5 11
+#define AP_DEVICE_TYPE_CEX6 12
/*
* Known function facilities
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 1766a20..741f3ee8 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -717,6 +717,7 @@
};
struct qeth_discipline {
+ const struct device_type *devtype;
void (*start_poll)(struct ccw_device *, int, unsigned long);
qdio_handler_t *input_handler;
qdio_handler_t *output_handler;
@@ -881,6 +882,9 @@
extern struct qeth_discipline qeth_l3_discipline;
extern const struct attribute_group *qeth_generic_attr_groups[];
extern const struct attribute_group *qeth_osn_attr_groups[];
+extern const struct attribute_group qeth_device_attr_group;
+extern const struct attribute_group qeth_device_blkt_group;
+extern const struct device_type qeth_generic_devtype;
extern struct workqueue_struct *qeth_wq;
int qeth_card_hw_is_reachable(struct qeth_card *);
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 31ac53f..d10bf3d 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -5449,10 +5449,12 @@
card->discipline = NULL;
}
-static const struct device_type qeth_generic_devtype = {
+const struct device_type qeth_generic_devtype = {
.name = "qeth_generic",
.groups = qeth_generic_attr_groups,
};
+EXPORT_SYMBOL_GPL(qeth_generic_devtype);
+
static const struct device_type qeth_osn_devtype = {
.name = "qeth_osn",
.groups = qeth_osn_attr_groups,
@@ -5578,23 +5580,22 @@
goto err_card;
}
- if (card->info.type == QETH_CARD_TYPE_OSN)
- gdev->dev.type = &qeth_osn_devtype;
- else
- gdev->dev.type = &qeth_generic_devtype;
-
switch (card->info.type) {
case QETH_CARD_TYPE_OSN:
case QETH_CARD_TYPE_OSM:
rc = qeth_core_load_discipline(card, QETH_DISCIPLINE_LAYER2);
if (rc)
goto err_card;
+
+ gdev->dev.type = (card->info.type != QETH_CARD_TYPE_OSN)
+ ? card->discipline->devtype
+ : &qeth_osn_devtype;
rc = card->discipline->setup(card->gdev);
if (rc)
goto err_disc;
- case QETH_CARD_TYPE_OSD:
- case QETH_CARD_TYPE_OSX:
+ break;
default:
+ gdev->dev.type = &qeth_generic_devtype;
break;
}
@@ -5650,8 +5651,10 @@
if (rc)
goto err;
rc = card->discipline->setup(card->gdev);
- if (rc)
+ if (rc) {
+ qeth_core_free_discipline(card);
goto err;
+ }
}
rc = card->discipline->set_online(gdev);
err:
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index e6e5b96..fa844b0 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -409,12 +409,16 @@
if (card->options.layer2 == newdis)
goto out;
- else {
- card->info.mac_bits = 0;
- if (card->discipline) {
- card->discipline->remove(card->gdev);
- qeth_core_free_discipline(card);
- }
+ if (card->info.type == QETH_CARD_TYPE_OSM) {
+ /* fixed layer, can't switch */
+ rc = -EOPNOTSUPP;
+ goto out;
+ }
+
+ card->info.mac_bits = 0;
+ if (card->discipline) {
+ card->discipline->remove(card->gdev);
+ qeth_core_free_discipline(card);
}
rc = qeth_core_load_discipline(card, newdis);
@@ -422,6 +426,8 @@
goto out;
rc = card->discipline->setup(card->gdev);
+ if (rc)
+ qeth_core_free_discipline(card);
out:
mutex_unlock(&card->discipline_mutex);
return rc ? rc : count;
@@ -699,10 +705,11 @@
&dev_attr_inter_jumbo.attr,
NULL,
};
-static struct attribute_group qeth_device_blkt_group = {
+const struct attribute_group qeth_device_blkt_group = {
.name = "blkt",
.attrs = qeth_blkt_device_attrs,
};
+EXPORT_SYMBOL_GPL(qeth_device_blkt_group);
static struct attribute *qeth_device_attrs[] = {
&dev_attr_state.attr,
@@ -722,9 +729,10 @@
&dev_attr_switch_attrs.attr,
NULL,
};
-static struct attribute_group qeth_device_attr_group = {
+const struct attribute_group qeth_device_attr_group = {
.attrs = qeth_device_attrs,
};
+EXPORT_SYMBOL_GPL(qeth_device_attr_group);
const struct attribute_group *qeth_generic_attr_groups[] = {
&qeth_device_attr_group,
diff --git a/drivers/s390/net/qeth_l2.h b/drivers/s390/net/qeth_l2.h
index 0767556..eb87bf9 100644
--- a/drivers/s390/net/qeth_l2.h
+++ b/drivers/s390/net/qeth_l2.h
@@ -8,6 +8,8 @@
#include "qeth_core.h"
+extern const struct attribute_group *qeth_l2_attr_groups[];
+
int qeth_l2_create_device_attributes(struct device *);
void qeth_l2_remove_device_attributes(struct device *);
void qeth_l2_setup_bridgeport_attrs(struct qeth_card *card);
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index df036b8..bf1e0e3 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -1027,11 +1027,21 @@
return 0;
}
+static const struct device_type qeth_l2_devtype = {
+ .name = "qeth_layer2",
+ .groups = qeth_l2_attr_groups,
+};
+
static int qeth_l2_probe_device(struct ccwgroup_device *gdev)
{
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+ int rc;
- qeth_l2_create_device_attributes(&gdev->dev);
+ if (gdev->dev.type == &qeth_generic_devtype) {
+ rc = qeth_l2_create_device_attributes(&gdev->dev);
+ if (rc)
+ return rc;
+ }
INIT_LIST_HEAD(&card->vid_list);
hash_init(card->mac_htable);
card->options.layer2 = 1;
@@ -1043,7 +1053,8 @@
{
struct qeth_card *card = dev_get_drvdata(&cgdev->dev);
- qeth_l2_remove_device_attributes(&cgdev->dev);
+ if (cgdev->dev.type == &qeth_generic_devtype)
+ qeth_l2_remove_device_attributes(&cgdev->dev);
qeth_set_allowed_threads(card, 0, 1);
wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
@@ -1101,7 +1112,6 @@
case QETH_CARD_TYPE_OSN:
card->dev = alloc_netdev(0, "osn%d", NET_NAME_UNKNOWN,
ether_setup);
- card->dev->flags |= IFF_NOARP;
break;
default:
card->dev = alloc_etherdev(0);
@@ -1114,9 +1124,12 @@
card->dev->watchdog_timeo = QETH_TX_TIMEOUT;
card->dev->mtu = card->info.initial_mtu;
card->dev->netdev_ops = &qeth_l2_netdev_ops;
- card->dev->ethtool_ops =
- (card->info.type != QETH_CARD_TYPE_OSN) ?
- &qeth_l2_ethtool_ops : &qeth_l2_osn_ops;
+ if (card->info.type == QETH_CARD_TYPE_OSN) {
+ card->dev->ethtool_ops = &qeth_l2_osn_ops;
+ card->dev->flags |= IFF_NOARP;
+ } else {
+ card->dev->ethtool_ops = &qeth_l2_ethtool_ops;
+ }
card->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
if (card->info.type == QETH_CARD_TYPE_OSD && !card->info.guestlan) {
card->dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
@@ -1429,6 +1442,7 @@
}
struct qeth_discipline qeth_l2_discipline = {
+ .devtype = &qeth_l2_devtype,
.start_poll = qeth_qdio_start_poll,
.input_handler = (qdio_handler_t *) qeth_qdio_input_handler,
.output_handler = (qdio_handler_t *) qeth_qdio_output_handler,
diff --git a/drivers/s390/net/qeth_l2_sys.c b/drivers/s390/net/qeth_l2_sys.c
index 692db49..a48ed9e 100644
--- a/drivers/s390/net/qeth_l2_sys.c
+++ b/drivers/s390/net/qeth_l2_sys.c
@@ -272,3 +272,11 @@
} else
qeth_bridgeport_an_set(card, 0);
}
+
+const struct attribute_group *qeth_l2_attr_groups[] = {
+ &qeth_device_attr_group,
+ &qeth_device_blkt_group,
+ /* l2 specific, see l2_{create,remove}_device_attributes(): */
+ &qeth_l2_bridgeport_attr_group,
+ NULL,
+};
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index cc4d3c3..285fe0b 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -3227,8 +3227,11 @@
static int qeth_l3_probe_device(struct ccwgroup_device *gdev)
{
struct qeth_card *card = dev_get_drvdata(&gdev->dev);
+ int rc;
- qeth_l3_create_device_attributes(&gdev->dev);
+ rc = qeth_l3_create_device_attributes(&gdev->dev);
+ if (rc)
+ return rc;
card->options.layer2 = 0;
card->info.hwtrap = 0;
return 0;
@@ -3519,6 +3522,7 @@
}
struct qeth_discipline qeth_l3_discipline = {
+ .devtype = &qeth_generic_devtype,
.start_poll = qeth_qdio_start_poll,
.input_handler = (qdio_handler_t *) qeth_qdio_input_handler,
.output_handler = (qdio_handler_t *) qeth_qdio_output_handler,
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 64eed87..433c5e3 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1637,7 +1637,7 @@
config MAC_SCSI
tristate "Macintosh NCR5380 SCSI"
- depends on MAC && SCSI=y
+ depends on MAC && SCSI
select SCSI_SPI_ATTRS
help
This is the NCR 5380 SCSI controller included on most of the 68030
diff --git a/drivers/scsi/cxlflash/common.h b/drivers/scsi/cxlflash/common.h
index 5ada926..a8ac4c0 100644
--- a/drivers/scsi/cxlflash/common.h
+++ b/drivers/scsi/cxlflash/common.h
@@ -34,7 +34,6 @@
sectors
*/
-#define NUM_RRQ_ENTRY 16 /* for master issued cmds */
#define MAX_RHT_PER_CONTEXT (PAGE_SIZE / sizeof(struct sisl_rht_entry))
/* AFU command retry limit */
@@ -48,9 +47,12 @@
index derivation
*/
-#define CXLFLASH_MAX_CMDS 16
+#define CXLFLASH_MAX_CMDS 256
#define CXLFLASH_MAX_CMDS_PER_LUN CXLFLASH_MAX_CMDS
+/* RRQ for master issued cmds */
+#define NUM_RRQ_ENTRY CXLFLASH_MAX_CMDS
+
static inline void check_sizes(void)
{
@@ -149,7 +151,7 @@
struct afu {
/* Stuff requiring alignment go first. */
- u64 rrq_entry[NUM_RRQ_ENTRY]; /* 128B RRQ */
+ u64 rrq_entry[NUM_RRQ_ENTRY]; /* 2K RRQ */
/*
* Command & data for AFU commands.
*/
diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
index c86847c..0b09673 100644
--- a/drivers/scsi/cxlflash/main.c
+++ b/drivers/scsi/cxlflash/main.c
@@ -996,6 +996,8 @@
do {
msleep(delay_us / 1000);
status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
+ if (status == U64_MAX)
+ nretry /= 2;
} while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_ONLINE &&
nretry--);
@@ -1027,6 +1029,8 @@
do {
msleep(delay_us / 1000);
status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
+ if (status == U64_MAX)
+ nretry /= 2;
} while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_OFFLINE &&
nretry--);
@@ -1137,7 +1141,7 @@
{SISL_ASTATUS_FC0_LOGI_F, "login failed", 0, CLR_FC_ERROR},
{SISL_ASTATUS_FC0_LOGI_S, "login succeeded", 0, SCAN_HOST},
{SISL_ASTATUS_FC0_LINK_DN, "link down", 0, 0},
- {SISL_ASTATUS_FC0_LINK_UP, "link up", 0, SCAN_HOST},
+ {SISL_ASTATUS_FC0_LINK_UP, "link up", 0, 0},
{SISL_ASTATUS_FC1_OTHER, "other error", 1, CLR_FC_ERROR | LINK_RESET},
{SISL_ASTATUS_FC1_LOGO, "target initiated LOGO", 1, 0},
{SISL_ASTATUS_FC1_CRC_T, "CRC threshold exceeded", 1, LINK_RESET},
@@ -1145,7 +1149,7 @@
{SISL_ASTATUS_FC1_LOGI_F, "login failed", 1, CLR_FC_ERROR},
{SISL_ASTATUS_FC1_LOGI_S, "login succeeded", 1, SCAN_HOST},
{SISL_ASTATUS_FC1_LINK_DN, "link down", 1, 0},
- {SISL_ASTATUS_FC1_LINK_UP, "link up", 1, SCAN_HOST},
+ {SISL_ASTATUS_FC1_LINK_UP, "link up", 1, 0},
{0x0, "", 0, 0} /* terminator */
};
@@ -1962,6 +1966,11 @@
* cxlflash_eh_host_reset_handler() - reset the host adapter
* @scp: SCSI command from stack identifying host.
*
+ * Following a reset, the state is evaluated again in case an EEH occurred
+ * during the reset. In such a scenario, the host reset will either yield
+ * until the EEH recovery is complete or return success or failure based
+ * upon the current device state.
+ *
* Return:
* SUCCESS as defined in scsi/scsi.h
* FAILED as defined in scsi/scsi.h
@@ -1993,7 +2002,8 @@
} else
cfg->state = STATE_NORMAL;
wake_up_all(&cfg->reset_waitq);
- break;
+ ssleep(1);
+ /* fall through */
case STATE_RESET:
wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
if (cfg->state == STATE_NORMAL)
@@ -2305,7 +2315,7 @@
.eh_device_reset_handler = cxlflash_eh_device_reset_handler,
.eh_host_reset_handler = cxlflash_eh_host_reset_handler,
.change_queue_depth = cxlflash_change_queue_depth,
- .cmd_per_lun = 16,
+ .cmd_per_lun = CXLFLASH_MAX_CMDS_PER_LUN,
.can_queue = CXLFLASH_MAX_CMDS,
.this_id = -1,
.sg_tablesize = SG_NONE, /* No scatter gather support */
@@ -2534,6 +2544,9 @@
* @pdev: PCI device struct.
* @state: PCI channel state.
*
+ * When an EEH occurs during an active reset, wait until the reset is
+ * complete and then take action based upon the device state.
+ *
* Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
*/
static pci_ers_result_t cxlflash_pci_error_detected(struct pci_dev *pdev,
@@ -2547,6 +2560,10 @@
switch (state) {
case pci_channel_io_frozen:
+ wait_event(cfg->reset_waitq, cfg->state != STATE_RESET);
+ if (cfg->state == STATE_FAILTERM)
+ return PCI_ERS_RESULT_DISCONNECT;
+
cfg->state = STATE_RESET;
scsi_block_requests(cfg->host);
drain_ioctls(cfg);
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index ce129e5..5c93584 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -248,6 +248,7 @@
struct completion *remove_wait; /* device remove thread blocks */
atomic_t in_flight; /* io counter */
+ bool internal_reset_inprogress;
u32 _reserved; /* fill hole */
unsigned long state_flags; /* protected by host lock */
enum fnic_state state;
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index 266b909..82e4bc8 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -2533,6 +2533,19 @@
unsigned long wait_host_tmo;
struct Scsi_Host *shost = sc->device->host;
struct fc_lport *lp = shost_priv(shost);
+ struct fnic *fnic = lport_priv(lp);
+ unsigned long flags;
+
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ if (fnic->internal_reset_inprogress == 0) {
+ fnic->internal_reset_inprogress = 1;
+ } else {
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "host reset in progress skipping another host reset\n");
+ return SUCCESS;
+ }
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
/*
* If fnic_reset is successful, wait for fabric login to complete
@@ -2553,6 +2566,9 @@
}
}
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ fnic->internal_reset_inprogress = 0;
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
return ret;
}
diff --git a/drivers/scsi/isci/remote_node_context.c b/drivers/scsi/isci/remote_node_context.c
index 1910100..00602ab 100644
--- a/drivers/scsi/isci/remote_node_context.c
+++ b/drivers/scsi/isci/remote_node_context.c
@@ -66,6 +66,9 @@
{
static const char * const strings[] = RNC_STATES;
+ if (state >= ARRAY_SIZE(strings))
+ return "UNKNOWN";
+
return strings[state];
}
#undef C
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 6bffd91..c1ccf1e 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -560,8 +560,12 @@
WARN_ON_ONCE(task->state == ISCSI_TASK_FREE);
task->state = state;
- if (!list_empty(&task->running))
+ spin_lock_bh(&conn->taskqueuelock);
+ if (!list_empty(&task->running)) {
+ pr_debug_once("%s while task on list", __func__);
list_del_init(&task->running);
+ }
+ spin_unlock_bh(&conn->taskqueuelock);
if (conn->task == task)
conn->task = NULL;
@@ -783,7 +787,9 @@
if (session->tt->xmit_task(task))
goto free_task;
} else {
+ spin_lock_bh(&conn->taskqueuelock);
list_add_tail(&task->running, &conn->mgmtqueue);
+ spin_unlock_bh(&conn->taskqueuelock);
iscsi_conn_queue_work(conn);
}
@@ -1474,8 +1480,10 @@
* this may be on the requeue list already if the xmit_task callout
* is handling the r2ts while we are adding new ones
*/
+ spin_lock_bh(&conn->taskqueuelock);
if (list_empty(&task->running))
list_add_tail(&task->running, &conn->requeue);
+ spin_unlock_bh(&conn->taskqueuelock);
iscsi_conn_queue_work(conn);
}
EXPORT_SYMBOL_GPL(iscsi_requeue_task);
@@ -1512,22 +1520,26 @@
* only have one nop-out as a ping from us and targets should not
* overflow us with nop-ins
*/
+ spin_lock_bh(&conn->taskqueuelock);
check_mgmt:
while (!list_empty(&conn->mgmtqueue)) {
conn->task = list_entry(conn->mgmtqueue.next,
struct iscsi_task, running);
list_del_init(&conn->task->running);
+ spin_unlock_bh(&conn->taskqueuelock);
if (iscsi_prep_mgmt_task(conn, conn->task)) {
/* regular RX path uses back_lock */
spin_lock_bh(&conn->session->back_lock);
__iscsi_put_task(conn->task);
spin_unlock_bh(&conn->session->back_lock);
conn->task = NULL;
+ spin_lock_bh(&conn->taskqueuelock);
continue;
}
rc = iscsi_xmit_task(conn);
if (rc)
goto done;
+ spin_lock_bh(&conn->taskqueuelock);
}
/* process pending command queue */
@@ -1535,19 +1547,24 @@
conn->task = list_entry(conn->cmdqueue.next, struct iscsi_task,
running);
list_del_init(&conn->task->running);
+ spin_unlock_bh(&conn->taskqueuelock);
if (conn->session->state == ISCSI_STATE_LOGGING_OUT) {
fail_scsi_task(conn->task, DID_IMM_RETRY);
+ spin_lock_bh(&conn->taskqueuelock);
continue;
}
rc = iscsi_prep_scsi_cmd_pdu(conn->task);
if (rc) {
if (rc == -ENOMEM || rc == -EACCES) {
+ spin_lock_bh(&conn->taskqueuelock);
list_add_tail(&conn->task->running,
&conn->cmdqueue);
conn->task = NULL;
+ spin_unlock_bh(&conn->taskqueuelock);
goto done;
} else
fail_scsi_task(conn->task, DID_ABORT);
+ spin_lock_bh(&conn->taskqueuelock);
continue;
}
rc = iscsi_xmit_task(conn);
@@ -1558,6 +1575,7 @@
* we need to check the mgmt queue for nops that need to
* be sent to aviod starvation
*/
+ spin_lock_bh(&conn->taskqueuelock);
if (!list_empty(&conn->mgmtqueue))
goto check_mgmt;
}
@@ -1577,12 +1595,15 @@
conn->task = task;
list_del_init(&conn->task->running);
conn->task->state = ISCSI_TASK_RUNNING;
+ spin_unlock_bh(&conn->taskqueuelock);
rc = iscsi_xmit_task(conn);
if (rc)
goto done;
+ spin_lock_bh(&conn->taskqueuelock);
if (!list_empty(&conn->mgmtqueue))
goto check_mgmt;
}
+ spin_unlock_bh(&conn->taskqueuelock);
spin_unlock_bh(&conn->session->frwd_lock);
return -ENODATA;
@@ -1738,7 +1759,9 @@
goto prepd_reject;
}
} else {
+ spin_lock_bh(&conn->taskqueuelock);
list_add_tail(&task->running, &conn->cmdqueue);
+ spin_unlock_bh(&conn->taskqueuelock);
iscsi_conn_queue_work(conn);
}
@@ -2900,6 +2923,7 @@
INIT_LIST_HEAD(&conn->mgmtqueue);
INIT_LIST_HEAD(&conn->cmdqueue);
INIT_LIST_HEAD(&conn->requeue);
+ spin_lock_init(&conn->taskqueuelock);
INIT_WORK(&conn->xmitwork, iscsi_xmitworker);
/* allocate login_task used for the login/text sequences */
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 9c706d8..6f5e272 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -218,7 +218,7 @@
task->num_scatter = qc->n_elem;
} else {
for_each_sg(qc->sg, sg, qc->n_elem, si)
- xfer += sg->length;
+ xfer += sg_dma_len(sg);
task->total_xfer_len = xfer;
task->num_scatter = si;
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 59ced88..c74f74a 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1054,7 +1054,10 @@
lpfc_sli4_unreg_all_rpis(vport);
}
}
- lpfc_issue_reg_vfi(vport);
+
+ /* Do not register VFI if the driver aborted FLOGI */
+ if (!lpfc_error_lost_link(irsp))
+ lpfc_issue_reg_vfi(vport);
lpfc_nlp_put(ndlp);
goto out;
}
@@ -3563,12 +3566,14 @@
} else {
buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2;
lpfc_els_free_data(phba, buf_ptr1);
+ elsiocb->context2 = NULL;
}
}
if (elsiocb->context3) {
buf_ptr = (struct lpfc_dmabuf *) elsiocb->context3;
lpfc_els_free_bpl(phba, buf_ptr);
+ elsiocb->context3 = NULL;
}
lpfc_sli_release_iocbq(phba, elsiocb);
return 0;
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index c14ab6c..60c2109 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -11387,6 +11387,7 @@
.id_table = lpfc_id_table,
.probe = lpfc_pci_probe_one,
.remove = lpfc_pci_remove_one,
+ .shutdown = lpfc_pci_remove_one,
.suspend = lpfc_pci_suspend_one,
.resume = lpfc_pci_resume_one,
.err_handler = &lpfc_err_handler,
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index f5aeda8..38e90d9 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -5887,18 +5887,25 @@
free_vfi_bmask:
kfree(phba->sli4_hba.vfi_bmask);
+ phba->sli4_hba.vfi_bmask = NULL;
free_xri_ids:
kfree(phba->sli4_hba.xri_ids);
+ phba->sli4_hba.xri_ids = NULL;
free_xri_bmask:
kfree(phba->sli4_hba.xri_bmask);
+ phba->sli4_hba.xri_bmask = NULL;
free_vpi_ids:
kfree(phba->vpi_ids);
+ phba->vpi_ids = NULL;
free_vpi_bmask:
kfree(phba->vpi_bmask);
+ phba->vpi_bmask = NULL;
free_rpi_ids:
kfree(phba->sli4_hba.rpi_ids);
+ phba->sli4_hba.rpi_ids = NULL;
free_rpi_bmask:
kfree(phba->sli4_hba.rpi_bmask);
+ phba->sli4_hba.rpi_bmask = NULL;
err_exit:
return rc;
}
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 5b2c37f..9b53672 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -4981,15 +4981,14 @@
static int
_base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
{
- int r, i;
+ int r, i, index;
unsigned long flags;
u32 reply_address;
u16 smid;
struct _tr_list *delayed_tr, *delayed_tr_next;
u8 hide_flag;
struct adapter_reply_queue *reply_q;
- long reply_post_free;
- u32 reply_post_free_sz, index = 0;
+ Mpi2ReplyDescriptorsUnion_t *reply_post_free_contig;
dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
__func__));
@@ -5061,27 +5060,27 @@
_base_assign_reply_queues(ioc);
/* initialize Reply Post Free Queue */
- reply_post_free_sz = ioc->reply_post_queue_depth *
- sizeof(Mpi2DefaultReplyDescriptor_t);
- reply_post_free = (long)ioc->reply_post[index].reply_post_free;
+ index = 0;
+ reply_post_free_contig = ioc->reply_post[0].reply_post_free;
list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
+ /*
+ * If RDPQ is enabled, switch to the next allocation.
+ * Otherwise advance within the contiguous region.
+ */
+ if (ioc->rdpq_array_enable) {
+ reply_q->reply_post_free =
+ ioc->reply_post[index++].reply_post_free;
+ } else {
+ reply_q->reply_post_free = reply_post_free_contig;
+ reply_post_free_contig += ioc->reply_post_queue_depth;
+ }
+
reply_q->reply_post_host_index = 0;
- reply_q->reply_post_free = (Mpi2ReplyDescriptorsUnion_t *)
- reply_post_free;
for (i = 0; i < ioc->reply_post_queue_depth; i++)
reply_q->reply_post_free[i].Words =
cpu_to_le64(ULLONG_MAX);
if (!_base_is_controller_msix_enabled(ioc))
goto skip_init_reply_post_free_queue;
- /*
- * If RDPQ is enabled, switch to the next allocation.
- * Otherwise advance within the contiguous region.
- */
- if (ioc->rdpq_array_enable)
- reply_post_free = (long)
- ioc->reply_post[++index].reply_post_free;
- else
- reply_post_free += reply_post_free_sz;
}
skip_init_reply_post_free_queue:
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index 92648a5..63f5965 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -390,6 +390,7 @@
* @eedp_enable: eedp support enable bit
* @eedp_type: 0(type_1), 1(type_2), 2(type_3)
* @eedp_block_length: block size
+ * @ata_command_pending: SATL passthrough outstanding for device
*/
struct MPT3SAS_DEVICE {
struct MPT3SAS_TARGET *sas_target;
@@ -398,6 +399,17 @@
u8 configured_lun;
u8 block;
u8 tlr_snoop_check;
+ /*
+ * Bug workaround for SATL handling: the mpt2/3sas firmware
+ * doesn't return BUSY or TASK_SET_FULL for subsequent
+ * commands while a SATL pass through is in operation as the
+ * spec requires, it simply does nothing with them until the
+ * pass through completes, causing them possibly to timeout if
+ * the passthrough is a long executing command (like format or
+ * secure erase). This variable allows us to do the right
+ * thing while a SATL command is pending.
+ */
+ unsigned long ata_command_pending;
};
#define MPT3_CMD_NOT_USED 0x8000 /* free */
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index f6a8e99..e333029 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -3707,9 +3707,18 @@
}
}
-static inline bool ata_12_16_cmd(struct scsi_cmnd *scmd)
+static int _scsih_set_satl_pending(struct scsi_cmnd *scmd, bool pending)
{
- return (scmd->cmnd[0] == ATA_12 || scmd->cmnd[0] == ATA_16);
+ struct MPT3SAS_DEVICE *priv = scmd->device->hostdata;
+
+ if (scmd->cmnd[0] != ATA_12 && scmd->cmnd[0] != ATA_16)
+ return 0;
+
+ if (pending)
+ return test_and_set_bit(0, &priv->ata_command_pending);
+
+ clear_bit(0, &priv->ata_command_pending);
+ return 0;
}
/**
@@ -3733,9 +3742,7 @@
if (!scmd)
continue;
count++;
- if (ata_12_16_cmd(scmd))
- scsi_internal_device_unblock(scmd->device,
- SDEV_RUNNING);
+ _scsih_set_satl_pending(scmd, false);
mpt3sas_base_free_smid(ioc, smid);
scsi_dma_unmap(scmd);
if (ioc->pci_error_recovery)
@@ -3866,13 +3873,6 @@
if (ioc->logging_level & MPT_DEBUG_SCSI)
scsi_print_command(scmd);
- /*
- * Lock the device for any subsequent command until command is
- * done.
- */
- if (ata_12_16_cmd(scmd))
- scsi_internal_device_block(scmd->device);
-
sas_device_priv_data = scmd->device->hostdata;
if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
scmd->result = DID_NO_CONNECT << 16;
@@ -3886,6 +3886,19 @@
return 0;
}
+ /*
+ * Bug work around for firmware SATL handling. The loop
+ * is based on atomic operations and ensures consistency
+ * since we're lockless at this point
+ */
+ do {
+ if (test_bit(0, &sas_device_priv_data->ata_command_pending)) {
+ scmd->result = SAM_STAT_BUSY;
+ scmd->scsi_done(scmd);
+ return 0;
+ }
+ } while (_scsih_set_satl_pending(scmd, true));
+
sas_target_priv_data = sas_device_priv_data->sas_target;
/* invalid device handle */
@@ -4439,14 +4452,14 @@
struct MPT3SAS_DEVICE *sas_device_priv_data;
u32 response_code = 0;
unsigned long flags;
+ unsigned int sector_sz;
mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
scmd = _scsih_scsi_lookup_get_clear(ioc, smid);
if (scmd == NULL)
return 1;
- if (ata_12_16_cmd(scmd))
- scsi_internal_device_unblock(scmd->device, SDEV_RUNNING);
+ _scsih_set_satl_pending(scmd, false);
mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
@@ -4498,6 +4511,20 @@
}
xfer_cnt = le32_to_cpu(mpi_reply->TransferCount);
+
+ /* In case of bogus fw or device, we could end up having
+ * unaligned partial completion. We can force alignment here,
+ * then scsi-ml does not need to handle this misbehavior.
+ */
+ sector_sz = scmd->device->sector_size;
+ if (unlikely(scmd->request->cmd_type == REQ_TYPE_FS && sector_sz &&
+ xfer_cnt % sector_sz)) {
+ sdev_printk(KERN_INFO, scmd->device,
+ "unaligned partial completion avoided (xfer_cnt=%u, sector_sz=%u)\n",
+ xfer_cnt, sector_sz);
+ xfer_cnt = round_down(xfer_cnt, sector_sz);
+ }
+
scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt);
if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 6b942d9..1ed85df 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -329,12 +329,15 @@
struct qla_hw_data *ha = vha->hw;
ssize_t rval = 0;
- if (ha->optrom_state != QLA_SREADING)
- return 0;
-
mutex_lock(&ha->optrom_mutex);
+
+ if (ha->optrom_state != QLA_SREADING)
+ goto out;
+
rval = memory_read_from_buffer(buf, count, &off, ha->optrom_buffer,
ha->optrom_region_size);
+
+out:
mutex_unlock(&ha->optrom_mutex);
return rval;
@@ -349,14 +352,19 @@
struct device, kobj)));
struct qla_hw_data *ha = vha->hw;
- if (ha->optrom_state != QLA_SWRITING)
+ mutex_lock(&ha->optrom_mutex);
+
+ if (ha->optrom_state != QLA_SWRITING) {
+ mutex_unlock(&ha->optrom_mutex);
return -EINVAL;
- if (off > ha->optrom_region_size)
+ }
+ if (off > ha->optrom_region_size) {
+ mutex_unlock(&ha->optrom_mutex);
return -ERANGE;
+ }
if (off + count > ha->optrom_region_size)
count = ha->optrom_region_size - off;
- mutex_lock(&ha->optrom_mutex);
memcpy(&ha->optrom_buffer[off], buf, count);
mutex_unlock(&ha->optrom_mutex);
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 0e59731..1f6a3b8 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -2466,6 +2466,10 @@
if (pkt->entry_status & RF_BUSY)
res = DID_BUS_BUSY << 16;
+ if (pkt->entry_type == NOTIFY_ACK_TYPE &&
+ pkt->handle == QLA_TGT_SKIP_HANDLE)
+ return;
+
sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
if (sp) {
sp->done(ha, sp, res);
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 3588a56..5cbf20a 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -2311,10 +2311,10 @@
if (mem_only) {
if (pci_enable_device_mem(pdev))
- goto probe_out;
+ return ret;
} else {
if (pci_enable_device(pdev))
- goto probe_out;
+ return ret;
}
/* This may fail but that's ok */
@@ -2324,7 +2324,7 @@
if (!ha) {
ql_log_pci(ql_log_fatal, pdev, 0x0009,
"Unable to allocate memory for ha.\n");
- goto probe_out;
+ goto disable_device;
}
ql_dbg_pci(ql_dbg_init, pdev, 0x000a,
"Memory allocated for ha=%p.\n", ha);
@@ -2923,7 +2923,7 @@
kfree(ha);
ha = NULL;
-probe_out:
+disable_device:
pci_disable_device(pdev);
return ret;
}
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index f57d969..e6faa0b 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -2865,7 +2865,7 @@
pkt->entry_type = NOTIFY_ACK_TYPE;
pkt->entry_count = 1;
- pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
+ pkt->handle = QLA_TGT_SKIP_HANDLE;
nack = (struct nack_to_isp *)pkt;
nack->ox_id = ntfy->ox_id;
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 5d81bcc..ed6dce0 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1995,6 +1995,22 @@
#define READ_CAPACITY_RETRIES_ON_RESET 10
+/*
+ * Ensure that we don't overflow sector_t when CONFIG_LBDAF is not set
+ * and the reported logical block size is bigger than 512 bytes. Note
+ * that last_sector is a u64 and therefore logical_to_sectors() is not
+ * applicable.
+ */
+static bool sd_addressable_capacity(u64 lba, unsigned int sector_size)
+{
+ u64 last_sector = (lba + 1ULL) << (ilog2(sector_size) - 9);
+
+ if (sizeof(sector_t) == 4 && last_sector > U32_MAX)
+ return false;
+
+ return true;
+}
+
static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
unsigned char *buffer)
{
@@ -2060,7 +2076,7 @@
return -ENODEV;
}
- if ((sizeof(sdkp->capacity) == 4) && (lba >= 0xffffffffULL)) {
+ if (!sd_addressable_capacity(lba, sector_size)) {
sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a "
"kernel compiled with support for large block "
"devices.\n");
@@ -2146,7 +2162,7 @@
return sector_size;
}
- if ((sizeof(sdkp->capacity) == 4) && (lba == 0xffffffff)) {
+ if (!sd_addressable_capacity(lba, sector_size)) {
sd_printk(KERN_ERR, sdkp, "Too big for this kernel. Use a "
"kernel compiled with support for large block "
"devices.\n");
@@ -2481,7 +2497,8 @@
if (sdp->broken_fua) {
sd_first_printk(KERN_NOTICE, sdkp, "Disabling FUA\n");
sdkp->DPOFUA = 0;
- } else if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw) {
+ } else if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw &&
+ !sdkp->device->use_16_for_rw) {
sd_first_printk(KERN_NOTICE, sdkp,
"Uses READ/WRITE(6), disabling FUA\n");
sdkp->DPOFUA = 0;
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index 654630b..765a6f1 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -151,6 +151,11 @@
return blocks << (ilog2(sdev->sector_size) - 9);
}
+static inline unsigned int logical_to_bytes(struct scsi_device *sdev, sector_t blocks)
+{
+ return blocks * sdev->sector_size;
+}
+
/*
* A DIF-capable target device can be formatted with different
* protection schemes. Currently 0 through 3 are defined:
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 542cef3..0871b1c 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -153,6 +153,7 @@
struct sg_device *parentdp; /* owning device */
wait_queue_head_t read_wait; /* queue read until command done */
rwlock_t rq_list_lock; /* protect access to list in req_arr */
+ struct mutex f_mutex; /* protect against changes in this fd */
int timeout; /* defaults to SG_DEFAULT_TIMEOUT */
int timeout_user; /* defaults to SG_DEFAULT_TIMEOUT_USER */
Sg_scatter_hold reserve; /* buffer held for this file descriptor */
@@ -166,6 +167,7 @@
unsigned char next_cmd_len; /* 0: automatic, >0: use on next write() */
char keep_orphan; /* 0 -> drop orphan (def), 1 -> keep for read() */
char mmap_called; /* 0 -> mmap() never called on this fd */
+ char res_in_use; /* 1 -> 'reserve' array in use */
struct kref f_ref;
struct execute_work ew;
} Sg_fd;
@@ -209,7 +211,6 @@
static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id);
static Sg_request *sg_add_request(Sg_fd * sfp);
static int sg_remove_request(Sg_fd * sfp, Sg_request * srp);
-static int sg_res_in_use(Sg_fd * sfp);
static Sg_device *sg_get_dev(int dev);
static void sg_device_destroy(struct kref *kref);
@@ -628,6 +629,7 @@
}
buf += SZ_SG_HEADER;
__get_user(opcode, buf);
+ mutex_lock(&sfp->f_mutex);
if (sfp->next_cmd_len > 0) {
cmd_size = sfp->next_cmd_len;
sfp->next_cmd_len = 0; /* reset so only this write() effected */
@@ -636,6 +638,7 @@
if ((opcode >= 0xc0) && old_hdr.twelve_byte)
cmd_size = 12;
}
+ mutex_unlock(&sfp->f_mutex);
SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sdp,
"sg_write: scsi opcode=0x%02x, cmd_size=%d\n", (int) opcode, cmd_size));
/* Determine buffer size. */
@@ -735,7 +738,7 @@
sg_remove_request(sfp, srp);
return -EINVAL; /* either MMAP_IO or DIRECT_IO (not both) */
}
- if (sg_res_in_use(sfp)) {
+ if (sfp->res_in_use) {
sg_remove_request(sfp, srp);
return -EBUSY; /* reserve buffer already being used */
}
@@ -907,7 +910,7 @@
return result;
if (val) {
sfp->low_dma = 1;
- if ((0 == sfp->low_dma) && (0 == sg_res_in_use(sfp))) {
+ if ((0 == sfp->low_dma) && !sfp->res_in_use) {
val = (int) sfp->reserve.bufflen;
mutex_lock(&sfp->parentdp->open_rel_lock);
sg_remove_scat(sfp, &sfp->reserve);
@@ -984,14 +987,19 @@
return -EINVAL;
val = min_t(int, val,
max_sectors_bytes(sdp->device->request_queue));
+ mutex_lock(&sfp->f_mutex);
if (val != sfp->reserve.bufflen) {
- if (sg_res_in_use(sfp) || sfp->mmap_called)
+ if (sfp->mmap_called ||
+ sfp->res_in_use) {
+ mutex_unlock(&sfp->f_mutex);
return -EBUSY;
+ }
mutex_lock(&sfp->parentdp->open_rel_lock);
sg_remove_scat(sfp, &sfp->reserve);
sg_build_reserve(sfp, val);
mutex_unlock(&sfp->parentdp->open_rel_lock);
}
+ mutex_unlock(&sfp->f_mutex);
return 0;
case SG_GET_RESERVED_SIZE:
val = min_t(int, sfp->reserve.bufflen,
@@ -1254,6 +1262,7 @@
unsigned long req_sz, len, sa;
Sg_scatter_hold *rsv_schp;
int k, length;
+ int ret = 0;
if ((!filp) || (!vma) || (!(sfp = (Sg_fd *) filp->private_data)))
return -ENXIO;
@@ -1264,8 +1273,11 @@
if (vma->vm_pgoff)
return -EINVAL; /* want no offset */
rsv_schp = &sfp->reserve;
- if (req_sz > rsv_schp->bufflen)
- return -ENOMEM; /* cannot map more than reserved buffer */
+ mutex_lock(&sfp->f_mutex);
+ if (req_sz > rsv_schp->bufflen) {
+ ret = -ENOMEM; /* cannot map more than reserved buffer */
+ goto out;
+ }
sa = vma->vm_start;
length = 1 << (PAGE_SHIFT + rsv_schp->page_order);
@@ -1279,7 +1291,9 @@
vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_private_data = sfp;
vma->vm_ops = &sg_mmap_vm_ops;
- return 0;
+out:
+ mutex_unlock(&sfp->f_mutex);
+ return ret;
}
static void
@@ -1743,13 +1757,25 @@
md = &map_data;
if (md) {
- if (!sg_res_in_use(sfp) && dxfer_len <= rsv_schp->bufflen)
+ mutex_lock(&sfp->f_mutex);
+ if (dxfer_len <= rsv_schp->bufflen &&
+ !sfp->res_in_use) {
+ sfp->res_in_use = 1;
sg_link_reserve(sfp, srp, dxfer_len);
- else {
+ } else if (hp->flags & SG_FLAG_MMAP_IO) {
+ res = -EBUSY; /* sfp->res_in_use == 1 */
+ if (dxfer_len > rsv_schp->bufflen)
+ res = -ENOMEM;
+ mutex_unlock(&sfp->f_mutex);
+ return res;
+ } else {
res = sg_build_indirect(req_schp, sfp, dxfer_len);
- if (res)
+ if (res) {
+ mutex_unlock(&sfp->f_mutex);
return res;
+ }
}
+ mutex_unlock(&sfp->f_mutex);
md->pages = req_schp->pages;
md->page_order = req_schp->page_order;
@@ -2040,6 +2066,8 @@
req_schp->sglist_len = 0;
sfp->save_scat_len = 0;
srp->res_used = 0;
+ /* Called without mutex lock to avoid deadlock */
+ sfp->res_in_use = 0;
}
static Sg_request *
@@ -2151,6 +2179,7 @@
rwlock_init(&sfp->rq_list_lock);
kref_init(&sfp->f_ref);
+ mutex_init(&sfp->f_mutex);
sfp->timeout = SG_DEFAULT_TIMEOUT;
sfp->timeout_user = SG_DEFAULT_TIMEOUT_USER;
sfp->force_packid = SG_DEF_FORCE_PACK_ID;
@@ -2226,20 +2255,6 @@
schedule_work(&sfp->ew.work);
}
-static int
-sg_res_in_use(Sg_fd * sfp)
-{
- const Sg_request *srp;
- unsigned long iflags;
-
- read_lock_irqsave(&sfp->rq_list_lock, iflags);
- for (srp = sfp->headrp; srp; srp = srp->nextrp)
- if (srp->res_used)
- break;
- read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
- return srp ? 1 : 0;
-}
-
#ifdef CONFIG_SCSI_PROC_FS
static int
sg_idr_max_id(int id, void *p, void *data)
diff --git a/drivers/scsi/snic/snic_main.c b/drivers/scsi/snic/snic_main.c
index 2b3c253..8175f99 100644
--- a/drivers/scsi/snic/snic_main.c
+++ b/drivers/scsi/snic/snic_main.c
@@ -584,6 +584,7 @@
if (!pool) {
SNIC_HOST_ERR(shost, "dflt sgl pool creation failed\n");
+ ret = -ENOMEM;
goto err_free_res;
}
@@ -594,6 +595,7 @@
if (!pool) {
SNIC_HOST_ERR(shost, "max sgl pool creation failed\n");
+ ret = -ENOMEM;
goto err_free_dflt_sgl_pool;
}
@@ -604,6 +606,7 @@
if (!pool) {
SNIC_HOST_ERR(shost, "snic tmreq info pool creation failed.\n");
+ ret = -ENOMEM;
goto err_free_max_sgl_pool;
}
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 64c8674..804586a 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -834,6 +834,7 @@
unsigned char *buffer;
struct scsi_mode_data data;
struct scsi_sense_hdr sshdr;
+ unsigned int ms_len = 128;
int rc, n;
static const char *loadmech[] =
@@ -860,10 +861,11 @@
scsi_test_unit_ready(cd->device, SR_TIMEOUT, MAX_RETRIES, &sshdr);
/* ask for mode page 0x2a */
- rc = scsi_mode_sense(cd->device, 0, 0x2a, buffer, 128,
+ rc = scsi_mode_sense(cd->device, 0, 0x2a, buffer, ms_len,
SR_TIMEOUT, 3, &data, NULL);
- if (!scsi_status_is_good(rc)) {
+ if (!scsi_status_is_good(rc) || data.length > ms_len ||
+ data.header_length + data.block_descriptor_length > data.length) {
/* failed, drive doesn't have capabilities mode page */
cd->cdi.speed = 1;
cd->cdi.mask |= (CDC_CD_R | CDC_CD_RW | CDC_DVD_R |
diff --git a/drivers/scsi/ufs/ufs-debugfs.c b/drivers/scsi/ufs/ufs-debugfs.c
index 9e4e410..0820f83 100644
--- a/drivers/scsi/ufs/ufs-debugfs.c
+++ b/drivers/scsi/ufs/ufs-debugfs.c
@@ -980,6 +980,8 @@
(u32)hba->outstanding_reqs);
seq_printf(file, "hba->capabilities = 0x%x\n", hba->capabilities);
+ seq_printf(file, "hba->auto_hibern8 support = %d\n",
+ ufshcd_is_auto_hibern8_supported(hba));
seq_printf(file, "hba->nutrs = %d\n", hba->nutrs);
seq_printf(file, "hba->nutmrs = %d\n", hba->nutmrs);
seq_printf(file, "hba->ufs_version = 0x%x\n", hba->ufs_version);
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 858445c..436658c 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -173,6 +173,9 @@
}
#endif
+#define PWR_INFO_MASK 0xF
+#define PWR_RX_OFFSET 4
+
#define UFSHCD_REQ_SENSE_SIZE 18
#define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\
@@ -2222,6 +2225,9 @@
hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
hba->nutmrs =
((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
+
+ /* disable auto hibern8 */
+ hba->capabilities &= ~MASK_AUTO_HIBERN8_SUPPORT;
}
/**
@@ -4453,13 +4459,14 @@
int ret = 0;
/* if already configured to the requested pwr_mode */
- if (pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
- pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
- pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
- pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
- pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
- pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
- pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
+ if (!hba->restore_needed &&
+ pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
+ pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
+ pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
+ pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
+ pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
+ pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
+ pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
dev_dbg(hba->dev, "%s: power already configured\n", __func__);
return 0;
}
@@ -6070,6 +6077,52 @@
reg_hist->pos = (reg_hist->pos + 1) % UIC_ERR_REG_HIST_LENGTH;
}
+static void ufshcd_rls_handler(struct work_struct *work)
+{
+ struct ufs_hba *hba;
+ int ret = 0;
+ u32 mode;
+
+ hba = container_of(work, struct ufs_hba, rls_work);
+ pm_runtime_get_sync(hba->dev);
+ ufshcd_scsi_block_requests(hba);
+ ret = ufshcd_wait_for_doorbell_clr(hba, U64_MAX);
+ if (ret) {
+ dev_err(hba->dev,
+ "Timed out (%d) waiting for DB to clear\n",
+ ret);
+ goto out;
+ }
+
+ ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PWRMODE), &mode);
+ if (hba->pwr_info.pwr_rx != ((mode >> PWR_RX_OFFSET) & PWR_INFO_MASK))
+ hba->restore_needed = true;
+
+ if (hba->pwr_info.pwr_tx != (mode & PWR_INFO_MASK))
+ hba->restore_needed = true;
+
+ ufshcd_dme_get(hba, UIC_ARG_MIB(PA_RXGEAR), &mode);
+ if (hba->pwr_info.gear_rx != mode)
+ hba->restore_needed = true;
+
+ ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TXGEAR), &mode);
+ if (hba->pwr_info.gear_tx != mode)
+ hba->restore_needed = true;
+
+ if (hba->restore_needed)
+ ret = ufshcd_config_pwr_mode(hba, &(hba->pwr_info));
+
+ if (ret)
+ dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
+ __func__, ret);
+ else
+ hba->restore_needed = false;
+
+out:
+ ufshcd_scsi_unblock_requests(hba);
+ pm_runtime_put_sync(hba->dev);
+}
+
/**
* ufshcd_update_uic_error - check and set fatal UIC error flags.
* @hba: per-adapter instance
@@ -6107,8 +6160,18 @@
dev_err(hba->dev, "%s: LINERESET during hibern8 enter, reg 0x%x\n",
__func__, reg);
hba->full_init_linereset = true;
+ } else {
+ dev_err(hba->dev,
+ "%s: LINERESET with cmd != UIC_CMD_DME_HIBER_ENTER, reg 0x%x\n",
+ __func__, reg);
}
+ } else {
+ dev_err(hba->dev,
+ "%s: LINERESET without cmd, reg 0x%x\n",
+ __func__, reg);
}
+ if (!hba->full_init_linereset)
+ schedule_work(&hba->rls_work);
}
retval |= IRQ_HANDLED;
}
@@ -9710,6 +9773,7 @@
/* Initialize work queues */
INIT_WORK(&hba->eh_work, ufshcd_err_handler);
INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
+ INIT_WORK(&hba->rls_work, ufshcd_rls_handler);
/* Initialize UIC command mutex */
mutex_init(&hba->uic_cmd_mutex);
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 93db660..c0e2dc4 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -3,7 +3,7 @@
*
* This code is based on drivers/scsi/ufs/ufshcd.h
* Copyright (C) 2011-2013 Samsung India Software Operations
- * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* Authors:
* Santosh Yaraganavi <santosh.sy@samsung.com>
@@ -813,6 +813,7 @@
/* Work Queues */
struct work_struct eh_work;
struct work_struct eeh_work;
+ struct work_struct rls_work;
/* HBA Errors */
u32 errors;
@@ -908,9 +909,10 @@
bool full_init_linereset;
struct pinctrl *pctrl;
-
+
int latency_hist_enabled;
struct io_latency_state io_lat_s;
+ bool restore_needed;
};
static inline void ufshcd_mark_shutdown_ongoing(struct ufs_hba *hba)
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 7dbbb29..03a2aad 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -533,7 +533,9 @@
{
struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
struct virtio_scsi_cmd *cmd = scsi_cmd_priv(sc);
+ unsigned long flags;
int req_size;
+ int ret;
BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize);
@@ -561,8 +563,15 @@
req_size = sizeof(cmd->req.cmd);
}
- if (virtscsi_kick_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd)) != 0)
+ ret = virtscsi_kick_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd));
+ if (ret == -EIO) {
+ cmd->resp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET;
+ spin_lock_irqsave(&req_vq->vq_lock, flags);
+ virtscsi_complete_cmd(vscsi, cmd);
+ spin_unlock_irqrestore(&req_vq->vq_lock, flags);
+ } else if (ret != 0) {
return SCSI_MLQUEUE_HOST_BUSY;
+ }
return 0;
}
diff --git a/drivers/soc/qcom/scm.c b/drivers/soc/qcom/scm.c
index 6353286..6f5bd21 100644
--- a/drivers/soc/qcom/scm.c
+++ b/drivers/soc/qcom/scm.c
@@ -380,11 +380,7 @@
#ifdef CONFIG_ARM64
-static
-#ifdef __clang__
-noinline
-#endif
-int __scm_call_armv8_64(u64 x0, u64 x1, u64 x2, u64 x3, u64 x4, u64 x5,
+static int __scm_call_armv8_64(u64 x0, u64 x1, u64 x2, u64 x3, u64 x4, u64 x5,
u64 *ret1, u64 *ret2, u64 *ret3)
{
register u64 r0 asm("x0") = x0;
@@ -401,18 +397,19 @@
__asmeq("%1", R1_STR)
__asmeq("%2", R2_STR)
__asmeq("%3", R3_STR)
- __asmeq("%4", R0_STR)
- __asmeq("%5", R1_STR)
- __asmeq("%6", R2_STR)
- __asmeq("%7", R3_STR)
- __asmeq("%8", R4_STR)
- __asmeq("%9", R5_STR)
- __asmeq("%10", R6_STR)
+ __asmeq("%4", R6_STR)
+ __asmeq("%5", R0_STR)
+ __asmeq("%6", R1_STR)
+ __asmeq("%7", R2_STR)
+ __asmeq("%8", R3_STR)
+ __asmeq("%9", R4_STR)
+ __asmeq("%10", R5_STR)
+ __asmeq("%11", R6_STR)
#ifdef REQUIRES_SEC
".arch_extension sec\n"
#endif
"smc #0\n"
- : "=r" (r0), "=r" (r1), "=r" (r2), "=r" (r3)
+ : "=r" (r0), "=r" (r1), "=r" (r2), "=r" (r3), "=r" (r6)
: "r" (r0), "r" (r1), "r" (r2), "r" (r3), "r" (r4),
"r" (r5), "r" (r6)
: "x7", "x8", "x9", "x10", "x11", "x12", "x13",
diff --git a/drivers/soc/qcom/smp2p.c b/drivers/soc/qcom/smp2p.c
index 1de38bf..ad519ac 100644
--- a/drivers/soc/qcom/smp2p.c
+++ b/drivers/soc/qcom/smp2p.c
@@ -442,7 +442,7 @@
if (!out_list[rpid].feature_ssr_ack_enabled)
return false;
- ssr_done = SMP2P_GET_RESTART_DONE(in_list[rpid].smem_edge_in->flags);
+ ssr_done = SMP2P_GET_RESTART_DONE(in_list[rpid].smem_edge_in, flags);
if (ssr_done != out_list[rpid].restart_ack)
return true;
@@ -467,7 +467,7 @@
SMP2P_INFO("%s: ssr ack pid %d: %d -> %d\n", __func__, rpid,
out_list[rpid].restart_ack, ack);
out_list[rpid].restart_ack = ack;
- SMP2P_SET_RESTART_ACK(out_list[rpid].smem_edge_out->flags, ack);
+ SMP2P_SET_RESTART_ACK(out_list[rpid].smem_edge_out, flags, ack);
smp2p_send_interrupt(rpid);
}
@@ -495,7 +495,7 @@
{
uint32_t features;
- features = SMP2P_GET_FEATURES(out_item->smem_edge_out->feature_version);
+ features = SMP2P_GET_FEATURES(out_item->smem_edge_out, feature_version);
if (features & SMP2P_FEATURE_SSR_ACK)
out_item->feature_ssr_ack_enabled = true;
@@ -574,8 +574,8 @@
}
smp2p_h_ptr = p_list->smem_edge_out;
- entries_total = SMP2P_GET_ENT_TOTAL(smp2p_h_ptr->valid_total_ent);
- entries_valid = SMP2P_GET_ENT_VALID(smp2p_h_ptr->valid_total_ent);
+ entries_total = SMP2P_GET_ENT_TOTAL(smp2p_h_ptr, valid_total_ent);
+ entries_valid = SMP2P_GET_ENT_VALID(smp2p_h_ptr, valid_total_ent);
p_list->ops_ptr->find_entry(smp2p_h_ptr, entries_total,
out_entry->name, &state_entry_ptr, &empty_spot);
@@ -604,7 +604,7 @@
__func__, out_entry->name,
out_entry->remote_pid,
entries_valid, entries_total);
- SMP2P_SET_ENT_VALID(smp2p_h_ptr->valid_total_ent,
+ SMP2P_SET_ENT_VALID(smp2p_h_ptr, valid_total_ent,
entries_valid);
smp2p_send_interrupt(out_entry->remote_pid);
}
@@ -632,7 +632,7 @@
return -EINVAL;
smp2p_h_ptr = out_list[out_entry->remote_pid].smem_edge_out;
- remote_pid = SMP2P_GET_REMOTE_PID(smp2p_h_ptr->rem_loc_proc_id);
+ remote_pid = SMP2P_GET_REMOTE_PID(smp2p_h_ptr, rem_loc_proc_id);
if (remote_pid != out_entry->remote_pid)
return -EINVAL;
@@ -666,7 +666,7 @@
return -EINVAL;
smp2p_h_ptr = out_list[out_entry->remote_pid].smem_edge_out;
- remote_pid = SMP2P_GET_REMOTE_PID(smp2p_h_ptr->rem_loc_proc_id);
+ remote_pid = SMP2P_GET_REMOTE_PID(smp2p_h_ptr, rem_loc_proc_id);
if (remote_pid != out_entry->remote_pid)
return -EINVAL;
@@ -705,7 +705,7 @@
return -EINVAL;
smp2p_h_ptr = out_list[out_entry->remote_pid].smem_edge_out;
- remote_pid = SMP2P_GET_REMOTE_PID(smp2p_h_ptr->rem_loc_proc_id);
+ remote_pid = SMP2P_GET_REMOTE_PID(smp2p_h_ptr, rem_loc_proc_id);
if (remote_pid != out_entry->remote_pid)
return -EINVAL;
@@ -756,7 +756,7 @@
in_item = &in_list[remote_pid];
item_ptr = (struct smp2p_smem __iomem *)smem_item;
- total_entries = SMP2P_GET_ENT_TOTAL(item_ptr->valid_total_ent);
+ total_entries = SMP2P_GET_ENT_TOTAL(item_ptr, valid_total_ent);
if (total_entries > 0) {
in_item->safe_total_entries = total_entries;
in_item->item_size = size;
@@ -812,7 +812,7 @@
{
SMP2P_ERR("%s: invalid negotiation complete for v0 pid %d\n",
__func__,
- SMP2P_GET_REMOTE_PID(out_item->smem_edge_out->rem_loc_proc_id));
+ SMP2P_GET_REMOTE_PID(out_item->smem_edge_out, rem_loc_proc_id));
}
/**
@@ -990,16 +990,16 @@
uint32_t features, uint32_t version)
{
header_ptr->magic = SMP2P_MAGIC;
- SMP2P_SET_LOCAL_PID(header_ptr->rem_loc_proc_id, local_pid);
- SMP2P_SET_REMOTE_PID(header_ptr->rem_loc_proc_id, remote_pid);
- SMP2P_SET_FEATURES(header_ptr->feature_version, features);
- SMP2P_SET_ENT_TOTAL(header_ptr->valid_total_ent, SMP2P_MAX_ENTRY);
- SMP2P_SET_ENT_VALID(header_ptr->valid_total_ent, 0);
+ SMP2P_SET_LOCAL_PID(header_ptr, rem_loc_proc_id, local_pid);
+ SMP2P_SET_REMOTE_PID(header_ptr, rem_loc_proc_id, remote_pid);
+ SMP2P_SET_FEATURES(header_ptr, feature_version, features);
+ SMP2P_SET_ENT_TOTAL(header_ptr, valid_total_ent, SMP2P_MAX_ENTRY);
+ SMP2P_SET_ENT_VALID(header_ptr, valid_total_ent, 0);
header_ptr->flags = 0;
/* ensure that all fields are valid before version is written */
wmb();
- SMP2P_SET_VERSION(header_ptr->feature_version, version);
+ SMP2P_SET_VERSION(header_ptr, feature_version, version);
}
/**
@@ -1049,8 +1049,8 @@
r_version = 0;
if (r_smem_ptr) {
- r_version = SMP2P_GET_VERSION(r_smem_ptr->feature_version);
- r_feature = SMP2P_GET_FEATURES(r_smem_ptr->feature_version);
+ r_version = SMP2P_GET_VERSION(r_smem_ptr, feature_version);
+ r_feature = SMP2P_GET_FEATURES(r_smem_ptr, feature_version);
}
if (r_version == 0) {
@@ -1084,7 +1084,7 @@
"%s: negotiation failure pid %d: RV %d RF %x\n",
__func__, remote_pid, r_version, r_feature
);
- SMP2P_SET_VERSION(l_smem_ptr->feature_version,
+ SMP2P_SET_VERSION(l_smem_ptr, feature_version,
SMP2P_EDGE_STATE_FAILED);
smp2p_send_interrupt(remote_pid);
out_item->smem_edge_state = SMP2P_EDGE_STATE_FAILED;
diff --git a/drivers/soc/qcom/smp2p_debug.c b/drivers/soc/qcom/smp2p_debug.c
index 8d98d07..ffa7d4de 100644
--- a/drivers/soc/qcom/smp2p_debug.c
+++ b/drivers/soc/qcom/smp2p_debug.c
@@ -89,8 +89,8 @@
i += scnprintf(buf + i, max - i,
"%-14s LPID %d RPID %d",
state_text,
- SMP2P_GET_LOCAL_PID(item_ptr->rem_loc_proc_id),
- SMP2P_GET_REMOTE_PID(item_ptr->rem_loc_proc_id)
+ SMP2P_GET_LOCAL_PID(item_ptr, rem_loc_proc_id),
+ SMP2P_GET_REMOTE_PID(item_ptr, rem_loc_proc_id)
);
return i;
@@ -115,8 +115,8 @@
i += scnprintf(buf + i, max - i,
"Version: %08x Features: %08x",
- SMP2P_GET_VERSION(item_ptr->feature_version),
- SMP2P_GET_FEATURES(item_ptr->feature_version)
+ SMP2P_GET_VERSION(item_ptr, feature_version),
+ SMP2P_GET_FEATURES(item_ptr, feature_version)
);
return i;
@@ -142,8 +142,8 @@
i += scnprintf(buf + i, max - i,
"Entries #/Max: %d/%d Flags: %c%c",
- SMP2P_GET_ENT_VALID(item_ptr->valid_total_ent),
- SMP2P_GET_ENT_TOTAL(item_ptr->valid_total_ent),
+ SMP2P_GET_ENT_VALID(item_ptr, valid_total_ent),
+ SMP2P_GET_ENT_TOTAL(item_ptr, valid_total_ent),
item_ptr->flags & SMP2P_FLAGS_RESTART_ACK_MASK ? 'A' : 'a',
item_ptr->flags & SMP2P_FLAGS_RESTART_DONE_MASK ? 'D' : 'd'
);
@@ -230,13 +230,13 @@
if (out_ptr) {
out_entries = (struct smp2p_entry_v1 *)((void *)out_ptr +
sizeof(struct smp2p_smem));
- out_valid = SMP2P_GET_ENT_VALID(out_ptr->valid_total_ent);
+ out_valid = SMP2P_GET_ENT_VALID(out_ptr, valid_total_ent);
}
if (in_ptr) {
in_entries = (struct smp2p_entry_v1 *)((void *)in_ptr +
sizeof(struct smp2p_smem));
- in_valid = SMP2P_GET_ENT_VALID(in_ptr->valid_total_ent);
+ in_valid = SMP2P_GET_ENT_VALID(in_ptr, valid_total_ent);
}
for (entry = 0; out_entries || in_entries; ++entry) {
diff --git a/drivers/soc/qcom/smp2p_private.h b/drivers/soc/qcom/smp2p_private.h
index 57f23e8..dae7cb5 100644
--- a/drivers/soc/qcom/smp2p_private.h
+++ b/drivers/soc/qcom/smp2p_private.h
@@ -17,6 +17,7 @@
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/ipc_logging.h>
+#include <asm/io.h>
#include "smp2p_private_api.h"
#define SMP2P_MAX_ENTRY 16
@@ -50,49 +51,63 @@
| (((new_value) << (bit)) & (mask)); \
}
-#define SMP2P_GET_LOCAL_PID(hdr) \
- SMP2P_GET_BITS(hdr, SMP2P_LOCAL_PID_MASK, SMP2P_LOCAL_PID_BIT)
-#define SMP2P_SET_LOCAL_PID(hdr, pid) \
- SMP2P_SET_BITS(hdr, SMP2P_LOCAL_PID_MASK, SMP2P_LOCAL_PID_BIT, pid)
+#define SMP2P_IOMEM_GET_BITS(hdr, val, mask, bit) \
+ ({\
+ const volatile void __iomem *__p = (hdr); \
+ __p += offsetof(typeof(*(hdr)), val); \
+ (readl(__p) & (mask)) >> (bit); \
+ })
+#define SMP2P_IOMEM_SET_BITS(hdr, val, mask, bit, new_value) \
+ ({\
+ volatile void __iomem *__p = (hdr); \
+ __p += offsetof(typeof(*(hdr)), val); \
+ writel((readl(__p) & ~(mask)) \
+ | (((new_value) << (bit)) & (mask)), __p); \
+ })
-#define SMP2P_GET_REMOTE_PID(hdr) \
- SMP2P_GET_BITS(hdr, SMP2P_REMOTE_PID_MASK, SMP2P_REMOTE_PID_BIT)
-#define SMP2P_SET_REMOTE_PID(hdr, pid) \
- SMP2P_SET_BITS(hdr, SMP2P_REMOTE_PID_MASK, SMP2P_REMOTE_PID_BIT, pid)
+#define SMP2P_GET_LOCAL_PID(hdr, val) \
+ SMP2P_IOMEM_GET_BITS(hdr, val, SMP2P_LOCAL_PID_MASK, SMP2P_LOCAL_PID_BIT)
+#define SMP2P_SET_LOCAL_PID(hdr, val, pid) \
+ SMP2P_IOMEM_SET_BITS(hdr, val, SMP2P_LOCAL_PID_MASK, SMP2P_LOCAL_PID_BIT, pid)
-#define SMP2P_GET_VERSION(hdr) \
- SMP2P_GET_BITS(hdr, SMP2P_VERSION_MASK, SMP2P_VERSION_BIT)
-#define SMP2P_SET_VERSION(hdr, version) \
- SMP2P_SET_BITS(hdr, SMP2P_VERSION_MASK, SMP2P_VERSION_BIT, version)
+#define SMP2P_GET_REMOTE_PID(hdr, val) \
+ SMP2P_IOMEM_GET_BITS(hdr, val, SMP2P_REMOTE_PID_MASK, SMP2P_REMOTE_PID_BIT)
+#define SMP2P_SET_REMOTE_PID(hdr, val, pid) \
+ SMP2P_IOMEM_SET_BITS(hdr, val, SMP2P_REMOTE_PID_MASK, SMP2P_REMOTE_PID_BIT, pid)
-#define SMP2P_GET_FEATURES(hdr) \
- SMP2P_GET_BITS(hdr, SMP2P_FEATURE_MASK, SMP2P_FEATURE_BIT)
-#define SMP2P_SET_FEATURES(hdr, features) \
- SMP2P_SET_BITS(hdr, SMP2P_FEATURE_MASK, SMP2P_FEATURE_BIT, features)
+#define SMP2P_GET_VERSION(hdr, val) \
+ SMP2P_IOMEM_GET_BITS(hdr, val, SMP2P_VERSION_MASK, SMP2P_VERSION_BIT)
+#define SMP2P_SET_VERSION(hdr, val, version) \
+ SMP2P_IOMEM_SET_BITS(hdr, val, SMP2P_VERSION_MASK, SMP2P_VERSION_BIT, version)
-#define SMP2P_GET_ENT_TOTAL(hdr) \
- SMP2P_GET_BITS(hdr, SMP2P_ENT_TOTAL_MASK, SMP2P_ENT_TOTAL_BIT)
-#define SMP2P_SET_ENT_TOTAL(hdr, entries) \
- SMP2P_SET_BITS(hdr, SMP2P_ENT_TOTAL_MASK, SMP2P_ENT_TOTAL_BIT, entries)
+#define SMP2P_GET_FEATURES(hdr, val) \
+ SMP2P_IOMEM_GET_BITS(hdr, val, SMP2P_FEATURE_MASK, SMP2P_FEATURE_BIT)
+#define SMP2P_SET_FEATURES(hdr, val, features) \
+ SMP2P_IOMEM_SET_BITS(hdr, val, SMP2P_FEATURE_MASK, SMP2P_FEATURE_BIT, features)
-#define SMP2P_GET_ENT_VALID(hdr) \
- SMP2P_GET_BITS(hdr, SMP2P_ENT_VALID_MASK, SMP2P_ENT_VALID_BIT)
-#define SMP2P_SET_ENT_VALID(hdr, entries) \
- SMP2P_SET_BITS(hdr, SMP2P_ENT_VALID_MASK, SMP2P_ENT_VALID_BIT,\
+#define SMP2P_GET_ENT_TOTAL(hdr, val) \
+ SMP2P_IOMEM_GET_BITS(hdr, val, SMP2P_ENT_TOTAL_MASK, SMP2P_ENT_TOTAL_BIT)
+#define SMP2P_SET_ENT_TOTAL(hdr, val, entries) \
+ SMP2P_IOMEM_SET_BITS(hdr, val, SMP2P_ENT_TOTAL_MASK, SMP2P_ENT_TOTAL_BIT, entries)
+
+#define SMP2P_GET_ENT_VALID(hdr, val) \
+ SMP2P_IOMEM_GET_BITS(hdr, val, SMP2P_ENT_VALID_MASK, SMP2P_ENT_VALID_BIT)
+#define SMP2P_SET_ENT_VALID(hdr, val, entries) \
+ SMP2P_IOMEM_SET_BITS(hdr, val, SMP2P_ENT_VALID_MASK, SMP2P_ENT_VALID_BIT,\
entries)
-#define SMP2P_GET_RESTART_DONE(hdr) \
- SMP2P_GET_BITS(hdr, SMP2P_FLAGS_RESTART_DONE_MASK, \
+#define SMP2P_GET_RESTART_DONE(hdr, val) \
+ SMP2P_IOMEM_GET_BITS(hdr, val, SMP2P_FLAGS_RESTART_DONE_MASK, \
SMP2P_FLAGS_RESTART_DONE_BIT)
-#define SMP2P_SET_RESTART_DONE(hdr, value) \
- SMP2P_SET_BITS(hdr, SMP2P_FLAGS_RESTART_DONE_MASK, \
+#define SMP2P_SET_RESTART_DONE(hdr, val, value) \
+ SMP2P_IOMEM_SET_BITS(hdr, val, SMP2P_FLAGS_RESTART_DONE_MASK, \
SMP2P_FLAGS_RESTART_DONE_BIT, value)
-#define SMP2P_GET_RESTART_ACK(hdr) \
- SMP2P_GET_BITS(hdr, SMP2P_FLAGS_RESTART_ACK_MASK, \
+#define SMP2P_GET_RESTART_ACK(hdr, val) \
+ SMP2P_IOMEM_GET_BITS(hdr, val, SMP2P_FLAGS_RESTART_ACK_MASK, \
SMP2P_FLAGS_RESTART_ACK_BIT)
-#define SMP2P_SET_RESTART_ACK(hdr, value) \
- SMP2P_SET_BITS(hdr, SMP2P_FLAGS_RESTART_ACK_MASK, \
+#define SMP2P_SET_RESTART_ACK(hdr, val, value) \
+ SMP2P_IOMEM_SET_BITS(hdr, val, SMP2P_FLAGS_RESTART_ACK_MASK, \
SMP2P_FLAGS_RESTART_ACK_BIT, value)
/* Loopback Command Macros */
diff --git a/drivers/soc/qcom/smp2p_test.c b/drivers/soc/qcom/smp2p_test.c
index 397a547..569ff85 100644
--- a/drivers/soc/qcom/smp2p_test.c
+++ b/drivers/soc/qcom/smp2p_test.c
@@ -66,19 +66,19 @@
/* simulate response from remote side */
rmp->remote_item.header.magic = SMP2P_MAGIC;
SMP2P_SET_LOCAL_PID(
- rmp->remote_item.header.rem_loc_proc_id,
+ &rmp->remote_item.header, rem_loc_proc_id,
SMP2P_REMOTE_MOCK_PROC);
SMP2P_SET_REMOTE_PID(
- rmp->remote_item.header.rem_loc_proc_id,
+ &rmp->remote_item.header, rem_loc_proc_id,
SMP2P_APPS_PROC);
SMP2P_SET_VERSION(
- rmp->remote_item.header.feature_version, 1);
+ &rmp->remote_item.header, feature_version, 1);
SMP2P_SET_FEATURES(
- rmp->remote_item.header.feature_version, 0);
+ &rmp->remote_item.header, feature_version, 0);
SMP2P_SET_ENT_TOTAL(
- rmp->remote_item.header.valid_total_ent, SMP2P_MAX_ENTRY);
+ &rmp->remote_item.header, valid_total_ent, SMP2P_MAX_ENTRY);
SMP2P_SET_ENT_VALID(
- rmp->remote_item.header.valid_total_ent, 0);
+ &rmp->remote_item.header, valid_total_ent, 0);
rmp->remote_item.header.flags = 0x0;
msm_smp2p_set_remote_mock_exists(true);
rmp->tx_interrupt();
@@ -149,20 +149,20 @@
sizeof(struct smp2p_smem_item));
rmp->remote_item.header.magic = SMP2P_MAGIC;
SMP2P_SET_LOCAL_PID(
- rmp->remote_item.header.rem_loc_proc_id,
+ &rmp->remote_item.header, rem_loc_proc_id,
SMP2P_REMOTE_MOCK_PROC);
SMP2P_SET_REMOTE_PID(
- rmp->remote_item.header.rem_loc_proc_id,
+ &rmp->remote_item.header, rem_loc_proc_id,
SMP2P_APPS_PROC);
SMP2P_SET_VERSION(
- rmp->remote_item.header.feature_version, 1);
+ &rmp->remote_item.header, feature_version, 1);
SMP2P_SET_FEATURES(
- rmp->remote_item.header.feature_version, 0);
+ &rmp->remote_item.header, feature_version, 0);
SMP2P_SET_ENT_TOTAL(
- rmp->remote_item.header.valid_total_ent,
+ &rmp->remote_item.header, valid_total_ent,
SMP2P_MAX_ENTRY);
SMP2P_SET_ENT_VALID(
- rmp->remote_item.header.valid_total_ent, 0);
+ &rmp->remote_item.header, valid_total_ent, 0);
rmp->remote_item.header.flags = 0x0;
msm_smp2p_set_remote_mock_exists(true);
@@ -240,19 +240,19 @@
sizeof(struct smp2p_smem_item));
rmp->remote_item.header.magic = SMP2P_MAGIC;
SMP2P_SET_LOCAL_PID(
- rmp->remote_item.header.rem_loc_proc_id,
+ &rmp->remote_item.header, rem_loc_proc_id,
SMP2P_REMOTE_MOCK_PROC);
SMP2P_SET_REMOTE_PID(
- rmp->remote_item.header.rem_loc_proc_id,
+ &rmp->remote_item.header, rem_loc_proc_id,
SMP2P_APPS_PROC);
SMP2P_SET_VERSION(
- rmp->remote_item.header.feature_version, 1);
+ &rmp->remote_item.header, feature_version, 1);
SMP2P_SET_FEATURES(
- rmp->remote_item.header.feature_version, 0);
+ &rmp->remote_item.header, feature_version, 0);
SMP2P_SET_ENT_TOTAL(
- rmp->remote_item.header.valid_total_ent, SMP2P_MAX_ENTRY);
+ &rmp->remote_item.header, valid_total_ent, SMP2P_MAX_ENTRY);
SMP2P_SET_ENT_VALID(
- rmp->remote_item.header.valid_total_ent, 0);
+ &rmp->remote_item.header, valid_total_ent, 0);
rmp->remote_item.header.flags = 0x0;
msm_smp2p_set_remote_mock_exists(false);
@@ -277,7 +277,7 @@
UT_ASSERT_PTR(outbound_item, !=, NULL);
UT_ASSERT_INT(negotiation_state, ==, SMP2P_EDGE_STATE_OPENING);
UT_ASSERT_INT(0, ==,
- SMP2P_GET_ENT_VALID(outbound_item->valid_total_ent));
+ SMP2P_GET_ENT_VALID(outbound_item, valid_total_ent));
/* verify that read/write don't work yet */
rmp->rx_interrupt_count = 0;
@@ -358,19 +358,19 @@
sizeof(struct smp2p_smem_item));
rmp->remote_item.header.magic = SMP2P_MAGIC;
SMP2P_SET_LOCAL_PID(
- rmp->remote_item.header.rem_loc_proc_id,
+ &rmp->remote_item.header, rem_loc_proc_id,
SMP2P_REMOTE_MOCK_PROC);
SMP2P_SET_REMOTE_PID(
- rmp->remote_item.header.rem_loc_proc_id,
+ &rmp->remote_item.header, rem_loc_proc_id,
SMP2P_APPS_PROC);
SMP2P_SET_VERSION(
- rmp->remote_item.header.feature_version, 1);
+ &rmp->remote_item.header, feature_version, 1);
SMP2P_SET_FEATURES(
- rmp->remote_item.header.feature_version, 0);
+ &rmp->remote_item.header, feature_version, 0);
SMP2P_SET_ENT_TOTAL(
- rmp->remote_item.header.valid_total_ent, SMP2P_MAX_ENTRY);
+ &rmp->remote_item.header, valid_total_ent, SMP2P_MAX_ENTRY);
SMP2P_SET_ENT_VALID(
- rmp->remote_item.header.valid_total_ent, 1);
+ &rmp->remote_item.header, valid_total_ent, 1);
rmp->remote_item.header.flags = 0x0;
msm_smp2p_set_remote_mock_exists(true);
@@ -781,19 +781,19 @@
sizeof(struct smp2p_smem_item));
rmp->remote_item.header.magic = SMP2P_MAGIC;
SMP2P_SET_LOCAL_PID(
- rmp->remote_item.header.rem_loc_proc_id,
+ &rmp->remote_item.header, rem_loc_proc_id,
SMP2P_REMOTE_MOCK_PROC);
SMP2P_SET_REMOTE_PID(
- rmp->remote_item.header.rem_loc_proc_id,
+ &rmp->remote_item.header, rem_loc_proc_id,
SMP2P_APPS_PROC);
SMP2P_SET_VERSION(
- rmp->remote_item.header.feature_version, 1);
+ &rmp->remote_item.header, feature_version, 1);
SMP2P_SET_FEATURES(
- rmp->remote_item.header.feature_version, 0);
+ &rmp->remote_item.header, feature_version, 0);
SMP2P_SET_ENT_TOTAL(
- rmp->remote_item.header.valid_total_ent, SMP2P_MAX_ENTRY);
+ &rmp->remote_item.header, valid_total_ent, SMP2P_MAX_ENTRY);
SMP2P_SET_ENT_VALID(
- rmp->remote_item.header.valid_total_ent, 0);
+ &rmp->remote_item.header, valid_total_ent, 0);
rmp->remote_item.header.flags = 0x0;
msm_smp2p_set_remote_mock_exists(true);
@@ -879,19 +879,19 @@
sizeof(struct smp2p_smem_item));
rmp->remote_item.header.magic = SMP2P_MAGIC;
SMP2P_SET_LOCAL_PID(
- rmp->remote_item.header.rem_loc_proc_id,
+ &rmp->remote_item.header, rem_loc_proc_id,
SMP2P_REMOTE_MOCK_PROC);
SMP2P_SET_REMOTE_PID(
- rmp->remote_item.header.rem_loc_proc_id,
+ &rmp->remote_item.header, rem_loc_proc_id,
SMP2P_APPS_PROC);
SMP2P_SET_VERSION(
- rmp->remote_item.header.feature_version, 1);
+ &rmp->remote_item.header, feature_version, 1);
SMP2P_SET_FEATURES(
- rmp->remote_item.header.feature_version, 0);
+ &rmp->remote_item.header, feature_version, 0);
SMP2P_SET_ENT_TOTAL(
- rmp->remote_item.header.valid_total_ent, 1);
+ &rmp->remote_item.header, valid_total_ent, 1);
SMP2P_SET_ENT_VALID(
- rmp->remote_item.header.valid_total_ent, 0);
+ &rmp->remote_item.header, valid_total_ent, 0);
rmp->remote_item.header.flags = 0x0;
msm_smp2p_set_remote_mock_exists(true);
@@ -980,13 +980,13 @@
rmp->rx_interrupt_count = 0;
memset(&rmp->remote_item, 0, sizeof(struct smp2p_smem_item));
rhdr->magic = SMP2P_MAGIC;
- SMP2P_SET_LOCAL_PID(rhdr->rem_loc_proc_id,
+ SMP2P_SET_LOCAL_PID(rhdr, rem_loc_proc_id,
SMP2P_REMOTE_MOCK_PROC);
- SMP2P_SET_REMOTE_PID(rhdr->rem_loc_proc_id, SMP2P_APPS_PROC);
- SMP2P_SET_VERSION(rhdr->feature_version, 1);
- SMP2P_SET_FEATURES(rhdr->feature_version, 0);
- SMP2P_SET_ENT_TOTAL(rhdr->valid_total_ent, SMP2P_MAX_ENTRY);
- SMP2P_SET_ENT_VALID(rhdr->valid_total_ent, 0);
+ SMP2P_SET_REMOTE_PID(rhdr, rem_loc_proc_id, SMP2P_APPS_PROC);
+ SMP2P_SET_VERSION(rhdr, feature_version, 1);
+ SMP2P_SET_FEATURES(rhdr, feature_version, 0);
+ SMP2P_SET_ENT_TOTAL(rhdr, valid_total_ent, SMP2P_MAX_ENTRY);
+ SMP2P_SET_ENT_VALID(rhdr, valid_total_ent, 0);
rhdr->flags = 0x0;
msm_smp2p_set_remote_mock_exists(true);
rmp->tx_interrupt();
@@ -1000,10 +1000,10 @@
/* verify no response to ack feature */
rmp->rx_interrupt_count = 0;
- SMP2P_SET_RESTART_DONE(rhdr->flags, 1);
+ SMP2P_SET_RESTART_DONE(rhdr, flags, 1);
rmp->tx_interrupt();
- UT_ASSERT_INT(0, ==, SMP2P_GET_RESTART_DONE(lhdr->flags));
- UT_ASSERT_INT(0, ==, SMP2P_GET_RESTART_ACK(lhdr->flags));
+ UT_ASSERT_INT(0, ==, SMP2P_GET_RESTART_DONE(lhdr, flags));
+ UT_ASSERT_INT(0, ==, SMP2P_GET_RESTART_ACK(lhdr, flags));
UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 0);
/* initialize v1 with SMP2P_FEATURE_SSR_ACK enabled */
@@ -1016,14 +1016,14 @@
rmp->rx_interrupt_count = 0;
memset(&rmp->remote_item, 0, sizeof(struct smp2p_smem_item));
rhdr->magic = SMP2P_MAGIC;
- SMP2P_SET_LOCAL_PID(rhdr->rem_loc_proc_id,
+ SMP2P_SET_LOCAL_PID(rhdr, rem_loc_proc_id,
SMP2P_REMOTE_MOCK_PROC);
- SMP2P_SET_REMOTE_PID(rhdr->rem_loc_proc_id, SMP2P_APPS_PROC);
- SMP2P_SET_VERSION(rhdr->feature_version, 1);
- SMP2P_SET_FEATURES(rhdr->feature_version,
+ SMP2P_SET_REMOTE_PID(rhdr, rem_loc_proc_id, SMP2P_APPS_PROC);
+ SMP2P_SET_VERSION(rhdr, feature_version, 1);
+ SMP2P_SET_FEATURES(rhdr, feature_version,
SMP2P_FEATURE_SSR_ACK);
- SMP2P_SET_ENT_TOTAL(rhdr->valid_total_ent, SMP2P_MAX_ENTRY);
- SMP2P_SET_ENT_VALID(rhdr->valid_total_ent, 0);
+ SMP2P_SET_ENT_TOTAL(rhdr, valid_total_ent, SMP2P_MAX_ENTRY);
+ SMP2P_SET_ENT_VALID(rhdr, valid_total_ent, 0);
rmp->rx_interrupt_count = 0;
rhdr->flags = 0x0;
msm_smp2p_set_remote_mock_exists(true);
@@ -1038,17 +1038,17 @@
/* verify response to ack feature */
rmp->rx_interrupt_count = 0;
- SMP2P_SET_RESTART_DONE(rhdr->flags, 1);
+ SMP2P_SET_RESTART_DONE(rhdr, flags, 1);
rmp->tx_interrupt();
- UT_ASSERT_INT(0, ==, SMP2P_GET_RESTART_DONE(lhdr->flags));
- UT_ASSERT_INT(1, ==, SMP2P_GET_RESTART_ACK(lhdr->flags));
+ UT_ASSERT_INT(0, ==, SMP2P_GET_RESTART_DONE(lhdr, flags));
+ UT_ASSERT_INT(1, ==, SMP2P_GET_RESTART_ACK(lhdr, flags));
UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 1);
rmp->rx_interrupt_count = 0;
- SMP2P_SET_RESTART_DONE(rhdr->flags, 0);
+ SMP2P_SET_RESTART_DONE(rhdr, flags, 0);
rmp->tx_interrupt();
- UT_ASSERT_INT(0, ==, SMP2P_GET_RESTART_DONE(lhdr->flags));
- UT_ASSERT_INT(0, ==, SMP2P_GET_RESTART_ACK(lhdr->flags));
+ UT_ASSERT_INT(0, ==, SMP2P_GET_RESTART_DONE(lhdr, flags));
+ UT_ASSERT_INT(0, ==, SMP2P_GET_RESTART_ACK(lhdr, flags));
UT_ASSERT_INT(rmp->rx_interrupt_count, ==, 1);
seq_puts(s, "\tOK\n");
@@ -1117,15 +1117,15 @@
UT_ASSERT_PTR(NULL, !=, rhdr);
/* get initial state of SSR flags */
- if (SMP2P_GET_FEATURES(rhdr->feature_version)
+ if (SMP2P_GET_FEATURES(rhdr, feature_version)
& SMP2P_FEATURE_SSR_ACK)
ssr_ack_enabled = true;
else
ssr_ack_enabled = false;
- ssr_done_start = SMP2P_GET_RESTART_DONE(rhdr->flags);
+ ssr_done_start = SMP2P_GET_RESTART_DONE(rhdr, flags);
UT_ASSERT_INT(ssr_done_start, ==,
- SMP2P_GET_RESTART_ACK(lhdr->flags));
+ SMP2P_GET_RESTART_ACK(lhdr, flags));
/* trigger restart */
name_index = 0;
@@ -1181,22 +1181,22 @@
if (ssr_ack_enabled) {
ssr_done_start ^= 1;
UT_ASSERT_INT(ssr_done_start, ==,
- SMP2P_GET_RESTART_ACK(lhdr->flags));
+ SMP2P_GET_RESTART_ACK(lhdr, flags));
UT_ASSERT_INT(ssr_done_start, ==,
- SMP2P_GET_RESTART_DONE(rhdr->flags));
+ SMP2P_GET_RESTART_DONE(rhdr, flags));
UT_ASSERT_INT(0, ==,
- SMP2P_GET_RESTART_DONE(lhdr->flags));
+ SMP2P_GET_RESTART_DONE(lhdr, flags));
seq_puts(s, "\tSSR ACK Enabled and Toggled\n");
} else {
UT_ASSERT_INT(0, ==,
- SMP2P_GET_RESTART_DONE(lhdr->flags));
+ SMP2P_GET_RESTART_DONE(lhdr, flags));
UT_ASSERT_INT(0, ==,
- SMP2P_GET_RESTART_ACK(lhdr->flags));
+ SMP2P_GET_RESTART_ACK(lhdr, flags));
UT_ASSERT_INT(0, ==,
- SMP2P_GET_RESTART_DONE(rhdr->flags));
+ SMP2P_GET_RESTART_DONE(rhdr, flags));
UT_ASSERT_INT(0, ==,
- SMP2P_GET_RESTART_ACK(rhdr->flags));
+ SMP2P_GET_RESTART_ACK(rhdr, flags));
seq_puts(s, "\tSSR ACK Disabled\n");
}
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
index 7d3af3e..1ddba9a 100644
--- a/drivers/spi/spi-davinci.c
+++ b/drivers/spi/spi-davinci.c
@@ -651,7 +651,7 @@
buf = t->rx_buf;
t->rx_dma = dma_map_single(&spi->dev, buf,
t->len, DMA_FROM_DEVICE);
- if (!t->rx_dma) {
+ if (dma_mapping_error(&spi->dev, t->rx_dma)) {
ret = -EFAULT;
goto err_rx_map;
}
@@ -665,7 +665,7 @@
buf = (void *)t->tx_buf;
t->tx_dma = dma_map_single(&spi->dev, buf,
t->len, DMA_TO_DEVICE);
- if (!t->tx_dma) {
+ if (dma_mapping_error(&spi->dev, t->tx_dma)) {
ret = -EFAULT;
goto err_tx_map;
}
diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c
index 882cd66..87a0e47 100644
--- a/drivers/spi/spi-dw.c
+++ b/drivers/spi/spi-dw.c
@@ -107,7 +107,10 @@
static int dw_spi_debugfs_init(struct dw_spi *dws)
{
- dws->debugfs = debugfs_create_dir("dw_spi", NULL);
+ char name[128];
+
+ snprintf(name, 128, "dw_spi-%s", dev_name(&dws->master->dev));
+ dws->debugfs = debugfs_create_dir(name, NULL);
if (!dws->debugfs)
return -ENOMEM;
diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
index c2a2b69..eff1637 100644
--- a/drivers/spmi/spmi-pmic-arb.c
+++ b/drivers/spmi/spmi-pmic-arb.c
@@ -25,6 +25,7 @@
#include <linux/slab.h>
#include <linux/spmi.h>
#include <linux/syscore_ops.h>
+#include <linux/wakeup_reason.h>
/* PMIC Arbiter configuration registers */
#define PMIC_ARB_VERSION 0x0000
@@ -562,6 +563,7 @@
else if (desc->action && desc->action->name)
name = desc->action->name;
+ log_base_wakeup_reason(irq);
pr_warn("spmi_show_resume_irq: %d triggered [0x%01x, 0x%02x, 0x%01x] %s\n",
irq, sid, per, id, name);
} else {
diff --git a/drivers/spmi/spmi.c b/drivers/spmi/spmi.c
index b43e565..a62ae90 100644
--- a/drivers/spmi/spmi.c
+++ b/drivers/spmi/spmi.c
@@ -364,11 +364,23 @@
return 0;
}
+static int spmi_drv_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ int ret;
+
+ ret = of_device_uevent_modalias(dev, env);
+ if (ret != -ENODEV)
+ return ret;
+
+ return 0;
+}
+
static struct bus_type spmi_bus_type = {
.name = "spmi",
.match = spmi_device_match,
.probe = spmi_drv_probe,
.remove = spmi_drv_remove,
+ .uevent = spmi_drv_uevent,
};
/**
diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
index 7b4af51..b831f08 100644
--- a/drivers/staging/comedi/comedi_fops.c
+++ b/drivers/staging/comedi/comedi_fops.c
@@ -2901,9 +2901,6 @@
comedi_class->dev_groups = comedi_dev_groups;
- /* XXX requires /proc interface */
- comedi_proc_init();
-
/* create devices files for legacy/manual use */
for (i = 0; i < comedi_num_legacy_minors; i++) {
struct comedi_device *dev;
@@ -2911,6 +2908,7 @@
dev = comedi_alloc_board_minor(NULL);
if (IS_ERR(dev)) {
comedi_cleanup_board_minors();
+ class_destroy(comedi_class);
cdev_del(&comedi_cdev);
unregister_chrdev_region(MKDEV(COMEDI_MAJOR, 0),
COMEDI_NUM_MINORS);
@@ -2920,6 +2918,9 @@
mutex_unlock(&dev->mutex);
}
+ /* XXX requires /proc interface */
+ comedi_proc_init();
+
return 0;
}
module_init(comedi_init);
diff --git a/drivers/staging/comedi/drivers/jr3_pci.c b/drivers/staging/comedi/drivers/jr3_pci.c
index b87192e..109becd 100644
--- a/drivers/staging/comedi/drivers/jr3_pci.c
+++ b/drivers/staging/comedi/drivers/jr3_pci.c
@@ -610,7 +610,7 @@
s = &dev->subdevices[i];
spriv = s->private;
- if (now > spriv->next_time_min) {
+ if (time_after_eq(now, spriv->next_time_min)) {
struct jr3_pci_poll_delay sub_delay;
sub_delay = jr3_pci_poll_subdevice(s);
@@ -726,11 +726,12 @@
s->insn_read = jr3_pci_ai_insn_read;
spriv = jr3_pci_alloc_spriv(dev, s);
- if (spriv) {
- /* Channel specific range and maxdata */
- s->range_table_list = spriv->range_table_list;
- s->maxdata_list = spriv->maxdata_list;
- }
+ if (!spriv)
+ return -ENOMEM;
+
+ /* Channel specific range and maxdata */
+ s->range_table_list = spriv->range_table_list;
+ s->maxdata_list = spriv->maxdata_list;
}
/* Reset DSP card */
diff --git a/drivers/staging/easel/regulator/bcm15602-gpio.c b/drivers/staging/easel/regulator/bcm15602-gpio.c
index 185135e..74a755b 100644
--- a/drivers/staging/easel/regulator/bcm15602-gpio.c
+++ b/drivers/staging/easel/regulator/bcm15602-gpio.c
@@ -29,6 +29,7 @@
struct gpio_chip gpio_chip;
struct bcm15602_chip *bcm15602;
unsigned long int pin_used;
+ int value[BCM15602_NUM_GPIOS];
};
static inline struct bcm15602_gpio *to_bcm15602_gpio
@@ -76,9 +77,11 @@
{
struct bcm15602_gpio *bcm15602_gpio = to_bcm15602_gpio(gpio_chip);
struct bcm15602_chip *bcm15602 = bcm15602_gpio->bcm15602;
+ u8 reg_data = 0xC;
- bcm15602_update_bits(bcm15602, BCM15602_REG_SYS_GPIO_OUT_CTRL,
- (1 << offset), ((value ? 1 : 0) << offset));
+ bcm15602_gpio->value[offset] = (value ? 1 : 0);
+ reg_data |= (bcm15602_gpio->value[1] << 1) | bcm15602_gpio->value[0];
+ bcm15602_write_byte(bcm15602, BCM15602_REG_SYS_GPIO_OUT_CTRL, reg_data);
}
static int bcm15602_gpio_direction_output(struct gpio_chip *gpio_chip,
diff --git a/drivers/staging/easel/regulator/bcm15602-regulator-sysfs.c b/drivers/staging/easel/regulator/bcm15602-regulator-sysfs.c
index 9c0e8a0..221474c 100644
--- a/drivers/staging/easel/regulator/bcm15602-regulator-sysfs.c
+++ b/drivers/staging/easel/regulator/bcm15602-regulator-sysfs.c
@@ -21,64 +21,8 @@
#include "bcm15602-regulator.h"
-static ssize_t bcm15602_attr_show_asr_curr
- (struct device *dev, struct device_attribute *mattr, char *data);
-static ssize_t bcm15602_attr_show_sdsr_curr
- (struct device *dev, struct device_attribute *mattr, char *data);
-static ssize_t bcm15602_attr_show_sdldo_curr
- (struct device *dev, struct device_attribute *mattr, char *data);
-static ssize_t bcm15602_attr_show_ioldo_curr
- (struct device *dev, struct device_attribute *mattr, char *data);
-static ssize_t bcm15602_attr_show_vbat
- (struct device *dev, struct device_attribute *mattr, char *data);
-static ssize_t bcm15602_attr_show_temperature
- (struct device *dev, struct device_attribute *mattr, char *data);
-static ssize_t bcm15602_attr_show_total_power
- (struct device *dev, struct device_attribute *mattr, char *data);
-static ssize_t bcm15602_attr_show_hk_status
- (struct device *dev, struct device_attribute *mattr, char *data);
-static ssize_t bcm15602_attr_show_asr_vsel
- (struct device *dev, struct device_attribute *mattr, char *data);
-static ssize_t bcm15602_attr_store_asr_vsel
- (struct device *dev, struct device_attribute *mattr, const char *data,
- size_t count);
-static ssize_t bcm15602_attr_show_asr_dual_phase
- (struct device *dev, struct device_attribute *mattr, char *data);
-static ssize_t bcm15602_attr_store_asr_dual_phase
- (struct device *dev, struct device_attribute *mattr, const char *data,
- size_t count);
-
-DEVICE_ATTR(asr_curr, 0440, bcm15602_attr_show_asr_curr, NULL);
-DEVICE_ATTR(sdsr_curr, 0440, bcm15602_attr_show_sdsr_curr, NULL);
-DEVICE_ATTR(sdldo_curr, 0440, bcm15602_attr_show_sdldo_curr, NULL);
-DEVICE_ATTR(ioldo_curr, 0440, bcm15602_attr_show_ioldo_curr, NULL);
-DEVICE_ATTR(vbat, 0440, bcm15602_attr_show_vbat, NULL);
-DEVICE_ATTR(temperature, 0440, bcm15602_attr_show_temperature, NULL);
-DEVICE_ATTR(total_power, 0440, bcm15602_attr_show_total_power, NULL);
-DEVICE_ATTR(hk_status, 0440, bcm15602_attr_show_hk_status, NULL);
-DEVICE_ATTR(asr_vsel, S_IWUSR | S_IRUGO, bcm15602_attr_show_asr_vsel,
- bcm15602_attr_store_asr_vsel);
-DEVICE_ATTR(asr_dual_phase, S_IWUSR | S_IRUGO,
- bcm15602_attr_show_asr_dual_phase,
- bcm15602_attr_store_asr_dual_phase);
-
-static struct attribute *bcm15602_attrs[] = {
- &dev_attr_asr_curr.attr,
- &dev_attr_sdsr_curr.attr,
- &dev_attr_sdldo_curr.attr,
- &dev_attr_ioldo_curr.attr,
- &dev_attr_vbat.attr,
- &dev_attr_temperature.attr,
- &dev_attr_total_power.attr,
- &dev_attr_hk_status.attr,
- &dev_attr_asr_vsel.attr,
- &dev_attr_asr_dual_phase.attr,
- NULL
-};
-
-static const struct attribute_group bcm15602_attr_group = {
- .attrs = bcm15602_attrs,
-};
+/* used for reg_read attribute */
+static u8 _reg_read_addr;
/* asr_curr is in uA */
static int bcm15602_get_asr_curr(struct bcm15602_chip *ddata, int *asr_curr)
@@ -132,9 +76,9 @@
return 0;
}
-static ssize_t bcm15602_attr_show_asr_curr(struct device *dev,
- struct device_attribute *mattr,
- char *data)
+static ssize_t asr_curr_show(struct device *dev,
+ struct device_attribute *mattr,
+ char *data)
{
struct bcm15602_chip *ddata = dev_get_drvdata(dev);
int asr_curr;
@@ -143,10 +87,11 @@
return snprintf(data, PAGE_SIZE, "%d\n", asr_curr);
}
+DEVICE_ATTR_RO(asr_curr);
-static ssize_t bcm15602_attr_show_sdsr_curr(struct device *dev,
- struct device_attribute *mattr,
- char *data)
+static ssize_t sdsr_curr_show(struct device *dev,
+ struct device_attribute *mattr,
+ char *data)
{
struct bcm15602_chip *ddata = dev_get_drvdata(dev);
int sdsr_curr;
@@ -155,10 +100,11 @@
return snprintf(data, PAGE_SIZE, "%d\n", sdsr_curr);
}
+DEVICE_ATTR_RO(sdsr_curr);
-static ssize_t bcm15602_attr_show_sdldo_curr(struct device *dev,
- struct device_attribute *mattr,
- char *data)
+static ssize_t sdldo_curr_show(struct device *dev,
+ struct device_attribute *mattr,
+ char *data)
{
struct bcm15602_chip *ddata = dev_get_drvdata(dev);
int sdldo_curr;
@@ -167,10 +113,11 @@
return snprintf(data, PAGE_SIZE, "%d\n", sdldo_curr);
}
+DEVICE_ATTR_RO(sdldo_curr);
-static ssize_t bcm15602_attr_show_ioldo_curr(struct device *dev,
- struct device_attribute *mattr,
- char *data)
+static ssize_t ioldo_curr_show(struct device *dev,
+ struct device_attribute *mattr,
+ char *data)
{
struct bcm15602_chip *ddata = dev_get_drvdata(dev);
int ioldo_curr;
@@ -179,10 +126,11 @@
return snprintf(data, PAGE_SIZE, "%d\n", ioldo_curr);
}
+DEVICE_ATTR_RO(ioldo_curr);
-static ssize_t bcm15602_attr_show_vbat(struct device *dev,
- struct device_attribute *mattr,
- char *data)
+static ssize_t vbat_show(struct device *dev,
+ struct device_attribute *mattr,
+ char *data)
{
struct bcm15602_chip *ddata = dev_get_drvdata(dev);
u16 chan_data;
@@ -192,10 +140,11 @@
return snprintf(data, PAGE_SIZE, "%d\n",
chan_data * BCM15602_ADC_SCALE_VBAT);
}
+DEVICE_ATTR_RO(vbat);
-static ssize_t bcm15602_attr_show_temperature(struct device *dev,
- struct device_attribute *mattr,
- char *data)
+static ssize_t temperature_show(struct device *dev,
+ struct device_attribute *mattr,
+ char *data)
{
struct bcm15602_chip *ddata = dev_get_drvdata(dev);
u16 chan_data;
@@ -204,11 +153,12 @@
return snprintf(data, PAGE_SIZE, "%d\n", PTAT_CODE_TO_TEMP(chan_data));
}
+DEVICE_ATTR_RO(temperature);
/* shows power in mW */
-static ssize_t bcm15602_attr_show_total_power(struct device *dev,
- struct device_attribute *mattr,
- char *data)
+static ssize_t total_power_show(struct device *dev,
+ struct device_attribute *mattr,
+ char *data)
{
struct bcm15602_chip *ddata = dev_get_drvdata(dev);
int asr_curr, sdsr_curr, sdldo_curr, ioldo_curr;
@@ -226,10 +176,11 @@
return snprintf(data, PAGE_SIZE, "%ld\n", total_power);
}
+DEVICE_ATTR_RO(total_power);
-static ssize_t bcm15602_attr_show_hk_status(struct device *dev,
- struct device_attribute *mattr,
- char *data)
+static ssize_t hk_status_show(struct device *dev,
+ struct device_attribute *mattr,
+ char *data)
{
struct bcm15602_chip *ddata = dev_get_drvdata(dev);
u16 status;
@@ -239,10 +190,11 @@
return snprintf(data, PAGE_SIZE, "0x%04x\n", status);
}
+DEVICE_ATTR_RO(hk_status);
-static ssize_t bcm15602_attr_show_asr_vsel(struct device *dev,
- struct device_attribute *mattr,
- char *data)
+static ssize_t asr_vsel_show(struct device *dev,
+ struct device_attribute *mattr,
+ char *data)
{
struct bcm15602_chip *ddata = dev_get_drvdata(dev);
u8 reg_data;
@@ -254,9 +206,9 @@
return snprintf(data, PAGE_SIZE, "%d\n", 565 + voctrl * 5);
}
-static ssize_t bcm15602_attr_store_asr_vsel(struct device *dev,
- struct device_attribute *mattr,
- const char *data, size_t count)
+static ssize_t asr_vsel_store(struct device *dev,
+ struct device_attribute *mattr,
+ const char *data, size_t count)
{
struct bcm15602_chip *ddata = dev_get_drvdata(dev);
int vsel, voctrl;
@@ -281,47 +233,125 @@
return count;
}
+DEVICE_ATTR_RW(asr_vsel);
-static ssize_t bcm15602_attr_show_asr_dual_phase(
- struct device *dev, struct device_attribute *mattr, char *data)
+static ssize_t dump_regs_show(struct device *dev,
+ struct device_attribute *mattr,
+ char *data)
+{
+ struct bcm15602_chip *ddata = dev_get_drvdata(dev);
+
+ bcm15602_dump_regs(ddata);
+
+ return snprintf(data, PAGE_SIZE, "ok\n");
+}
+DEVICE_ATTR_RO(dump_regs);
+
+static ssize_t toggle_pon_show(struct device *dev,
+ struct device_attribute *mattr,
+ char *data)
+{
+ struct bcm15602_chip *ddata = dev_get_drvdata(dev);
+
+ bcm15602_toggle_pon(ddata);
+
+ return snprintf(data, PAGE_SIZE, "ok\n");
+}
+DEVICE_ATTR_RO(toggle_pon);
+
+static ssize_t reg_read_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
struct bcm15602_chip *ddata = dev_get_drvdata(dev);
u8 reg_data;
- bcm15602_read_byte(ddata, BCM15602_REG_BUCK_ASR_TSET_CTRL2, ®_data);
+ bcm15602_read_byte(ddata, _reg_read_addr, ®_data);
- return snprintf(data, PAGE_SIZE, "%d\n", (reg_data & 0x10) >> 4);
+ return snprintf(buf, PAGE_SIZE, "0x%02x\n", reg_data);
}
-static ssize_t bcm15602_attr_store_asr_dual_phase(
- struct device *dev, struct device_attribute *mattr, const char *data,
- size_t count)
+static ssize_t reg_read_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
{
struct bcm15602_chip *ddata = dev_get_drvdata(dev);
- int val;
+ int val = 0;
+ int ret;
- if (kstrtoint(data, 0, &val)) {
- dev_err(dev, "%s: Not a valid int\n", __func__);
+ ret = kstrtoint(buf, 0, &val);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(ddata->dev, "%s: %d\n", __func__, val);
+
+ if ((val < 0) || (val > 0xff))
return -EINVAL;
- }
- if ((val != 0) && (val != 1)) {
- dev_err(dev, "%s: Can only write 0 or 1\n", __func__);
- return -EINVAL;
- }
-
- if (ddata->rev_id >= BCM15602_REV_A1) {
- dev_warn(dev,
- "%s: Skipping workaround for this silicon version\n",
- __func__);
- return count;
- }
-
- bcm15602_update_bits(ddata, BCM15602_REG_BUCK_ASR_TSET_CTRL2, 0x10,
- (val & 0x1) << 4);
+ _reg_read_addr = val;
return count;
}
+DEVICE_ATTR_RW(reg_read);
+
+static ssize_t reg_write_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct bcm15602_chip *ddata = dev_get_drvdata(dev);
+ int val1 = 0, val2 = 0;
+ int ret;
+ char *str;
+
+ str = strsep((char **)&buf, ";");
+ if (!str)
+ return -EINVAL;
+
+ ret = kstrtoint(str, 0, &val1);
+ if (ret < 0)
+ return ret;
+
+ if ((val1 < 0) || (val1 > 0xff))
+ return -EINVAL;
+
+ ret = kstrtoint(buf, 0, &val2);
+ if (ret < 0)
+ return ret;
+
+ if ((val2 < 0) || (val2 > 0xff))
+ return -EINVAL;
+
+ dev_dbg(ddata->dev, "%s: reg 0x%02x, data 0x%02x\n", __func__,
+ val1, val2);
+
+ bcm15602_write_byte(ddata, val1, val2);
+
+ return count;
+}
+DEVICE_ATTR_WO(reg_write);
+
+static struct attribute *bcm15602_attrs[] = {
+ &dev_attr_asr_curr.attr,
+ &dev_attr_sdsr_curr.attr,
+ &dev_attr_sdldo_curr.attr,
+ &dev_attr_ioldo_curr.attr,
+ &dev_attr_vbat.attr,
+ &dev_attr_temperature.attr,
+ &dev_attr_total_power.attr,
+ &dev_attr_hk_status.attr,
+ &dev_attr_asr_vsel.attr,
+ &dev_attr_dump_regs.attr,
+ &dev_attr_toggle_pon.attr,
+ &dev_attr_reg_read.attr,
+ &dev_attr_reg_write.attr,
+ NULL
+};
+
+static const struct attribute_group bcm15602_attr_group = {
+ .attrs = bcm15602_attrs,
+};
void bcm15602_config_sysfs(struct device *dev)
{
diff --git a/drivers/staging/easel/regulator/bcm15602-regulator.c b/drivers/staging/easel/regulator/bcm15602-regulator.c
index 1695c1f..f61147b 100644
--- a/drivers/staging/easel/regulator/bcm15602-regulator.c
+++ b/drivers/staging/easel/regulator/bcm15602-regulator.c
@@ -266,7 +266,7 @@
},
};
-static int bcm15602_toggle_pon(struct bcm15602_chip *ddata)
+int bcm15602_toggle_pon(struct bcm15602_chip *ddata)
{
dev_err(ddata->dev, "%s: device is stuck, toggling PON\n", __func__);
@@ -275,6 +275,21 @@
return 0;
}
+EXPORT_SYMBOL(bcm15602_toggle_pon);
+
+int bcm15602_dump_regs(struct bcm15602_chip *ddata)
+{
+ u8 reg_data;
+ int i;
+
+ for (i = 0; i <= BCM15602_REG_WDT_WDTSTST; i++) {
+ bcm15602_read_byte(ddata, i, ®_data);
+ dev_info(ddata->dev, "[0x%02x] = 0x%02x\n", i, reg_data);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(bcm15602_dump_regs);
static inline bool bcm15602_is_ready(struct bcm15602_chip *ddata)
{
@@ -974,7 +989,7 @@
break;
case BCM15602_ID_ASR:
ret = bcm15602_write_byte(ddata, BCM15602_REG_BUCK_ASR_CTRL0,
- 0xC5);
+ 0xC1);
break;
case BCM15602_ID_SDSR:
ret = bcm15602_write_byte(ddata, BCM15602_REG_BUCK_SDSR_CTRL0,
@@ -1010,7 +1025,7 @@
break;
case BCM15602_ID_ASR:
ret = bcm15602_write_byte(ddata, BCM15602_REG_BUCK_ASR_CTRL0,
- 0xC4);
+ 0xC0);
break;
case BCM15602_ID_SDSR:
ret = bcm15602_write_byte(ddata, BCM15602_REG_BUCK_SDSR_CTRL0,
@@ -1167,8 +1182,7 @@
if (ddata->rev_id < BCM15602_REV_A1) {
/* enable bandgap curvature correction for improved accuracy */
- bcm15602_update_bits(ddata, BCM15602_REG_ADC_BGCTRL, 0x40,
- 0x40);
+ bcm15602_write_byte(ddata, BCM15602_REG_ADC_BGCTRL, 0x7b);
/* unlock register, then set ASR switching frequency trim */
bcm15602_write_byte(ddata, BCM15602_REG_SYS_WRLOCKEY, 0x38);
@@ -1189,13 +1203,14 @@
/* set ASR rail to 0.9V */
bcm15602_write_byte(ddata, BCM15602_REG_BUCK_ASR_VOCTRL, 0x43);
- /* set ASR to single phase */
- bcm15602_update_bits(ddata, BCM15602_REG_BUCK_ASR_TSET_CTRL2,
- 0x10, 0x00);
+ /* disable zero-I detection */
+ bcm15602_write_byte(ddata, BCM15602_REG_BUCK_ASR_CTRL0, 0xC0);
} else if (ddata->rev_id == BCM15602_REV_A1) {
/* enable bandgap curvature correction for improved accuracy */
- bcm15602_update_bits(ddata, BCM15602_REG_ADC_BGCTRL, 0x40,
- 0x40);
+ bcm15602_write_byte(ddata, BCM15602_REG_ADC_BGCTRL, 0x7b);
+
+ /* disable zero-I detection */
+ bcm15602_write_byte(ddata, BCM15602_REG_BUCK_ASR_CTRL0, 0xC0);
}
return 0;
@@ -1233,6 +1248,7 @@
/* set client data */
i2c_set_clientdata(client, ddata);
+ dev_set_drvdata(dev, ddata);
/* get platform data */
pdata = dev_get_platdata(dev);
@@ -1264,9 +1280,6 @@
return -ENOMEM;
}
- /* create sysfs attributes */
- bcm15602_config_sysfs(dev);
-
/* request GPIOs and IRQs */
devm_gpio_request_one(dev, pdata->pon_gpio, GPIOF_OUT_INIT_LOW,
"BCM15602 PON");
@@ -1318,6 +1331,9 @@
goto error_reset;
}
+ /* create sysfs attributes */
+ bcm15602_config_sysfs(dev);
+
/* enable the irq after power on */
enable_irq(pdata->resetb_irq);
@@ -1349,11 +1365,36 @@
#ifdef CONFIG_PM
static int bcm15602_suspend(struct device *dev)
{
+ struct bcm15602_chip *ddata;
struct bcm15602_platform_data *pdata;
+ u8 reg_addr, reg_data;
+ int rid, ret;
pdata = dev_get_platdata(dev);
if (pdata)
enable_irq_wake(pdata->intb_irq);
+
+ ddata = dev_get_drvdata(dev);
+ if (!ddata)
+ return 0;
+
+ /* check to see if any regulators were left enabled */
+ for (rid = 0; rid < BCM15602_NUM_REGULATORS; rid++) {
+ if (rid == BCM15602_ID_SDLDO)
+ reg_addr = BCM15602_REG_LDO_SDLDO_ENCTRL;
+ else if (rid == BCM15602_ID_IOLDO)
+ reg_addr = BCM15602_REG_LDO_IOLDO_ENCTRL;
+ else if (rid == BCM15602_ID_ASR)
+ reg_addr = BCM15602_REG_BUCK_ASR_CTRL0;
+ else
+ reg_addr = BCM15602_REG_BUCK_SDSR_CTRL0;
+
+ ret = bcm15602_read_byte(ddata, reg_addr, ®_data);
+ if (!ret && (reg_data & 0x1))
+ dev_err(dev, "%s: regulator %d is still enabled\n",
+ __func__, rid);
+ }
+
return 0;
}
diff --git a/drivers/staging/easel/regulator/bcm15602-regulator.h b/drivers/staging/easel/regulator/bcm15602-regulator.h
index 8fe2e67..0724c87 100644
--- a/drivers/staging/easel/regulator/bcm15602-regulator.h
+++ b/drivers/staging/easel/regulator/bcm15602-regulator.h
@@ -358,6 +358,8 @@
int bcm15602_read_hk_slot(struct bcm15602_chip *ddata,
int slot_num, u16 *slot_data);
+int bcm15602_toggle_pon(struct bcm15602_chip *ddata);
+int bcm15602_dump_regs(struct bcm15602_chip *ddata);
void bcm15602_config_sysfs(struct device *dev);
#endif /* BCM15602_REGULATOR_H */
diff --git a/drivers/staging/emxx_udc/emxx_udc.c b/drivers/staging/emxx_udc/emxx_udc.c
index 4e6c16a..91ff8fb 100644
--- a/drivers/staging/emxx_udc/emxx_udc.c
+++ b/drivers/staging/emxx_udc/emxx_udc.c
@@ -3181,7 +3181,7 @@
};
/*-------------------------------------------------------------------------*/
-static void __init nbu2ss_drv_ep_init(struct nbu2ss_udc *udc)
+static void nbu2ss_drv_ep_init(struct nbu2ss_udc *udc)
{
int i;
@@ -3211,7 +3211,7 @@
/*-------------------------------------------------------------------------*/
/* platform_driver */
-static int __init nbu2ss_drv_contest_init(
+static int nbu2ss_drv_contest_init(
struct platform_device *pdev,
struct nbu2ss_udc *udc)
{
diff --git a/drivers/staging/fw-api/fw/wlan_defs.h b/drivers/staging/fw-api/fw/wlan_defs.h
index d627c2a..d81973f 100644
--- a/drivers/staging/fw-api/fw/wlan_defs.h
+++ b/drivers/staging/fw-api/fw/wlan_defs.h
@@ -905,9 +905,29 @@
A_UINT16 tid_sw_qdepth[DBG_STATS_MAX_TID_NUM]; /* WAL_MAX_TID is 20 */
};
-struct wlan_dbg_tidq_stats{
+struct wlan_dbg_tidq_stats {
A_UINT32 wlan_dbg_tid_txq_status;
struct wlan_dbg_txq_stats txq_st;
};
+typedef enum {
+ WLAN_DBG_DATA_STALL_NONE = 0,
+ WLAN_DBG_DATA_STALL_VDEV_PAUSE, /* 1 */
+ WLAN_DBG_DATA_STALL_HWSCHED_CMD_FILTER, /* 2 */
+ WLAN_DBG_DATA_STALL_HWSCHED_CMD_FLUSH, /* 3 */
+ WLAN_DBG_DATA_STALL_RX_REFILL_FAILED, /* 4 */
+ WLAN_DBG_DATA_STALL_RX_FCS_LEN_ERROR, /* 5 */
+ WLAN_DBG_DATA_STALL_MAC_WDOG_ERRORS, /* 6 */ /* Mac watch dog */
+ WLAN_DBG_DATA_STALL_PHY_BB_WDOG_ERROR, /* 7 */ /* PHY watch dog */
+ WLAN_DBG_DATA_STALL_MAX,
+} wlan_dbg_data_stall_type_e;
+
+typedef enum {
+ WLAN_DBG_DATA_STALL_RECOVERY_NONE = 0,
+ WLAN_DBG_DATA_STALL_RECOVERY_CONNECT_DISCONNECT,
+ WLAN_DBG_DATA_STALL_RECOVERY_CONNECT_MAC_PHY_RESET,
+ WLAN_DBG_DATA_STALL_RECOVERY_CONNECT_PDR,
+ WLAN_DBG_DATA_STALL_RECOVERY_CONNECT_SSR,
+} wlan_dbg_data_stall_recovery_type_e;
+
#endif /* __WLANDEFS_H__ */
diff --git a/drivers/staging/fw-api/fw/wmi_services.h b/drivers/staging/fw-api/fw/wmi_services.h
index 3a8251b..c2359b4 100644
--- a/drivers/staging/fw-api/fw/wmi_services.h
+++ b/drivers/staging/fw-api/fw/wmi_services.h
@@ -211,6 +211,12 @@
WMI_SERVICE_TX_PPDU_INFO_STATS_SUPPORT=129, /* support to report tx ppdu info stats via htt events */
WMI_SERVICE_VDEV_LIMIT_OFFCHAN_SUPPORT=130, /* support to report the offchannel duration limiting capability on connected interface */
WMI_SERVICE_FILS_SUPPORT=131, /* support for FILS */
+ WMI_SERVICE_WLAN_OIC_PING_OFFLOAD=132, /* Support for wlan OIC ping service */
+ WMI_SERVICE_WLAN_DHCP_RENEW=133, /* Support for wlan DHCP Renew service */
+ WMI_SERVICE_MAWC_SUPPORT = 134, /* Support for MAWC service */
+ WMI_SERVICE_VDEV_LATENCY_CONFIG=135, /* support for vdev latency config */
+ WMI_SERVICE_PDEV_UPDATE_CTLTABLE_SUPPORT=136, /* support for pdev update ctl table */
+
/******* ADD NEW SERVICES HERE *******/
WMI_MAX_EXT_SERVICE
diff --git a/drivers/staging/fw-api/fw/wmi_tlv_defs.h b/drivers/staging/fw-api/fw/wmi_tlv_defs.h
index d073cb30..16c7149 100644
--- a/drivers/staging/fw-api/fw/wmi_tlv_defs.h
+++ b/drivers/staging/fw-api/fw/wmi_tlv_defs.h
@@ -847,6 +847,17 @@
WMITLV_TAG_STRUC_wmi_roam_fils_synch_tlv_param,
WMITLV_TAG_STRUC_wmi_gtk_offload_extended_tlv_param,
WMITLV_TAG_STRUC_wmi_roam_bg_scan_roaming_param,
+ WMITLV_TAG_STRUC_wmi_oic_ping_offload_params_cmd_fixed_param,
+ WMITLV_TAG_STRUC_wmi_oic_ping_offload_set_enable_cmd_fixed_param,
+ WMITLV_TAG_STRUC_wmi_oic_ping_handoff_event,
+ WMITLV_TAG_STRUC_wmi_dhcp_lease_renew_offload_cmd_fixed_param,
+ WMITLV_TAG_STRUC_wmi_dhcp_lease_renew_event,
+ WMITLV_TAG_STRUC_wmi_btm_config_fixed_param,
+ WMITLV_TAG_STRUC_wmi_debug_mesg_fw_data_stall_param,
+ WMITLV_TAG_STRUC_wmi_wlm_config_cmd_fixed_param,
+ WMITLV_TAG_STRUC_wmi_pdev_update_ctltable_request_fixed_param,
+ WMITLV_TAG_STRUC_wmi_pdev_update_ctltable_event_fixed_param,
+ WMITLV_TAG_STRUC_wmi_roam_cnd_scoring_param,
} WMITLV_TAG_ID;
/*
@@ -1185,6 +1196,12 @@
OP(WMI_VDEV_LIMIT_OFFCHAN_CMDID) \
OP(WMI_PDEV_UPDATE_FILS_HLP_PKT_CMDID) \
OP(WMI_PDEV_UPDATE_PMK_CACHE_CMDID) \
+ OP(WMI_HB_OIC_PING_OFFLOAD_PARAM_CMDID) \
+ OP(WMI_HB_OIC_PING_OFFLOAD_SET_ENABLE_CMDID) \
+ OP(WMI_HB_DHCP_LEASE_RENEW_OFFLOAD_CMDID) \
+ OP(WMI_ROAM_BTM_CONFIG_CMDID) \
+ OP(WMI_WLM_CONFIG_CMDID) \
+ OP(WMI_PDEV_UPDATE_CTLTABLE_REQUEST_CMDID) \
/* add new CMD_LIST elements above this line */
@@ -1373,6 +1390,7 @@
OP(WMI_OEM_DMA_BUF_RELEASE_EVENTID) \
OP(WMI_PDEV_BSS_CHAN_INFO_EVENTID) \
OP(WMI_UNIT_TEST_EVENTID) \
+ OP(WMI_PDEV_UPDATE_CTLTABLE_EVENTID) \
/* add new EVT_LIST elements above this line */
@@ -2220,6 +2238,13 @@
WMITLV_CREATE_PARAM_STRUC(WMI_PDEV_BSS_CHAN_INFO_REQUEST_CMDID);
+/* PDEV update ctl table Cmd */
+#define WMITLV_TABLE_WMI_PDEV_UPDATE_CTLTABLE_REQUEST_CMDID(id,op,buf,len) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_pdev_update_ctltable_request_fixed_param, wmi_pdev_update_ctltable_request_fixed_param, fixed_param, WMITLV_SIZE_FIX) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_BYTE, A_UINT8, ctltable_data, WMITLV_SIZE_VAR)
+
+WMITLV_CREATE_PARAM_STRUC(WMI_PDEV_UPDATE_CTLTABLE_REQUEST_CMDID);
+
/* VDEV Get Tx power Cmd */
#define WMITLV_TABLE_WMI_VDEV_GET_TX_POWER_CMDID(id,op,buf,len) \
WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_vdev_get_tx_power_cmd_fixed_param, wmi_vdev_get_tx_power_cmd_fixed_param, fixed_param, WMITLV_SIZE_FIX)
@@ -2252,7 +2277,8 @@
/* Roam AP profile Cmd */
#define WMITLV_TABLE_WMI_ROAM_AP_PROFILE(id,op,buf,len) \
WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_roam_ap_profile_fixed_param, wmi_roam_ap_profile_fixed_param, fixed_param, WMITLV_SIZE_FIX) \
- WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_ap_profile, wmi_ap_profile, ap_profile, WMITLV_SIZE_FIX)
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_ap_profile, wmi_ap_profile, ap_profile, WMITLV_SIZE_FIX) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_roam_cnd_scoring_param, wmi_roam_cnd_scoring_param, roam_cnd_scoring_param, WMITLV_SIZE_FIX)
WMITLV_CREATE_PARAM_STRUC(WMI_ROAM_AP_PROFILE);
@@ -3397,6 +3423,30 @@
WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_STRUC, wmi_therm_throt_level_config_info, therm_throt_level_config_info, WMITLV_SIZE_VAR)
WMITLV_CREATE_PARAM_STRUC(WMI_THERM_THROT_SET_CONF_CMDID);
+/* OIC ping offload cmd */
+#define WMITLV_TABLE_WMI_HB_OIC_PING_OFFLOAD_PARAM_CMDID(id,op,buf,len) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_oic_ping_offload_params_cmd_fixed_param, wmi_oic_ping_offload_params_cmd_fixed_param, fixed_param, WMITLV_SIZE_FIX)
+WMITLV_CREATE_PARAM_STRUC(WMI_HB_OIC_PING_OFFLOAD_PARAM_CMDID);
+
+#define WMITLV_TABLE_WMI_HB_OIC_PING_OFFLOAD_SET_ENABLE_CMDID(id,op,buf,len) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_oic_ping_offload_set_enable_cmd_fixed_param, wmi_oic_ping_offload_set_enable_cmd_fixed_param, fixed_param, WMITLV_SIZE_FIX)
+WMITLV_CREATE_PARAM_STRUC(WMI_HB_OIC_PING_OFFLOAD_SET_ENABLE_CMDID);
+
+/* DHCP Lease Renew Offload cmd */
+#define WMITLV_TABLE_WMI_HB_DHCP_LEASE_RENEW_OFFLOAD_CMDID(id,op,buf,len) \
+ WMITLV_ELEM(id,op,buf,len,WMITLV_TAG_STRUC_wmi_dhcp_lease_renew_offload_cmd_fixed_param, wmi_dhcp_lease_renew_offload_cmd_fixed_param, fixed_param, WMITLV_SIZE_FIX)
+WMITLV_CREATE_PARAM_STRUC(WMI_HB_DHCP_LEASE_RENEW_OFFLOAD_CMDID);
+
+/* BTM config command */
+#define WMITLV_TABLE_WMI_ROAM_BTM_CONFIG_CMDID(id,op,buf,len) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_btm_config_fixed_param, wmi_btm_config_fixed_param, fixed_param, WMITLV_SIZE_FIX)
+WMITLV_CREATE_PARAM_STRUC(WMI_ROAM_BTM_CONFIG_CMDID);
+
+/* vdev latency config cmd */
+#define WMITLV_TABLE_WMI_WLM_CONFIG_CMDID(id,op,buf,len) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_wlm_config_cmd_fixed_param, wmi_wlm_config_cmd_fixed_param, fixed_param, WMITLV_SIZE_FIX)
+WMITLV_CREATE_PARAM_STRUC(WMI_WLM_CONFIG_CMDID);
+
/************************** TLV definitions of WMI events *******************************/
@@ -3518,6 +3568,11 @@
WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_pdev_bss_chan_info_event_fixed_param, wmi_pdev_bss_chan_info_event_fixed_param, fixed_param, WMITLV_SIZE_FIX)
WMITLV_CREATE_PARAM_STRUC(WMI_PDEV_BSS_CHAN_INFO_EVENTID);
+/* PDEV update ctl table Event */
+#define WMITLV_TABLE_WMI_PDEV_UPDATE_CTLTABLE_EVENTID(id,op,buf,len) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_pdev_update_ctltable_event_fixed_param, wmi_pdev_update_ctltable_event_fixed_param, fixed_param, WMITLV_SIZE_FIX)
+WMITLV_CREATE_PARAM_STRUC(WMI_PDEV_UPDATE_CTLTABLE_EVENTID);
+
/* VDEV Tx Power Event */
#define WMITLV_TABLE_WMI_VDEV_GET_TX_POWER_EVENTID(id,op,buf,len) \
WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_vdev_tx_power_event_fixed_param, wmi_vdev_get_tx_power_event_fixed_param, fixed_param, WMITLV_SIZE_FIX)
@@ -3649,7 +3704,9 @@
WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_STRUC, WOW_EVENT_INFO_SECTION_BITMAP, wow_bitmap_info, WMITLV_SIZE_VAR) \
WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_BYTE, A_UINT8, wow_packet_buffer, WMITLV_SIZE_VAR) \
WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_STRUC, wmi_hb_ind_event_fixed_param, hb_indevt, WMITLV_SIZE_VAR) \
- WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_STRUC, WMI_GTK_OFFLOAD_STATUS_EVENT_fixed_param, wow_gtkigtk, WMITLV_SIZE_VAR)
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_STRUC, WMI_GTK_OFFLOAD_STATUS_EVENT_fixed_param, wow_gtkigtk, WMITLV_SIZE_VAR) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_STRUC, wmi_oic_ping_handoff_event, wow_oic_ping_handoff, WMITLV_SIZE_VAR) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_STRUC, wmi_dhcp_lease_renew_event, wow_dhcp_lease_renew, WMITLV_SIZE_VAR)
WMITLV_CREATE_PARAM_STRUC(WMI_WOW_WAKEUP_HOST_EVENTID);
#define WMITLV_TABLE_WMI_WOW_INITIAL_WAKEUP_EVENTID(id,op,buf,len) \
@@ -3699,7 +3756,8 @@
WMITLV_CREATE_PARAM_STRUC(WMI_DEBUG_MESG_EVENTID);
#define WMITLV_TABLE_WMI_DEBUG_MESG_FLUSH_COMPLETE_EVENTID(id,op,buf,len)\
- WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_debug_mesg_flush_complete_fixed_param, wmi_debug_mesg_flush_complete_fixed_param, fixed_param, WMITLV_SIZE_FIX)
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_debug_mesg_flush_complete_fixed_param, wmi_debug_mesg_flush_complete_fixed_param, fixed_param, WMITLV_SIZE_FIX) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_STRUC, wmi_debug_mesg_fw_data_stall_param, data_stall, WMITLV_SIZE_VAR)
WMITLV_CREATE_PARAM_STRUC(WMI_DEBUG_MESG_FLUSH_COMPLETE_EVENTID);
#define WMITLV_TABLE_WMI_RSSI_BREACH_EVENTID(id,op,buf,len)\
diff --git a/drivers/staging/fw-api/fw/wmi_unified.h b/drivers/staging/fw-api/fw/wmi_unified.h
index f22b399..c3dc0e1 100644
--- a/drivers/staging/fw-api/fw/wmi_unified.h
+++ b/drivers/staging/fw-api/fw/wmi_unified.h
@@ -240,6 +240,7 @@
WMI_GRP_MONITOR, /* 0x39 */
WMI_GRP_REGULATORY, /* 0x3a */
WMI_GRP_HW_DATA_FILTER, /* 0x3b */
+ WMI_GRP_WLM, /* 0x3c WLAN Latency Manager */
} WMI_GRP_ID;
#define WMI_CMD_GRP_START_ID(grp_id) (((grp_id) << 12) | 0x1)
@@ -368,12 +369,14 @@
WMI_PDEV_SET_DIVERSITY_GAIN_CMDID,
/** Get chain RSSI and antena index command */
WMI_PDEV_DIV_GET_RSSI_ANTID_CMDID,
- /* get bss chan info */
+ /** get bss chan info */
WMI_PDEV_BSS_CHAN_INFO_REQUEST_CMDID,
- /* update pmk cache info */
+ /** update pmk cache info */
WMI_PDEV_UPDATE_PMK_CACHE_CMDID,
- /* update fils HLP */
+ /** update fils HLP */
WMI_PDEV_UPDATE_FILS_HLP_PKT_CMDID,
+ /** update ctltable request **/
+ WMI_PDEV_UPDATE_CTLTABLE_REQUEST_CMDID,
/* VDEV (virtual device) specific commands */
/** vdev create */
@@ -588,6 +591,8 @@
WMI_ROAM_SET_MBO_PARAM_CMDID, /* DEPRECATED */
/** configure packet error rate threshold for triggering roaming */
WMI_ROAM_PER_CONFIG_CMDID,
+ /** configure BSS Transition Management (BTM) offload for roaming */
+ WMI_ROAM_BTM_CONFIG_CMDID,
/** offload scan specific commands */
/** set offload scan AP profile */
@@ -910,6 +915,13 @@
/* set udp pkt filter for wlan HB */
WMI_HB_SET_UDP_PKT_FILTER_CMDID,
+ /* OIC ping keep alive */
+ WMI_HB_OIC_PING_OFFLOAD_PARAM_CMDID,
+ WMI_HB_OIC_PING_OFFLOAD_SET_ENABLE_CMDID,
+
+ /* WMI commands related to DHCP Lease Renew Offload **/
+ WMI_HB_DHCP_LEASE_RENEW_OFFLOAD_CMDID,
+
/** Wlan RMC commands*/
/** enable/disable RMC */
WMI_RMC_SET_MODE_CMDID = WMI_CMD_GRP_START_ID(WMI_GRP_RMC),
@@ -1056,6 +1068,9 @@
/** WMI commands related to HW data filtering **/
WMI_HW_DATA_FILTER_CMDID = WMI_CMD_GRP_START_ID(WMI_GRP_HW_DATA_FILTER),
+ /** WMI commands related to WLAN latency module **/
+ WMI_WLM_CONFIG_CMDID = WMI_CMD_GRP_START_ID(WMI_GRP_WLM),
+
} WMI_CMD_ID;
typedef enum {
@@ -1140,6 +1155,9 @@
/** provide noise floor and cycle counts for a channel */
WMI_PDEV_BSS_CHAN_INFO_EVENTID,
+ /** Response received the ctl table to host */
+ WMI_PDEV_UPDATE_CTLTABLE_EVENTID,
+
/* VDEV specific events */
/** VDEV started event in response to VDEV_START request */
WMI_VDEV_START_RESP_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_VDEV),
@@ -1999,6 +2017,10 @@
A_UINT32 vht_supp_mcs; /* VHT Supported MCS Set field Rx/Tx same */
A_UINT32 hw_min_tx_power;
A_UINT32 hw_max_tx_power;
+ /* sys_cap_info:
+ * bits 1:0 - RXTX LED + RFKILL enable flags (see WMI_LEDRFKILL_FLAGS)
+ * bits 31:2 - reserved (must be set to zero)
+ */
A_UINT32 sys_cap_info;
A_UINT32 min_pkt_size_enable; /* Enterprise mode short pkt enable */
/** Max beacon and Probe Response IE offload size (includes
@@ -2067,6 +2089,11 @@
*/
} wmi_service_ready_event_fixed_param;
+typedef enum {
+ WMI_RXTX_LED_ENABLE = 0x00000001,
+ WMI_RFKILL_ENABLE = 0x00000002,
+} WMI_LEDRFKILL_FLAGS;
+
#define WMI_SERVICE_SEGMENT_BM_SIZE32 4 /* 4x A_UINT32 = 128 bits */
typedef struct {
A_UINT32 tlv_header; /* TLV tag and len; tag equals WMITLV_TAG_STRUC_wmi_service_available_event_fixed_param */
@@ -3089,6 +3116,12 @@
A_UINT32 scan_id;
/**id of VDEV that requested the scan */
A_UINT32 vdev_id;
+ /** TSF Timestamp when the scan event (wmi_scan_event_type) is completed
+ * In case of AP it is TSF of the AP vdev
+ * In case of STA connected state this is the TSF of the AP
+ * In case of STA not connected it will be the free running HW timer
+ */
+ A_UINT32 tsf_timestamp;
} wmi_scan_event_fixed_param;
/* WMI Diag event */
@@ -4477,6 +4510,14 @@
* 0- Disable DTIM Synth
*/
WMI_PDEV_PARAM_DTIM_SYNTH,
+ /** Configure auto detect power failure feature.
+ * 0 - FW will trigger crash if power failure happens.
+ * 1 - FW will send a failure notification to host, and the host
+ * framework determines how to respond to the power failure
+ * 2 - Silently rejuvenate if power failure occurs.
+ * 3 - Feature disabled.
+ */
+ WMI_PDEV_AUTO_DETECT_POWER_FAILURE,
} WMI_PDEV_PARAM;
typedef struct {
@@ -4543,6 +4584,17 @@
A_UINT32 param; /* 1 = read only, 2= read and clear */
} wmi_pdev_bss_chan_info_request_fixed_param;
+typedef struct {
+ A_UINT32 tlv_header; /* WMITLV_TAG_STRUC_wmi_pdev_update_ctltable_request_fixed_param */
+ A_UINT32 total_len; /* the total number of ctl table bytes to be transferred */
+ A_UINT32 len; /* the number of ctltable bytes in current payload */
+ A_UINT32 seq; /* the number of current transfers */
+/*
+ * This TLV is followed by the following additional TLVs:
+ * ARRAY_BYTE TLV of ctltable_data
+ */
+} wmi_pdev_update_ctltable_request_fixed_param;
+
#define WMI_FAST_DIVERSITY_BIT_OFFSET 0
#define WMI_SLOW_DIVERSITY_BIT_OFFSET 1
@@ -4752,6 +4804,14 @@
} wmi_pdev_bss_chan_info_event_fixed_param;
typedef struct {
+ /* WMI event response update ctltable request to host */
+ A_UINT32 tlv_header; /* WMITLV_TAG_STRUC_wmi_pdev_update_ctltable_event_fixed_param */
+ A_UINT32 total_len; /* the total number of bytes to be transferred */
+ A_UINT32 len; /* the number of FW received bytes from host */
+ A_UINT32 seq; /* the number of current transfers */
+} wmi_pdev_update_ctltable_event_fixed_param;
+
+typedef struct {
A_UINT32 tlv_header; /* TLV tag and len; tag equals WMITLV_TAG_STRUC_wmi_pdev_l1ss_track_event_fixed_param */
A_UINT32 periodCnt;
A_UINT32 L1Cnt;
@@ -5049,6 +5109,7 @@
A_UINT32 vdev_id;
/** peer MAC address */
wmi_mac_addr peer_macaddr;
+ A_UINT32 pdev_id; /** pdev_id for identifying the MAC. See macros starting with WMI_PDEV_ID_ for values. In non-DBDC case host should set it to 0. */
/*
* This TLV is (optionally) followed by other TLVs:
* wmi_inst_rssi_stats_params inst_rssi_params;
@@ -5116,6 +5177,24 @@
} wmi_debug_mesg_flush_fixed_param;
typedef struct {
+ A_UINT32 tlv_header; /** TLV tag and len; tag equals WMITLV_TAG_STRUC_wmi_debug_mesg_fw_data_stall_param */
+ A_UINT32 vdev_id_bitmap; /** bitmap representation for vdev_id's where data stall happened */
+ A_UINT32 data_stall_type; /** wlan_dbg_data_stall_type_e */
+ /** reason_code1:
+ * The information stored in reason_code1 varies based on the data stally
+ * type values:
+ * data_stall_type | reason_code1
+ * -----------------------------------------------------
+ * HWSCHED_CMD_FLUSH | flush req reason (0-40)
+ * RX_REFILL_FAILED | ring_id (0-7)
+ * RX_FCS_LEN_ERROR | exact error type
+ */
+ A_UINT32 reason_code1;
+ A_UINT32 reason_code2; /** on which tid/hwq stall happened */
+ A_UINT32 recovery_type; /** wlan_dbg_data_stall_recovery_type_e */
+} wmi_debug_mesg_fw_data_stall_param;
+
+typedef struct {
A_UINT32 tlv_header; /** TLV tag and len; tag equals WMITLV_TAG_STRUC_wmi_debug_mesg_flush_complete_fixed_param*/
A_UINT32 reserved0; /** placeholder for future */
} wmi_debug_mesg_flush_complete_fixed_param;
@@ -5618,6 +5697,8 @@
A_UINT32 num_chan_stats;
/** number of MIB stats event structures (wmi_mib_stats) */
A_UINT32 num_mib_stats;
+ A_UINT32 pdev_id; /** pdev_id for identifying the MAC. See macros starting with WMI_PDEV_ID_ for values. In non-DBDC case host should set it to 0. */
+
/* This TLV is followed by another TLV of array of bytes
* A_UINT8 data[];
* This data array contains
@@ -7925,6 +8006,13 @@
*/
A_UINT32 ext_csa_switch_count_offset; /* units = bytes */
+ /** Specify when to send the CSA switch count status from FW to host.
+ * See WMI_CSA_EVENT_BMAP* below for more information.
+ * E.g. if CSA switch count event is needed to be sent when the switch count
+ * is 0, 1, 4 and 5, set the bitmap to (0X80000033)
+ */
+ A_UINT32 csa_event_bitmap;
+
/*
* The TLVs follows:
* wmi_bcn_prb_info bcn_prb_info; <-- beacon probe capabilities and IEs
@@ -7932,6 +8020,11 @@
*/
} wmi_bcn_tmpl_cmd_fixed_param;
+#define WMI_CSA_EVENT_BMAP_VALID_MASK 0X80000000 /* Follow bitmap for sending the CSA switch count event */
+#define WMI_CSA_EVENT_BMAP_SWITCH_COUNT_ZERO 0 /* Send only when the switch count becomes zero, added for backward compatibility
+ Same can also be achieved by setting bitmap to 0X80000001 */
+#define WMI_CSA_EVENT_BMAP_ALL 0XFFFFFFFF /* Send CSA switch count event for every update to switch count */
+
typedef struct {
A_UINT32 tlv_header; /** TLV tag and len; tag equals WMITLV_TAG_STRUC_wmi_prb_tmpl_cmd_fixed_param */
/** unique id identifying the VDEV, generated by the caller */
@@ -8870,6 +8963,10 @@
* selected from 0 to 31 values)
*/
#define WMI_PEER_SET_DEFAULT_ROUTING 0x13
+/* peer NSS for VHT160 - Extended NSS support */
+#define WMI_PEER_NSS_VHT160 0x14
+/* peer NSS for VHT160 - Extended NSS support */
+#define WMI_PEER_NSS_VHT80_80 0x15
/** mimo ps values for the parameter WMI_PEER_MIMO_PS_STATE */
#define WMI_PEER_MIMO_PS_NONE 0x0
@@ -9125,17 +9222,19 @@
* peer_ht_rates[] */
A_UINT32 num_peer_ht_rates;
/**
- * Bitmap providing customized mapping of bandwidths to max Rx NSS
- * for this peer.
- * This is required since 802.11 standard currently facilitates peer to be
- * able to advertise only a single max Rx NSS value across all bandwidths.
- * Some QCA chipsets might need to be able to advertise a different max
- * Rx NSS value for 160 MHz, than that for 80 MHz and lower.
+ * Bitmap providing the mapping of bandwidths to max Rx NSS for this peer
+ * in VHT160 / VHT80+80 Mode.
+ * As per the New IEEE 802.11 Update, the AP & Peer could advertise and
+ * handshake with the Max Rx NSS differing for different bandwidths
+ * instead of a single max Rx NSS Value.
+ * Some QCA chipsets have to advertise a different max Rx NSS value for
+ * 160 MHz and 80MHz.
*
* bit[2:0] : Represents value of Rx NSS for VHT 160 MHz
- * bit[30:3]: Reserved
- * bit[31] : MSB(0/1): 1 in case of valid data else all bits will be set
- * to 0 by host
+ * bit[5:3] : Represents value of Rx NSS for VHT 80_80 MHz -
+ * Extended NSS support
+ * bit[30:6]: Reserved
+ * bit[31] : MSB(0/1): 1 in case of valid data sent from Host
*/
A_UINT32 peer_bw_rxnss_override;
@@ -9457,6 +9556,44 @@
*/
} wmi_roam_scan_rssi_threshold_fixed_param;
+/**
+ * WMI_ROAM_BTM_CONFIG_CMDID : set BTM (BSS Transition Management, 802.11v) offload config
+ * Applicable only when WMI_ROAM_SCAN_MODE is enabled with WMI_ROAM_SCAN_MODE_ROAMOFFLOAD
+ */
+
+/**
+ * btm_config.flags
+ * BIT 0 : Enable/Disable the BTM offload.
+ * BIT 1-2 : Action on non matching candidate with cache. Used WMI_ROAM_BTM_OFLD_NON_MATCHING_CND_XXX
+ * BIT 3-5 : Roaming handoff decisions. Use WMI_ROAM_BTM_OFLD_CNDS_MATCH_XXX
+ * BIT 6-31 : Reserved
+ */
+#define WMI_ROAM_BTM_SET_ENABLE(flags, val) WMI_SET_BITS(flags, 0, 1, val)
+#define WMI_ROAM_BTM_GET_ENABLE(flags) WMI_GET_BITS(flags, 0, 1)
+#define WMI_ROAM_BTM_SET_NON_MATCHING_CND_ACTION(flags, val) WMI_SET_BITS(flags, 1, 2, val)
+#define WMI_ROAM_BTM_GET_NON_MATCHING_CND_ACTION(flags) WMI_GET_BITS(flags, 1, 2)
+#define WMI_ROAM_BTM_SET_CNDS_MATCH_CONDITION(flags, val) WMI_SET_BITS(flags, 3, 3, val)
+#define WMI_ROAM_BTM_GET_CNDS_MATCH_CONDITION(flags) WMI_GET_BITS(flags, 3, 3)
+
+/** WMI_ROAM_BTM_SET_NON_MATCHING_CNDS_ACTION definition: When BTM candidate is not matched with cache by WMI_ROAM_BTM_SET_CNDS_MATCH_CONDITION, determine what to do */
+#define WMI_ROAM_BTM_NON_MATCHING_CNDS_SCAN_CONSUME 0 /** Invoke roam scan and consume within firmware. Applicable only when ROAM_SCAN_MODE is enabled. If ROAM_SCAN_MODE is disabled, firmware won't scan and forward it to host */
+#define WMI_ROAM_BTM_NON_MATCHING_CNDS_NO_SCAN_FORWARD 1 /** Does not allow roam scan and forward BTM frame to host */
+/** values 2 through 3 reserved */
+
+/** WMI_ROAM_BTM_SET_CNDS_MATCH_CONDITION definition: This is used to invoke WMI_ROAM_BTM_SET_NON_MATCHING_CND_ACTION when compared with cache. i.e this condition is not applied with fresh scan result */
+#define WMI_ROAM_BTM_CNDS_MATCH_EXACT 0 /** Exactly matched with roam candidate list to BTM candidates */
+#define WMI_ROAM_BTM_CNDS_MATCH_AT_LEAST_TOP 1 /** At least one or more top priority bssid matched */
+/** values 2 through 7 reserved */
+
+typedef struct {
+ /** TLV tag and len; tag equals WMITLV_TAG_STRUC_wmi_btm_config_fixed_param */
+ A_UINT32 tlv_header;
+ /** unique id identifying the VDEV on which BTM is enabled/disabled */
+ A_UINT32 vdev_id;
+ /** BTM configuration control flags */
+ A_UINT32 flags;
+} wmi_btm_config_fixed_param;
+
#define WMI_ROAM_5G_BOOST_PENALIZE_ALGO_FIXED 0x0
#define WMI_ROAM_5G_BOOST_PENALIZE_ALGO_LINEAR 0x1
#define WMI_ROAM_5G_BOOST_PENALIZE_ALGO_LOG 0x2
@@ -9584,6 +9721,227 @@
/** looking for a PMF enabled AP */
#define WMI_AP_PROFILE_FLAG_PMF 0x4
+/* the value used in wmi_roam_cnd_scoring_param->disable_bitmap */
+#define WLAN_ROAM_SCORING_RSSI_DISABLE 0x00000001
+#define WLAN_ROAM_SCORING_HT_DISABLE 0x00000002
+#define WLAN_ROAM_SCORING_VHT_DISABLE 0x00000004
+#define WLAN_ROAM_SCORING_BW_DISABLE 0x00000008
+#define WLAN_ROAM_SCORING_BAND_DISABLE 0x00000010
+#define WLAN_ROAM_SCORING_NSS_DISABLE 0x00000020
+#define WLAN_ROAM_SCORING_CHAN_CONGESTION_DISABLE 0x00000040
+#define WLAN_ROAM_SCORING_BEAMFORMING_DISABLE 0x00000080
+#define WLAN_ROAM_SCORING_PCL_DISABLE 0x00000100
+#define WLAN_ROAM_SCORING_HE_DISABLE 0x00000200
+#define WLAN_ROAM_SCORING_OCE_WAN_DISABLE 0x00000400
+#define WLAN_ROAM_SCORING_DISABLE_ALL 0xFFFFFFFF
+#define WLAN_ROAM_SCORING_DEFAULT_PARAM_ALLOW 0x0
+
+#define WLAN_ROAM_MAX_SELECTION_SCORE 10000
+
+#define WLAN_ROAM_SCORE_20MHZ_BW_INDEX 0
+#define WLAN_ROAM_SCORE_40MHZ_BW_INDEX 1
+#define WLAN_ROAM_SCORE_80MHZ_BW_INDEX 2
+#define WLAN_ROAM_SCORE_160MHZ_BW_INDEX 3
+#define WLAN_ROAM_SCORE_MAX_BW_INDEX 4
+#define WMI_ROAM_GET_BW_SCORE_PERCENTAGE(value32, bw_index) WMI_GET_BITS(value32, (8 * (bw_index)), 8)
+#define WMI_ROAM_SET_BW_SCORE_PERCENTAGE(value32, score_pcnt, bw_index) WMI_SET_BITS(value32, (8 * (bw_index)), 8, score_pcnt)
+
+#define WLAN_ROAM_SCORE_NSS_1x1_INDEX 0
+#define WLAN_ROAM_SCORE_NSS_2x2_INDEX 1
+#define WLAN_ROAM_SCORE_NSS_3x3_INDEX 2
+#define WLAN_ROAM_SCORE_NSS_4x4_INDEX 3
+#define WLAN_ROAM_SCORE_MAX_NSS_INDEX 4
+#define WMI_ROAM_GET_NSS_SCORE_PERCENTAGE(value32, nss_index) WMI_GET_BITS(value32, (8 * (nss_index)), 8)
+#define WMI_ROAM_SET_NSS_SCORE_PERCENTAGE(value32, score_pcnt, nss_index) WMI_SET_BITS(value32, (8 * (nss_index)), 8, score_pcnt)
+
+#define WLAN_ROAM_SCORE_BAND_2G_INDEX 0
+#define WLAN_ROAM_SCORE_BAND_5G_INDEX 1
+/* 2 and 3 are reserved */
+#define WLAN_ROAM_SCORE_MAX_BAND_INDEX 4
+#define WMI_ROAM_GET_BAND_SCORE_PERCENTAGE(value32, band_index) WMI_GET_BITS(value32, (8 * (band_index)), 8)
+#define WMI_ROAM_SET_BAND_SCORE_PERCENTAGE(value32, score_pcnt, band_index) WMI_SET_BITS(value32, (8 * (band_index)), 8, score_pcnt)
+
+#define WLAN_ROAM_SCORE_MAX_CHAN_CONGESTION_SLOT 16
+#define WLAN_ROAM_SCORE_DEFAULT_CONGESTION_SLOT 0
+
+#define WLAN_ROAM_SCORE_MAX_OCE_WAN_SLOT 16
+#define WLAN_ROAM_SCORE_DEFAULT_OCE_WAN_SLOT 0
+
+/**
+ best_rssi_threshold: Roamable AP RSSI equal or better than this threshold, full rssi score 100. Units in dBm.
+ good_rssi_threshold: Below this threshold, scoring linear percentage between rssi_good_pcnt and 100. Units in dBm.
+ bad_rssi_threshold: Between good and bad rssi threshold, scoring linear percentage between rssi_bad_pcnt and rssi_good_pcnt. Units in dBm.
+ good_rssi_pcnt: Used to assign the scoring percentage of each slot between the best and good rssi thresholds. Units in percentage.
+ bad_rssi_pcnt: Used to assign the scoring percentage of each slot between the good and bad rssi thresholds. Units in percentage.
+ good_bucket_size : bucket size of slot in good zone. Units in dB.
+ bad_bucket_size : bucket size of slot in bad zone. Units in dB.
+ rssi_pref_5g_rssi_thresh: Below rssi threshold, 5G AP have given preference of band percentage. Units in dBm.
+*/
+/**
+ The graph below explains how RSSI scoring percentage is calculated
+ as the RSSI improves. In the graph, the derived parameters good_buckets
+ and bad_buckets are used. These derived parameters are related to the
+ specified parameters as follows:
+ good_buckets =
+ (best_rssi_threshold - good_rssi_threshold) / good_bucket_size
+ bad_buckets =
+ (good_rssi_threshold - bad_rssi_threshold) / bad_bucket_size
+
+ | (x0,y0) (x0 = best_rssi, y0 = 100)
+p 100 |-------o
+e | |<--------- (100 - good_rssi_pct)/good_buckets
+r | |___ ,---- good_bucket_size
+c | | |
+e | |_V_
+n | |
+t | |___o (x1,y1) (x1 = good_rssi, y1 = good_rssi_pcnt)
+ 80 | |
+% | |<------ (good_rssi_pcnt - bad_rssi_pcnt)/bad_buckets
+ | |_____
+ | | ,-- bad_bucket_size
+ | | |
+ | |__v__
+ | |
+ | |
+ 40 | o------------
+ | (x2,y2) (x2 = bad_rssi, y2 = bad_rssi_pcnt)
+ +------o------------o-----------o------------->
+ -50 -70 -80 rssi dBm
+
+| excellent | good | bad | poor
+| zone | zone | zone | zone
+ V V V
+ BEST_THRES GOOD_THRES BAD_THRES
+ */
+typedef struct {
+ A_INT32 best_rssi_threshold;
+ A_INT32 good_rssi_threshold;
+ A_INT32 bad_rssi_threshold;
+ A_UINT32 good_rssi_pcnt;
+ A_UINT32 bad_rssi_pcnt;
+ A_UINT32 good_bucket_size;
+ A_UINT32 bad_bucket_size;
+ A_INT32 rssi_pref_5g_rssi_thresh;
+} wmi_roam_cnd_rssi_scoring;
+
+/**
+ Use macro WMI_ROAM_CND_GET/SET_BW_SCORE_PERCENTAGE to get and set the value respectively.
+ BITS 0-7 :- It contains scoring percentage of 20MHz BW
+ BITS 8-15 :- It contains scoring percentage of 40MHz BW
+ BITS 16-23 :- It contains scoring percentage of 80MHz BW
+ BITS 24-31 :- It contains scoring percentage of 160MHz BW
+
+ The value of each index must be 0-100
+ */
+typedef struct {
+ A_UINT32 score_pcnt;
+} wmi_roam_cnd_bw_scoring;
+
+/**
+ Use macro WMI_ROAM_CND_GET/SET_BAND_SCORE_PERCENTAGE to get and set the value respectively.
+ BITS 0-7 :- It contains scoring percentage of 2G
+ BITS 8-15 :- It contains scoring percentage of 5G
+ BITS 16-23 :- reserved
+ BITS 24-31 :- reserved
+
+ The value of each index must be 0-100
+ */
+typedef struct {
+ A_UINT32 score_pcnt;
+} wmi_roam_cnd_band_scoring;
+
+/**
+ Use macro WMI_ROAM_CND_GET/SET_NSS_SCORE_PERCENTAGE to get and set the value respectively.
+ BITS 0-7 :- It contains scoring percentage of 1x1
+ BITS 8-15 :- It contains scoring percentage of 2x2
+ BITS 16-23 :- It contains scoring percentage of 3x3
+ BITS 24-31 :- It contains scoring percentage of 4x4
+
+ The value of each index must be 0-100
+ */
+typedef struct {
+ A_UINT32 score_pcnt;
+} wmi_roam_cnd_nss_scoring;
+
+/**
+ score_pcnt: Contains the roam score percentage for each slot of the respective channel_congestion_pcnt. The same bit layout is used for slots 0-3:
+ BITS 0-7 :- the scoring pcnt when the AP does not advertise QBSS/ESP info
+ BITS 8-15 :- SLOT_1
+ BITS 16-23 :- SLOT_2
+ BITS 24-31 :- SLOT_3
+ BITS 32- ...
+
+ num_slot will equally divide 100. e.g, if num_slot = 4
+ slot 0 = 0-25%, slot 1 = 26-50% slot 2 = 51-75%, slot 3 = 76-100%
+*/
+typedef struct {
+ A_UINT32 num_slot; /* max 15 */
+ A_UINT32 score_pcnt3_to_0;
+ A_UINT32 score_pcnt7_to_4;
+ A_UINT32 score_pcnt11_to_8;
+ A_UINT32 score_pcnt15_to_12;
+} wmi_roam_cnd_esp_qbss_scoring;
+
+/**
+ score_pcnt: Contains the roam score percentage for each slot of the respective channel_congestion_pcnt. The same bit layout is used for slots 0-3:
+ BITS 0-7 :- the scoring pcnt when the AP does not advertise QBSS/ESP info
+ BITS 8-15 :- SLOT_1
+ BITS 16-23 :- SLOT_2
+ BITS 24-31 :- SLOT_3
+ BITS 32- ...
+
+ num_slot will equally divide 100. e.g, if num_slot = 4
+ slot 0 = 0-25%, slot 1 = 26-50% slot 2 = 51-75%, slot 3 = 76-100%
+*/
+typedef struct {
+ A_UINT32 num_slot; /* max 15 */
+ A_UINT32 score_pcnt3_to_0;
+ A_UINT32 score_pcnt7_to_4;
+ A_UINT32 score_pcnt11_to_8;
+ A_UINT32 score_pcnt15_to_12;
+} wmi_roam_cnd_oce_wan_scoring;
+
+/**
+ disable_bitmap :- Each bit either allows (0) or disallows (1) the corresponding roam score param to be considered.
+ rssi_weightage_pcnt :- RSSI weightage out of total score in percentage
+ ht_weightage_pcnt :- HT weightage out of total score in percentage.
+ vht_weightage_pcnt :- VHT weightage out of total score in percentage.
+ he_weightage_pcnt :- 11ax weightage out of total score in percentage.
+ bw_weightage_pcnt :- Bandwidth weightage out of total score in percentage.
+ band_weightage_pcnt :- Band(2G/5G) weightage out of total score in percentage.
+ nss_weightage_pcnt:- NSS(1x1 / 2x2) weightage out of total score in percentage.
+ esp_qbss_weightage_pcnt: ESP/QBSS weightage out of total score in percentage.
+ beamforming_weightage_pcnt :- Beamforming weightage out of total score in percentage.
+ pcl_weightage_pcnt :- PCL weightage out of total score in percentage.
+ oce_wan_weightage_pcnt :- OCE WAN metrics weightage out of total score in percentage.
+ rssi_scoring :- RSSI scoring information.
+ bw_scoring :- channel BW scoring percentage information.
+ band_scoring :- band scoring percentage information.
+ nss_scoring :- NSS scoring percentage information.
+ esp_qbss_scoring :- ESP/QBSS scoring percentage information
+ oce_wan_scoring : OCE WAN metrics percentage information
+*/
+typedef struct {
+ A_UINT32 tlv_header; /** TLV tag and len; tag equals WMITLV_TAG_STRUC_wmi_roam_cnd_scoring_param */
+ A_UINT32 disable_bitmap;
+ A_INT32 rssi_weightage_pcnt;
+ A_INT32 ht_weightage_pcnt;
+ A_INT32 vht_weightage_pcnt;
+ A_INT32 he_weightage_pcnt;
+ A_INT32 bw_weightage_pcnt;
+ A_INT32 band_weightage_pcnt;
+ A_INT32 nss_weightage_pcnt;
+ A_INT32 esp_qbss_weightage_pcnt;
+ A_INT32 beamforming_weightage_pcnt;
+ A_INT32 pcl_weightage_pcnt;
+ A_INT32 oce_wan_weightage_pcnt;
+ wmi_roam_cnd_rssi_scoring rssi_scoring;
+ wmi_roam_cnd_bw_scoring bw_scoring;
+ wmi_roam_cnd_band_scoring band_scoring;
+ wmi_roam_cnd_nss_scoring nss_scoring;
+ wmi_roam_cnd_esp_qbss_scoring esp_qbss_scoring;
+ wmi_roam_cnd_oce_wan_scoring oce_wan_scoring;
+} wmi_roam_cnd_scoring_param;
/** To match an open AP, the rs_authmode should be set to WMI_AUTH_NONE
* and WMI_AP_PROFILE_FLAG_CRYPTO should be clear.
@@ -9653,6 +10011,12 @@
A_UINT32 roam_dense_traffic_thres;
} wmi_roam_dense_thres_param;
+/* Definition for flags in wmi_roam_bg_scan_roaming_param
+ * Bit 0: BG roaming enabled when we connect to 2G AP only and roaming to 5G AP only.
+ * Bit 1-31: Reserved
+ */
+#define WMI_ROAM_BG_SCAN_FLAGS_2G_TO_5G_ONLY 1
+
typedef struct {
/** TLV tag and len; tag equals WMITLV_TAG_STRUC_wmi_roam_bg_scan_roaming_param */
A_UINT32 tlv_header;
@@ -9660,6 +10024,12 @@
A_UINT32 roam_bg_scan_bad_rssi_thresh;
/** bitmap for which scan client will enable/disable background roaming. bit position is mapped to the enum WMI_SCAN_CLIENT_ID. 1 = enable, 0 = disable */
A_UINT32 roam_bg_scan_client_bitmap;
+ /** roam scan rssi threshold for 2G band.
+ * offset from roam_bg_scan_bad_rssi_thresh, in dB units
+ */
+ A_INT32 bad_rssi_thresh_offset_2g;
+ /* flags for background roaming */
+ A_UINT32 flags;
} wmi_roam_bg_scan_roaming_param;
/** Beacon filter wmi command info */
@@ -9730,6 +10100,7 @@
/*
* Following this structure is the TLV:
* wmi_ap_profile ap_profile; <-- AP profile info
+ * wmi_roam_cnd_scoring_param roam_cnd_scoring_param
*/
} wmi_roam_ap_profile_fixed_param;
@@ -9891,6 +10262,7 @@
*/
#define WMI_ROAM_REASON_INVOKE_ROAM_FAIL 0x6
#define WMI_ROAM_REASON_RSO_STATUS 0x7
+#define WMI_ROAM_REASON_BTM 0x8 /** Roaming because of BTM request received */
/* reserved up through 0xF */
/* subnet status: bits 4-5 */
@@ -9936,6 +10308,7 @@
#define WMI_ROAM_NOTIF_ROAM_REASSOC 0x3 /** indicate that reassociation is done. sent only in non WOW state */
#define WMI_ROAM_NOTIF_SCAN_MODE_SUCCESS 0x4 /** indicate that roaming scan mode is successful */
#define WMI_ROAM_NOTIF_SCAN_MODE_FAIL 0x5 /** indicate that roaming scan mode is failed due to internal roaming state */
+#define WMI_ROAM_NOTIF_DISCONNECT 0x6 /** indicate that roaming is not allowed due to a BTM request */
/**whenever RIC request information change, host driver should pass all ric related information to firmware (now only support tsepc)
* Once, 11r roaming happens, firmware can generate RIC request in reassoc request based on these informations
@@ -10596,6 +10969,9 @@
WOW_REASON_CHIP_POWER_FAILURE_DETECT,
WOW_REASON_11D_SCAN,
WOW_REASON_THERMAL_CHANGE,
+ WOW_REASON_OIC_PING_OFFLOAD,
+ WOW_REASON_WLAN_DHCP_RENEW,
+
WOW_REASON_DEBUG_TEST = 0xFF,
} WOW_WAKE_REASON_TYPE;
@@ -11924,7 +12300,7 @@
/* unique id identifying the unit test cmd, generated by the caller */
A_UINT32 diag_token;
/**
- * TLV (tag length value) parameters follow the wmi_roam_chan_list
+ * TLV (tag length value) parameters follow the wmi_unit_test_cmd_fixed_param
* structure. The TLV's are:
* A_UINT32 args[];
**/
@@ -12738,27 +13114,27 @@
typedef struct {
/** TLV tag and len; tag equals
* WMITLV_TAG_STRUC_wmi_hb_set_enable_cmd_fixed_param */
- A_UINT32 tlv_header;
- A_UINT32 vdev_id;
- A_UINT32 enable;
- A_UINT32 item;
- A_UINT32 session;
+ A_UINT32 tlv_header; /** TLV header*/
+ A_UINT32 vdev_id; /** Vdev ID */
+ A_UINT32 enable; /** 1: Enable, 0: Disable`*/
+ A_UINT32 item; /** 1: UDP, 2: TCP */
+ A_UINT32 session; /** Session ID from HOST */
} wmi_hb_set_enable_cmd_fixed_param;
typedef struct {
/** TLV tag and len; tag equals
* WMITLV_TAG_STRUC_wmi_hb_set_tcp_params_cmd_fixed_param */
- A_UINT32 tlv_header;
- A_UINT32 vdev_id;
- A_UINT32 srv_ip;
- A_UINT32 dev_ip;
- A_UINT32 seq;
- A_UINT32 src_port;
- A_UINT32 dst_port;
- A_UINT32 interval;
- A_UINT32 timeout;
- A_UINT32 session;
- wmi_mac_addr gateway_mac;
+ A_UINT32 tlv_header; /** TLV header*/
+ A_UINT32 vdev_id; /** Vdev ID */
+ A_UINT32 srv_ip; /** Server IP address (IPv4) */
+ A_UINT32 dev_ip; /** Device IP address (IPv4) */
+ A_UINT32 seq; /** TCP Sequence no */
+ A_UINT32 src_port; /** Source port */
+ A_UINT32 dst_port; /** Destination port */
+ A_UINT32 interval; /** Keep alive interval */
+ A_UINT32 timeout; /** Timeout if keep alive fails */
+ A_UINT32 session; /** Session ID from HOST */
+ wmi_mac_addr gateway_mac; /** Server Mac Address */
} wmi_hb_set_tcp_params_cmd_fixed_param;
typedef struct {
@@ -12806,12 +13182,139 @@
} WMI_HB_WAKEUP_REASON;
typedef struct {
- A_UINT32 tlv_header; /* TLV tag and len; tag equals WMITLV_TAG_STRUC_wmi_hb_ind_event_fixed_param */
- A_UINT32 vdev_id; /* unique id identifying the VDEV */
- A_UINT32 session; /* Session ID from driver */
- A_UINT32 reason; /* wakeup reason */
+ A_UINT32 tlv_header; /** TLV tag and len; tag equals WMITLV_TAG_STRUC_wmi_hb_ind_event_fixed_param */
+ A_UINT32 vdev_id; /** unique id identifying the VDEV */
+ A_UINT32 session; /** Session ID from HOST */
+ A_UINT32 reason; /** wakeup reason */
} wmi_hb_ind_event_fixed_param;
+typedef struct {
+ /** TLV tag and len; tag equals
+ * WMITLV_TAG_STRUC_wmi_oic_set_enable_cmd_fixed_param */
+ A_UINT32 tlv_header; /** TLV Header */
+ A_UINT32 vdev_id; /** Vdev ID */
+ A_UINT32 session; /** Session number from the HOST */
+ A_UINT32 srv_ip; /** IPv4 address of the OCF server */
+ A_UINT32 dev_ip; /** IPv4 address of the device */
+ A_UINT32 tcp_tx_seq; /** TCP sequence number */
+ A_UINT32 src_port; /** Source port */
+ A_UINT32 dst_port; /** Destination port */
+ A_UINT32 protocol; /** Protocol used: TCP:0, UDP:1 */
+ A_UINT32 wlan_hb_session; /** Linked WLAN HB session. If a keepalive is configured for the TCP session, the session ID of the TCP keepalive */
+ A_UINT32 timeout_retries; /** timeout[31:16]: TCP ACK timeout, time to wait for a TCP ACK in ms
+ retries[15:0]: Number of TCP level retries of OIC ping request */
+ wmi_mac_addr peer_macaddr; /** MAC address of the OCF server */
+ A_UINT32 oic_ping_pkt0; /** OIC ping packet content [Byte03:Byte00] */
+ A_UINT32 oic_ping_pkt1; /** OIC ping packet content [Byte07:Byte04] */
+ A_UINT32 oic_ping_pkt2; /** OIC ping packet content [Byte11:Byte08] */
+ A_UINT32 oic_ping_pkt3; /** OIC ping packet content [Byte15:Byte12] */
+
+ A_UINT32 tls_cipher_suite_version; /** Cipher suite [31:16] as defined in https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml
+ TLS version [15:00] */
+ A_UINT32 tls_tx_seq0; /** Tx sequence number [31:00], incremented after every TLS packet transmission */
+ A_UINT32 tls_tx_seq1; /** Tx sequence number [63:32] */
+ A_UINT32 tls_rx_seq0; /** Rx sequence number [31:00], incremented after every TLS packet reception */
+ A_UINT32 tls_rx_seq1; /** Rx sequence number [63:32] */
+ A_UINT32 tls_tx_key0; /** client_write_key[Byte03:Byte00] refer Section 6.3 RFC 5246 */
+ A_UINT32 tls_tx_key1; /** client_write_key[Byte07:Byte04] */
+ A_UINT32 tls_tx_key2; /** client_write_key[Byte11:Byte08] */
+ A_UINT32 tls_tx_key3; /** client_write_key[Byte15:Byte12] */
+ A_UINT32 tls_rx_key0; /** server_write_key[Byte03:Byte00] */
+ A_UINT32 tls_rx_key1; /** server_write_key[Byte07:Byte04] */
+ A_UINT32 tls_rx_key2; /** server_write_key[Byte11:Byte08] */
+ A_UINT32 tls_rx_key3; /** server_write_key[Byte15:Byte12] */
+ A_UINT32 tls_MAC_write_key0; /** client_write_MAC_key[Byte03:Byte00] refer Section 6.3 RFC 5246 */
+ A_UINT32 tls_MAC_write_key1; /** client_write_MAC_key[Byte07:Byte04] */
+ A_UINT32 tls_MAC_write_key2; /** client_write_MAC_key[Byte11:Byte08] */
+ A_UINT32 tls_MAC_write_key3; /** client_write_MAC_key[Byte15:Byte12] */
+ A_UINT32 tls_MAC_write_key4; /** client_write_MAC_key[Byte19:Byte16] */
+ A_UINT32 tls_MAC_write_key5; /** client_write_MAC_key[Byte23:Byte20] */
+ A_UINT32 tls_MAC_write_key6; /** client_write_MAC_key[Byte27:Byte24] */
+ A_UINT32 tls_MAC_write_key7; /** client_write_MAC_key[Byte31:Byte28] */
+ A_UINT32 tls_MAC_read_key0; /** server_write_MAC_key[Byte03:Byte00] refer Section 6.3 RFC 5246 */
+ A_UINT32 tls_MAC_read_key1; /** server_write_MAC_key[Byte07:Byte04] */
+ A_UINT32 tls_MAC_read_key2; /** server_write_MAC_key[Byte11:Byte08] */
+ A_UINT32 tls_MAC_read_key3; /** server_write_MAC_key[Byte15:Byte12] */
+ A_UINT32 tls_MAC_read_key4; /** server_write_MAC_key[Byte19:Byte16] */
+ A_UINT32 tls_MAC_read_key5; /** server_write_MAC_key[Byte23:Byte20] */
+ A_UINT32 tls_MAC_read_key6; /** server_write_MAC_key[Byte27:Byte24] */
+ A_UINT32 tls_MAC_read_key7; /** server_write_MAC_key[Byte31:Byte28] */
+ A_UINT32 tls_client_IV0; /** CBC Mode: CBC_residue [Byte03:Byte00] refer section 6.2.3.2. CBC Block Cipher in RFC 5246
+ GCM Mode: GCMNonce.salt [Byte03:Byte00] refer Section 3 of RFC 5288 */
+ A_UINT32 tls_client_IV1; /** CBC Mode: CBC_residue [Byte7:Byte4] */
+ A_UINT32 tls_client_IV2; /** CBC Mode: CBC_residue [Byte11:Byte8] */
+ A_UINT32 tls_client_IV3; /** CBC Mode: CBC_residue [Byte15:Byte12] */
+ A_UINT32 tls_server_IV0; /** CBC Mode: CBC_residue [Byte3:Byte0] refer section 6.2.3.2. CBC Block Cipher in RFC 5246
+ GCM Mode: GCMNonce.salt [Byte4: Byte0] refer Section 3 of RFC 5288 */
+ A_UINT32 tls_server_IV1; /** CBC Mode: CBC_residue [Byte7:Byte4] */
+ A_UINT32 tls_server_IV2; /** CBC Mode: CBC_residue [Byte11:Byte8] */
+ A_UINT32 tls_server_IV3; /** CBC Mode: CBC_residue [Byte15:Byte12] */
+} wmi_oic_ping_offload_params_cmd_fixed_param;
+
+typedef struct {
+ /** TLV tag and len; tag equals
+ * WMITLV_TAG_STRUC_wmi_oic_set_enable_cmd_fixed_param */
+ A_UINT32 tlv_header; /** TLV Header*/
+ A_UINT32 vdev_id; /** Interface number */
+ A_UINT32 session; /** Session ID*/
+ A_UINT32 enable; /** 1: Enable , 0: Disable */
+} wmi_oic_ping_offload_set_enable_cmd_fixed_param;
+
+/** wlan OIC events */
+typedef enum {
+ WMI_WLAN_OIC_REASON_UNKNOWN = 0, /** Unknown */
+ WMI_WLAN_OIC_REASON_HOST_WAKE = 1, /** No error , but host is woken up due to other reasons */
+ WMI_WLAN_OIC_REASON_TCP_TIMEOUT = 2, /** TCP Timeout */
+ WMI_WLAN_OIC_REASON_PING_TIMEOUT = 3, /** OIC Ping response timeout */
+ WMI_WLAN_OIC_REASON_TLS_ERROR = 4, /** TLS decryption error */
+} WMI_OIC_WAKEUP_REASON;
+
+typedef struct {
+ A_UINT32 tlv_header; /** TLV tag and len; tag equals WMITLV_TAG_STRUC_wmi_oic_ind_event_fixed_param */
+ A_UINT32 vdev_id; /** unique id identifying the VDEV */
+ A_UINT32 session; /** Session ID from driver */
+ A_UINT32 reason; /** wakeup reason as per WMI_OIC_WAKEUP_REASON */
+ A_UINT32 tcp_tx_seq; /** Current TCP sequence number */
+ A_UINT32 tcp_ack_num; /** Current TCP Acknowledgement number */
+ A_UINT32 tls_tx_seq0; /** Tx sequence number [31:00], incremented after every TLS packet transmission */
+ A_UINT32 tls_tx_seq1; /** Tx sequence number [63:32] */
+ A_UINT32 tls_rx_seq0; /** Rx sequence number [31:00], incremented after every TLS packet reception */
+ A_UINT32 tls_rx_seq1; /** Rx sequence number [63:32] */
+ A_UINT32 tls_client_IV0; /** CBC Mode: CBC_residue [Byte03:Byte00] refer section 6.2.3.2. CBC Block Cipher in RFC 5246 */
+ A_UINT32 tls_client_IV1; /** CBC Mode: CBC_residue [Byte7:Byte4] */
+ A_UINT32 tls_client_IV2; /** CBC Mode: CBC_residue [Byte11:Byte8] */
+ A_UINT32 tls_client_IV3; /** CBC Mode: CBC_residue [Byte15:Byte12] */
+ A_UINT32 tls_server_IV0; /** CBC Mode: CBC_residue [Byte3:Byte0] refer section 6.2.3.2. CBC Block Cipher in RFC 5246 */
+ A_UINT32 tls_server_IV1; /** CBC Mode: CBC_residue [Byte7:Byte4] */
+ A_UINT32 tls_server_IV2; /** CBC Mode: CBC_residue [Byte11:Byte8] */
+ A_UINT32 tls_server_IV3; /** CBC Mode: CBC_residue [Byte15:Byte12] */
+} wmi_oic_ping_handoff_event;
+
+typedef struct {
+ /** TLV tag and len; tag equals
+ * WMITLV_TAG_STRUC_wmi_dhcp_lease_renew_offload_fixed_param */
+ A_UINT32 tlv_header; /** TLV Header*/
+ A_UINT32 vdev_id; /** Vdev ID */
+ A_UINT32 enable; /** 1: Enable 0: Disable*/
+ A_UINT32 srv_ip; /** DHCP Server IP address (IPv4) */
+ A_UINT32 client_ip; /** Device IP address (IPv4) */
+ wmi_mac_addr srv_mac; /** DHCP Server MAC address */
+ A_UINT32 parameter_list; /** Optional Parameter list. RFC 1533 gives the complete set of options defined for use with DHCP */
+} wmi_dhcp_lease_renew_offload_cmd_fixed_param;
+
+/** WLAN DHCP Lease Renew Events */
+typedef enum {
+ WMI_WLAN_DHCP_RENEW_REASON_UNKNOWN = 0, /** Unknown */
+ WMI_WLAN_DHCP_RENEW_REASON_ACK_TIMEOUT = 1, /** DHCP ACK Timeout */
+ WMI_WLAN_DHCP_RENEW_REASON_NACK = 2, /** DHCP error */
+} WMI_DHCP_RENEW_WAKEUP_REASON;
+
+typedef struct {
+ A_UINT32 tlv_header; /** TLV tag and len; tag equals WMITLV_TAG_STRUC_wmi_dhcp_renew_ind_event_fixed_param */
+ A_UINT32 vdev_id; /** unique id identifying the VDEV */
+ A_UINT32 reason; /** wakeup reason as per enum WMI_DHCP_RENEW_WAKEUP_REASON*/
+} wmi_dhcp_lease_renew_event;
+
/** WMI_STA_SMPS_PARAM_CMDID */
typedef enum {
/** RSSI threshold to enter Dynamic SMPS mode from inactive mode */
@@ -16896,7 +17399,7 @@
A_UINT32 num_chan;
/**
* TLV (tag length value) parameters follow the wmi_soc_set_pcl_cmd
- 12930 * structure. The TLV's are:
+ * structure. The TLV's are:
* A_UINT32 channel_weight[]; channel order & size will be as per the list provided in WMI_SCAN_CHAN_LIST_CMDID
**/
} wmi_pdev_set_pcl_cmd_fixed_param;
@@ -18653,6 +19156,7 @@
WMI_HW_MODE_DBS_SBS = 4, /* 3 PHYs, with 2 on the same band doing SBS
* as in WMI_HW_MODE_SBS, and 3rd on the other band
*/
+ WMI_HW_MODE_DBS_OR_SBS = 5, /* One PHY is on 5G and the other PHY can be in 2G or 5G. */
} WMI_HW_MODE_CONFIG_TYPE;
#define WMI_SUPPORT_11B_GET(flags) WMI_GET_BITS(flags, 0, 1)
@@ -19671,6 +20175,9 @@
WMI_RETURN_STRING(WMI_OEM_DMA_RING_CFG_REQ_CMDID);
WMI_RETURN_STRING(WMI_PDEV_BSS_CHAN_INFO_REQUEST_CMDID);
WMI_RETURN_STRING(WMI_VDEV_LIMIT_OFFCHAN_CMDID);
+ WMI_RETURN_STRING(WMI_ROAM_BTM_CONFIG_CMDID);
+ WMI_RETURN_STRING(WMI_WLM_CONFIG_CMDID);
+ WMI_RETURN_STRING(WMI_PDEV_UPDATE_CTLTABLE_REQUEST_CMDID);
}
return "Invalid WMI cmd";
@@ -20082,6 +20589,116 @@
*/
} wmi_unit_test_event_fixed_param;
+/* Definition of latency levels */
+typedef enum {
+ WMI_WLM_LL_NORMAL = 0x0,
+ WMI_WLM_LL_MODERATE = 0x1,
+ WMI_WLM_LL_LOW = 0x2,
+ WMI_WLM_LL_ULTRA_LOW = 0x3,
+} WMI_WLM_LATENCY_LEVEL;
+
+/*
+* Lay out of flags in wmi_wlm_config_cmd_fixed_param
+*
+* |31 12| 11 | 10 |9 8|7 6|5 4|3 2| 1 | 0 |
+* +------+------+------+------+------+------+------+-----+-----+
+* | RSVD | SSLP | CSLP | RSVD | Roam | RSVD | DWLT | DFS | SUP |
+* +------+-------------+-------------+-------------------------+
+* | WAL | PS | Roam | Scan |
+*
+*/
+/* bit 0-3 of flags is used for scan operation */
+/* bit 0: WLM_FLAGS_SCAN_SUPPRESS, suppress all scan and other bits would be ignored if bit is set */
+
+#define WLM_FLAGS_SCAN_SUPPRESS 1 /* suppress all scan request */
+
+/* bit 1: WLM_FLAGS_SCAN_SKIP_DFS, skip dfs channel if bit is set */
+
+#define WLM_FLAGS_SCAN_SKIP_DFS 1 /* skip dfs channel operation */
+
+/* bit 2-3: define policy of dwell time/duration of each foreign channel
+ (b2 b3)
+ (0 0 ): Default dwell time
+ (0 1 ): WLM_FLAGS_STICK_SCAN_DWELL_TIME : Stick to original active/passive dwell time, but split
+ foreign channel dwell times into fitting into min (dl_latency, ul_latency). Note it can increase
+ overall scan duration.
+ (1 0 ): WLM_FLAGS_SHRINK_SCAN_DWELL_TIME: Shrink active/passive dwell time to
+ min(dl_latency, ul_latency, dwell_time). It may reduce overall scan duration, but it may decrease
+ the accuracy of scan result.
+ (1 1 ): reserved
+*/
+#define WLM_FLAGS_DEFAULT_SCAN_DWELL_TIME 0 /* Default scan dwell time */
+#define WLM_FLAGS_STICK_SCAN_DWELL_TIME 1 /* Shrink off channel time but extend overall scan duration */
+#define WLM_FLAGS_SHRINK_SCAN_DWELL_TIME 2 /* Shrink scan off channel time */
+
+/* bit 4-5: reserved for scan */
+
+/* bit 6-7 of flags is used for roaming operation */
+/* bit 6-7: define roaming policy:
+ (b6 b7)
+ (0 0 ): WLM_FLAGS_ROAM_ALLOW: Default behavior, allow roaming in all scenarios
+ (0 1 ): WLM_FLAGS_ROAM_SUPPRESS: Disallow all roaming
+ (1 0 ): WLM_FLAGS_ALLOW_FINAL_BMISS_ROAM: Allow final bmiss roaming only
+ (1 1 ): reserved
+*/
+#define WLM_FLAGS_ROAM_ALLOW 0
+#define WLM_FLAGS_ROAM_SUPPRESS 1
+#define WLM_FLAGS_ALLOW_FINAL_BMISS_ROAM 2
+
+/* bit 8-9: reserved for roaming */
+
+/* bit 10-11 of flags is used for powersave operation */
+/* bit 10: WLM_FLAGS_PS_DISABLE_CSS_COLLAPSE, disable css power collapse if bit is set */
+
+#define WLM_FLAGS_PS_DISABLE_CSS_COLLAPSE 1 /* disable css power collapse */
+
+/* bit 11: WLM_FLAGS_PS_DISABLE_SYS_SLEEP, disable sys sleep if bit is set */
+
+#define WLM_FLAGS_PS_DISABLE_SYS_SLEEP 1 /* disable sys sleep */
+
+
+/* bit 12-31 of flags is reserved for powersave and WAL */
+
+#define WLM_FLAGS_SCAN_IS_SUPPRESS(flag) WMI_GET_BITS(flag, 0, 1)
+#define WLM_FLAGS_SCAN_SET_SUPPRESS(flag, val) WMI_SET_BITS(flag, 0, 1, val)
+#define WLM_FLAGS_SCAN_IS_SKIP_DFS(flag) WMI_GET_BITS(flag, 1, 1)
+#define WLM_FLAGS_SCAN_SET_SKIP_DFS(flag, val) WMI_SET_BITS(flag, 1, 1, val)
+#define WLM_FLAGS_SCAN_GET_DWELL_TIME_POLICY(flag) WMI_GET_BITS(flag, 2, 2)
+#define WLM_FLAGS_SCAN_SET_DWELL_TIME_POLICY(flag, val) WMI_SET_BITS(flag, 2, 2, val)
+#define WLM_FLAGS_ROAM_GET_POLICY(flag) WMI_GET_BITS(flag, 6, 2)
+#define WLM_FLAGS_ROAM_SET_POLICY(flag, val) WMI_SET_BITS(flag, 6, 2, val)
+#define WLM_FLAGS_PS_IS_CSS_CLPS_DISABLED(flag) WMI_GET_BITS(flag, 10, 1)
+#define WLM_FLAGS_PS_SET_CSS_CLPS_DISABLE(flag, val) WMI_SET_BITS(flag, 10, 1, val)
+#define WLM_FLAGS_PS_IS_SYS_SLP_DISABLED(flag) WMI_GET_BITS(flag, 11, 1)
+#define WLM_FLAGS_PS_SET_SYS_SLP_DISABLE(flag, val) WMI_SET_BITS(flag, 11, 1, val)
+
+typedef struct {
+ /** TLV tag and len; tag equals
+ * WMI_WLM_CONFIG_CMD_fixed_param */
+ A_UINT32 tlv_header;
+ /* VDEV identifier */
+ A_UINT32 vdev_id;
+ /* Refer to WMI_WLM_LATENCY_LEVEL
+ * Once latency change detected, WLM will notify modules e.g. STAPS or SCAN/ROAM,
+ * who subscribed this event. And subscribers, like SCAN, may disable/cutoff offchan
+ * operation to support lower latency of WLAN.
+ */
+ A_UINT32 latency_level;
+ /* represent uplink latency in ms
+ * This parameter will be used by STAPS module to decide timing parameters, like
+ * ITO or SPEC wakeup interval. For SCAN/ROAM, it may used to calculate offchan
+ * durations.
+ */
+ A_UINT32 ul_latency;
+ /* represent downlink latency in ms
+ * Similar usage as ul_latency
+ */
+ A_UINT32 dl_latency;
+ /* flags for each client of WLM, refer to WLM_FLAGS_ definitions above */
+ A_UINT32 flags;
+} wmi_wlm_config_cmd_fixed_param;
+
+
/* ADD NEW DEFS HERE */
diff --git a/drivers/staging/fw-api/fw/wmi_version.h b/drivers/staging/fw-api/fw/wmi_version.h
index c3a6fe9..0aa4f7e 100644
--- a/drivers/staging/fw-api/fw/wmi_version.h
+++ b/drivers/staging/fw-api/fw/wmi_version.h
@@ -36,7 +36,7 @@
#define __WMI_VER_MINOR_ 0
/** WMI revision number has to be incremented when there is a
* change that may or may not break compatibility. */
-#define __WMI_REVISION_ 421
+#define __WMI_REVISION_ 437
/** The Version Namespace should not be normally changed. Only
* host and firmware of the same WMI namespace will work
diff --git a/drivers/staging/gdm724x/gdm_mux.c b/drivers/staging/gdm724x/gdm_mux.c
index 445f836..fb4f3fe 100644
--- a/drivers/staging/gdm724x/gdm_mux.c
+++ b/drivers/staging/gdm724x/gdm_mux.c
@@ -670,14 +670,14 @@
static void __exit gdm_usb_mux_exit(void)
{
- unregister_lte_tty_driver();
-
if (mux_rx_wq) {
flush_workqueue(mux_rx_wq);
destroy_workqueue(mux_rx_wq);
}
usb_deregister(&gdm_mux_driver);
+ unregister_lte_tty_driver();
+
}
module_init(gdm_usb_mux_init);
diff --git a/drivers/staging/iio/resolver/ad2s1210.c b/drivers/staging/iio/resolver/ad2s1210.c
index d97aa28..8eb7179 100644
--- a/drivers/staging/iio/resolver/ad2s1210.c
+++ b/drivers/staging/iio/resolver/ad2s1210.c
@@ -468,7 +468,7 @@
long m)
{
struct ad2s1210_state *st = iio_priv(indio_dev);
- bool negative;
+ u16 negative;
int ret = 0;
u16 pos;
s16 vel;
diff --git a/drivers/staging/lustre/lustre/lov/lov_pack.c b/drivers/staging/lustre/lustre/lov/lov_pack.c
index 2fb1e97..e11b100 100644
--- a/drivers/staging/lustre/lustre/lov/lov_pack.c
+++ b/drivers/staging/lustre/lustre/lov/lov_pack.c
@@ -399,18 +399,10 @@
struct lov_mds_md *lmmk = NULL;
int rc, lmm_size;
int lum_size;
- mm_segment_t seg;
if (!lsm)
return -ENODATA;
- /*
- * "Switch to kernel segment" to allow copying from kernel space by
- * copy_{to,from}_user().
- */
- seg = get_fs();
- set_fs(KERNEL_DS);
-
/* we only need the header part from user space to get lmm_magic and
* lmm_stripe_count, (the header part is common to v1 and v3) */
lum_size = sizeof(struct lov_user_md_v1);
@@ -485,6 +477,5 @@
obd_free_diskmd(exp, &lmmk);
out_set:
- set_fs(seg);
return rc;
}
diff --git a/drivers/staging/qca-wifi-host-cmn/hif/inc/hif.h b/drivers/staging/qca-wifi-host-cmn/hif/inc/hif.h
index 6f9c170..2ddd733 100644
--- a/drivers/staging/qca-wifi-host-cmn/hif/inc/hif.h
+++ b/drivers/staging/qca-wifi-host-cmn/hif/inc/hif.h
@@ -104,28 +104,39 @@
struct CE_state;
#define CE_COUNT_MAX 12
-#ifdef CONFIG_SLUB_DEBUG_ON
+/**
+ * With a max time limit in BH, max_num_messages is now
+ * reduced to 128, which is much higher than what we
+ * observe. We don't need to have different limits
+ * for PERF and SLUB builds.
+ */
#define QCA_NAPI_BUDGET 64
#define QCA_NAPI_DEF_SCALE 2
-#else /* PERF build */
-#define QCA_NAPI_BUDGET 64
-#define QCA_NAPI_DEF_SCALE 16
-#endif /* SLUB_DEBUG_ON */
-#define HIF_NAPI_MAX_RECEIVES (QCA_NAPI_BUDGET * QCA_NAPI_DEF_SCALE)
/* NOTE: "napi->scale" can be changed,
* but this does not change the number of buckets
*/
-#define QCA_NAPI_NUM_BUCKETS 4
+#define HIF_NAPI_MAX_RECEIVES (QCA_NAPI_BUDGET * QCA_NAPI_DEF_SCALE)
+
+#define QCA_NAPI_MSG_BUCKETS (HIF_NAPI_MAX_RECEIVES) /* 1 msg granularity */
+
+/**
+ * Note: the following MAX has to be the same as the max
+ * in the wlan_hdd_cfg.h
+ */
+#define QCA_NAPI_TIME_MAX 10000
+#define QCA_NAPI_TIME_BUCKETS 40
struct qca_napi_stat {
uint32_t napi_schedules;
uint32_t napi_polls;
uint32_t napi_completes;
uint32_t napi_workdone;
uint32_t cpu_corrected;
- uint32_t napi_budget_uses[QCA_NAPI_NUM_BUCKETS];
+ uint32_t napi_msg_budget[QCA_NAPI_MSG_BUCKETS];
+ uint32_t napi_time_budget[QCA_NAPI_TIME_BUCKETS];
uint32_t time_limit_reached;
uint32_t rxpkt_thresh_reached;
+ unsigned long long napi_max_poll_time;
};
/**
@@ -368,6 +379,7 @@
bool (*is_recovery_in_progress)(void *context);
bool (*is_load_unload_in_progress)(void *context);
bool (*is_driver_unloading)(void *context);
+ bool (*is_target_ready)(void *context);
};
/* This API detaches the HTC layer from the HIF device */
@@ -527,6 +539,12 @@
struct hif_dl_pipe_info dl_pipe;
};
+#ifdef CONFIG_SLUB_DEBUG_ON
+#define MSG_FLUSH_NUM 16
+#else /* PERF build */
+#define MSG_FLUSH_NUM 32
+#endif /* SLUB_DEBUG_ON */
+
struct hif_bus_id;
void hif_claim_device(struct hif_opaque_softc *hif_ctx);
@@ -754,4 +772,40 @@
}
#endif
+/**
+ * hif_set_ce_service_max_yield_time() - sets CE service max yield time
+ * @hif: hif context
+ * @ce_service_max_yield_time: CE service max yield time to set, in usecs
+ *
+ * This API stores CE service max yield time in hif context based
+ * on ini value.
+ *
+ * Return: void
+ */
+void hif_set_ce_service_max_yield_time(struct hif_opaque_softc *hif,
+ uint32_t ce_service_max_yield_time);
+
+/**
+ * hif_get_ce_service_max_yield_time() - get CE service max yield time
+ * @hif: hif context
+ *
+ * This API returns CE service max yield time, in nsecs.
+ *
+ * Return: CE service max yield time
+ */
+unsigned long long
+hif_get_ce_service_max_yield_time(struct hif_opaque_softc *hif);
+
+/**
+ * hif_set_ce_service_max_rx_ind_flush() - sets CE service max rx ind flush
+ * @hif: hif context
+ * @ce_service_max_rx_ind_flush: CE service max rx ind flush to set
+ *
+ * This API stores CE service max rx ind flush in hif context based
+ * on ini value.
+ *
+ * Return: void
+ */
+void hif_set_ce_service_max_rx_ind_flush(struct hif_opaque_softc *hif,
+ uint8_t ce_service_max_rx_ind_flush);
#endif /* _HIF_H_ */
diff --git a/drivers/staging/qca-wifi-host-cmn/hif/inc/hif_napi.h b/drivers/staging/qca-wifi-host-cmn/hif/inc/hif_napi.h
index f682ca0..de511e0 100644
--- a/drivers/staging/qca-wifi-host-cmn/hif/inc/hif_napi.h
+++ b/drivers/staging/qca-wifi-host-cmn/hif/inc/hif_napi.h
@@ -262,4 +262,18 @@
#endif /* FEATURE_NAPI */
+/**
+ * hif_update_napi_max_poll_time() - updates NAPI max poll time
+ * @ce_state: ce state
+ * @napi_info: pointer to napi info structure
+ * @cpu_id: cpu id
+ *
+ * This API updates NAPI max poll time per CE per CPU.
+ *
+ * Return: void
+ */
+void hif_update_napi_max_poll_time(struct CE_state *ce_state,
+ struct qca_napi_info *napi_info,
+ int cpu_id);
+
#endif /* __HIF_NAPI_H__ */
diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_internal.h b/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_internal.h
index 4a9592e..137e5f9 100644
--- a/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_internal.h
+++ b/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_internal.h
@@ -139,6 +139,8 @@
/* time in nanoseconds to yield control of napi poll */
unsigned long long ce_service_yield_time;
+ /* CE service start time in nanoseconds */
+ unsigned long long ce_service_start_time;
/* Num Of Receive Buffers handled for one interrupt DPC routine */
unsigned int receive_count;
/* epping */
diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_main.c b/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_main.c
index dcd7a95..bd04caf 100644
--- a/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_main.c
+++ b/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_main.c
@@ -59,7 +59,7 @@
#include "mp_dev.h"
/* Forward references */
-static int hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info);
+QDF_STATUS hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info);
/*
* Fix EV118783, poll to check whether a BMI response comes
@@ -81,7 +81,7 @@
#define HTT_DATA3_MSG_SVC MAKE_SERVICE_ID(HTT_SERVICE_GROUP, 2)
#endif
-static int hif_post_recv_buffers(struct hif_softc *scn);
+QDF_STATUS hif_post_recv_buffers(struct hif_softc *scn);
static void hif_config_rri_on_ddr(struct hif_softc *scn);
static void hif_clear_rri_on_ddr(struct hif_softc *scn);
@@ -1685,7 +1685,7 @@
-static int hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info)
+QDF_STATUS hif_post_recv_buffers_for_pipe(struct HIF_CE_pipe_info *pipe_info)
{
struct CE_handle *ce_hdl;
qdf_size_t buf_sz;
@@ -1696,7 +1696,7 @@
buf_sz = pipe_info->buf_sz;
if (buf_sz == 0) {
/* Unused Copy Engine */
- return 0;
+ return QDF_STATUS_SUCCESS;
}
ce_hdl = pipe_info->ce_hdl;
@@ -1705,7 +1705,6 @@
while (atomic_read(&pipe_info->recv_bufs_needed) > 0) {
qdf_dma_addr_t CE_data; /* CE space buffer address */
qdf_nbuf_t nbuf;
- int status;
atomic_dec(&pipe_info->recv_bufs_needed);
qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
@@ -1716,7 +1715,7 @@
&pipe_info->nbuf_alloc_err_count,
HIF_RX_NBUF_ALLOC_FAILURE,
"HIF_RX_NBUF_ALLOC_FAILURE");
- return 1;
+ return QDF_STATUS_E_NOMEM;
}
/*
@@ -1733,16 +1732,16 @@
HIF_RX_NBUF_MAP_FAILURE,
"HIF_RX_NBUF_MAP_FAILURE");
qdf_nbuf_free(nbuf);
- return 1;
+ return ret;
}
CE_data = qdf_nbuf_get_frag_paddr(nbuf, 0);
qdf_mem_dma_sync_single_for_device(scn->qdf_dev, CE_data,
buf_sz, DMA_FROM_DEVICE);
- status = ce_recv_buf_enqueue(ce_hdl, (void *)nbuf, CE_data);
- QDF_ASSERT(status == QDF_STATUS_SUCCESS);
- if (unlikely(status != EOK)) {
+ ret = ce_recv_buf_enqueue(ce_hdl, (void *)nbuf, CE_data);
+ if (unlikely(ret != QDF_STATUS_SUCCESS)) {
+ QDF_ASSERT(0);
hif_post_recv_buffers_failure(pipe_info, nbuf,
&pipe_info->nbuf_ce_enqueue_err_count,
HIF_RX_NBUF_ENQUEUE_FAILURE,
@@ -1751,7 +1750,7 @@
qdf_nbuf_unmap_single(scn->qdf_dev, nbuf,
QDF_DMA_FROM_DEVICE);
qdf_nbuf_free(nbuf);
- return 1;
+ return ret;
}
qdf_spin_lock_bh(&pipe_info->recv_bufs_needed_lock);
@@ -1769,7 +1768,7 @@
qdf_spin_unlock_bh(&pipe_info->recv_bufs_needed_lock);
- return 0;
+ return QDF_STATUS_SUCCESS;
}
/*
@@ -1779,11 +1778,12 @@
* failures, non-zero if unable to completely replenish
* receive buffers for fastpath rx Copy engine.
*/
-static int hif_post_recv_buffers(struct hif_softc *scn)
+QDF_STATUS hif_post_recv_buffers(struct hif_softc *scn)
{
struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
- int pipe_num, rv = 0;
+ int pipe_num;
struct CE_state *ce_state;
+ QDF_STATUS qdf_status;
A_TARGET_ACCESS_LIKELY(scn);
for (pipe_num = 0; pipe_num < scn->ce_count; pipe_num++) {
@@ -1796,24 +1796,25 @@
ce_state && (ce_state->htt_rx_data))
continue;
- if (hif_post_recv_buffers_for_pipe(pipe_info) &&
+ qdf_status = hif_post_recv_buffers_for_pipe(pipe_info);
+ if (!QDF_IS_STATUS_SUCCESS(qdf_status) &&
ce_state->htt_rx_data &&
scn->fastpath_mode_on) {
- rv = 1;
- goto done;
+ A_TARGET_ACCESS_UNLIKELY(scn);
+ return qdf_status;
}
}
-done:
A_TARGET_ACCESS_UNLIKELY(scn);
- return rv;
+ return QDF_STATUS_SUCCESS;
}
QDF_STATUS hif_start(struct hif_opaque_softc *hif_ctx)
{
struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(scn);
+ QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;
hif_update_fastpath_recv_bufs_cnt(scn);
@@ -1826,13 +1827,14 @@
hif_state->started = true;
/* Post buffers once to start things off. */
- if (hif_post_recv_buffers(scn)) {
+ qdf_status = hif_post_recv_buffers(scn);
+ if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
/* cleanup is done in hif_ce_disable */
HIF_ERROR("%s:failed to post buffers", __func__);
- return QDF_STATUS_E_FAILURE;
+ return qdf_status;
}
- return QDF_STATUS_SUCCESS;
+ return qdf_status;
}
static void hif_recv_buffer_cleanup_on_pipe(struct HIF_CE_pipe_info *pipe_info)
@@ -2307,6 +2309,7 @@
QDF_STATUS rv = QDF_STATUS_SUCCESS;
scn->notice_send = true;
+ scn->ce_service_max_rx_ind_flush = MSG_FLUSH_NUM;
hif_post_static_buf_to_target(scn);
@@ -2831,11 +2834,16 @@
struct CE_attr attr;
attr = host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];
- if (attr.flags & CE_ATTR_DISABLE_INTR)
+ if (attr.flags & CE_ATTR_DISABLE_INTR) {
return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
- else
- return A_TARGET_READ(scn,
- (CE_ctrl_addr) + CURRENT_SRRI_ADDRESS);
+ } else {
+ if (TARGET_REGISTER_ACCESS_ALLOW(scn))
+ return A_TARGET_READ(scn,
+ (CE_ctrl_addr) + CURRENT_SRRI_ADDRESS);
+ else
+ return CE_SRC_RING_READ_IDX_GET_FROM_DDR(scn,
+ CE_ctrl_addr);
+ }
}
/**
@@ -2856,11 +2864,16 @@
attr = host_ce_config[COPY_ENGINE_ID(CE_ctrl_addr)];
- if (attr.flags & CE_ATTR_DISABLE_INTR)
+ if (attr.flags & CE_ATTR_DISABLE_INTR) {
return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn, CE_ctrl_addr);
- else
- return A_TARGET_READ(scn,
- (CE_ctrl_addr) + CURRENT_DRRI_ADDRESS);
+ } else {
+ if (TARGET_REGISTER_ACCESS_ALLOW(scn))
+ return A_TARGET_READ(scn,
+ (CE_ctrl_addr) + CURRENT_DRRI_ADDRESS);
+ else
+ return CE_DEST_RING_READ_IDX_GET_FROM_DDR(scn,
+ CE_ctrl_addr);
+ }
}
/**
diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_service.c b/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_service.c
index 213f2fb..2250d6d 100644
--- a/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_service.c
+++ b/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_service.c
@@ -212,7 +212,7 @@
yield = time_limit_reached || rxpkt_thresh_reached;
- if (yield)
+ if (yield && ce_state->htt_rx_data)
hif_napi_update_yield_stats(ce_state,
time_limit_reached,
rxpkt_thresh_reached);
@@ -1640,11 +1640,6 @@
dest_ring->write_index = write_index;
}
-#ifdef CONFIG_SLUB_DEBUG_ON
-#define MSG_FLUSH_NUM 16
-#else /* PERF build */
-#define MSG_FLUSH_NUM 32
-#endif /* SLUB_DEBUG_ON */
/**
* ce_per_engine_service_fast() - CE handler routine to service fastpath msgs
* @scn: hif_context
@@ -1741,15 +1736,17 @@
* we are not posting the buffers back instead
* reusing the buffers
*/
- if (nbuf_cmpl_idx == MSG_FLUSH_NUM) {
+ if (nbuf_cmpl_idx == scn->ce_service_max_rx_ind_flush) {
hif_record_ce_desc_event(scn, ce_state->id,
FAST_RX_SOFTWARE_INDEX_UPDATE,
NULL, NULL, sw_index);
dest_ring->sw_index = sw_index;
ce_fastpath_rx_handle(ce_state, cmpl_msdus,
- MSG_FLUSH_NUM, ctrl_addr);
+ scn->ce_service_max_rx_ind_flush,
+ ctrl_addr);
- ce_state->receive_count += MSG_FLUSH_NUM;
+ ce_state->receive_count +=
+ scn->ce_service_max_rx_ind_flush;
if (qdf_unlikely(hif_ce_service_should_yield(
scn, ce_state))) {
ce_state->force_break = 1;
@@ -1788,9 +1785,18 @@
more_comp_cnt = 0;
goto more_data;
}
+
+ hif_update_napi_max_poll_time(ce_state, scn->napi_data.napis[ce_id],
+ qdf_get_cpu());
+
qdf_atomic_set(&ce_state->rx_pending, 0);
- CE_ENGINE_INT_STATUS_CLEAR(scn, ctrl_addr,
- HOST_IS_COPY_COMPLETE_MASK);
+ if (TARGET_REGISTER_ACCESS_ALLOW(scn)) {
+ CE_ENGINE_INT_STATUS_CLEAR(scn, ctrl_addr,
+ HOST_IS_COPY_COMPLETE_MASK);
+ } else {
+ HIF_ERROR("%s: target access is not allowed", __func__);
+ return;
+ }
if (ce_recv_entries_done_nolock(scn, ce_state)) {
if (more_comp_cnt++ < CE_TXRX_COMP_CHECK_THRESHOLD) {
@@ -1810,11 +1816,6 @@
}
#endif /* WLAN_FEATURE_FASTPATH */
-/* Maximum amount of time in nano seconds before which the CE per engine service
- * should yield. ~1 jiffie.
- */
-#define CE_PER_ENGINE_SERVICE_MAX_YIELD_TIME_NS (10 * 1000 * 1000)
-
/*
* Guts of interrupt handler for per-engine interrupts on a particular CE.
*
@@ -1851,10 +1852,11 @@
/* Clear force_break flag and re-initialize receive_count to 0 */
CE_state->receive_count = 0;
CE_state->force_break = 0;
+ CE_state->ce_service_start_time = sched_clock();
CE_state->ce_service_yield_time =
- sched_clock() +
- (unsigned long long)CE_PER_ENGINE_SERVICE_MAX_YIELD_TIME_NS;
-
+ CE_state->ce_service_start_time +
+ hif_get_ce_service_max_yield_time(
+ (struct hif_opaque_softc *)scn);
qdf_spin_lock(&CE_state->ce_index_lock);
/*
@@ -1986,9 +1988,14 @@
* more copy completions happened while the misc interrupts were being
* handled.
*/
- CE_ENGINE_INT_STATUS_CLEAR(scn, ctrl_addr,
- CE_WATERMARK_MASK |
- HOST_IS_COPY_COMPLETE_MASK);
+ if (TARGET_REGISTER_ACCESS_ALLOW(scn)) {
+ CE_ENGINE_INT_STATUS_CLEAR(scn, ctrl_addr,
+ CE_WATERMARK_MASK |
+ HOST_IS_COPY_COMPLETE_MASK);
+ } else {
+ HIF_ERROR("%s: target access is not allowed", __func__);
+ return CE_state->receive_count;
+ }
/*
* Now that per-engine interrupts are cleared, verify that
@@ -2104,6 +2111,11 @@
if (Q_TARGET_ACCESS_BEGIN(scn) < 0)
return;
+ if (!TARGET_REGISTER_ACCESS_ALLOW(scn)) {
+ HIF_ERROR("%s: target access is not allowed", __func__);
+ return;
+ }
+
if ((!disable_copy_compl_intr) &&
(CE_state->send_cb || CE_state->recv_cb))
CE_COPY_COMPLETE_INTR_ENABLE(scn, ctrl_addr);
diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_tasklet.c b/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_tasklet.c
index d4ca8bc..dc7036e 100644
--- a/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_tasklet.c
+++ b/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_tasklet.c
@@ -482,6 +482,10 @@
return IRQ_NONE;
}
hif_irq_disable(scn, ce_id);
+
+ if (!TARGET_REGISTER_ACCESS_ALLOW(scn))
+ return IRQ_HANDLED;
+
hif_record_ce_desc_event(scn, ce_id, HIF_IRQ_EVENT, NULL, NULL, 0);
hif_ce_increment_interrupt_count(hif_ce_state, ce_id);
diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/hif_io32.h b/drivers/staging/qca-wifi-host-cmn/hif/src/hif_io32.h
index 8c39893..fa4fca9 100644
--- a/drivers/staging/qca-wifi-host-cmn/hif/src/hif_io32.h
+++ b/drivers/staging/qca-wifi-host-cmn/hif/src/hif_io32.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2016 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2017 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -40,6 +40,8 @@
hif_target_sleep_state_adjust(scn, false, true)
#define Q_TARGET_ACCESS_END(scn) \
hif_target_sleep_state_adjust(scn, true, false)
+#define TARGET_REGISTER_ACCESS_ALLOW(scn)\
+ hif_is_target_register_access_allowed(scn)
/*
* A_TARGET_ACCESS_LIKELY will not wait for the target to wake up before
diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/hif_main.c b/drivers/staging/qca-wifi-host-cmn/hif/src/hif_main.c
index e70f50e..97c471b 100644
--- a/drivers/staging/qca-wifi-host-cmn/hif/src/hif_main.c
+++ b/drivers/staging/qca-wifi-host-cmn/hif/src/hif_main.c
@@ -981,6 +981,23 @@
return false;
}
+
+/**
+ * hif_is_target_ready() - API to query if the target is in the
+ * ready state
+ * @scn: HIF Context
+ *
+ * Return: True/False
+ */
+bool hif_is_target_ready(struct hif_softc *scn)
+{
+ struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
+
+ if (cbk && cbk->is_target_ready)
+ return cbk->is_target_ready(cbk->context);
+
+ return false;
+}
#if defined(HIF_PCI) || defined(SNOC) || defined(HIF_AHB)
/**
* hif_batch_send() - API to access hif specific function
@@ -1106,3 +1123,33 @@
hif_usb_ramdump_handler();
}
#endif
+
+void hif_set_ce_service_max_yield_time(struct hif_opaque_softc *hif,
+ uint32_t ce_service_max_yield_time)
+{
+ struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
+
+ hif_ctx->ce_service_max_yield_time =
+ ce_service_max_yield_time * 1000;
+}
+
+unsigned long long
+hif_get_ce_service_max_yield_time(struct hif_opaque_softc *hif)
+{
+ struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
+
+ return hif_ctx->ce_service_max_yield_time;
+}
+
+void hif_set_ce_service_max_rx_ind_flush(struct hif_opaque_softc *hif,
+ uint8_t ce_service_max_rx_ind_flush)
+{
+ struct hif_softc *hif_ctx = HIF_GET_SOFTC(hif);
+
+ if (ce_service_max_rx_ind_flush == 0 ||
+ ce_service_max_rx_ind_flush > MSG_FLUSH_NUM)
+ hif_ctx->ce_service_max_rx_ind_flush = MSG_FLUSH_NUM;
+ else
+ hif_ctx->ce_service_max_rx_ind_flush =
+ ce_service_max_rx_ind_flush;
+}
diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/hif_main.h b/drivers/staging/qca-wifi-host-cmn/hif/src/hif_main.h
index b4c0a28..a8f8e73 100644
--- a/drivers/staging/qca-wifi-host-cmn/hif/src/hif_main.h
+++ b/drivers/staging/qca-wifi-host-cmn/hif/src/hif_main.h
@@ -157,6 +157,9 @@
#ifdef FEATURE_NAPI
struct qca_napi_data napi_data;
#endif /* FEATURE_NAPI */
+ /* stores ce_service_max_yield_time in ns */
+ unsigned long long ce_service_max_yield_time;
+ uint8_t ce_service_max_rx_ind_flush;
struct hif_driver_state_callbacks callbacks;
uint32_t hif_con_param;
#ifdef QCA_NSS_WIFI_OFFLOAD_SUPPORT
@@ -220,6 +223,7 @@
bool hif_is_driver_unloading(struct hif_softc *scn);
bool hif_is_load_or_unload_in_progress(struct hif_softc *scn);
bool hif_is_recovery_in_progress(struct hif_softc *scn);
+bool hif_is_target_ready(struct hif_softc *scn);
void hif_wlan_disable(struct hif_softc *scn);
int hif_target_sleep_state_adjust(struct hif_softc *scn,
bool sleep_ok,
@@ -234,4 +238,13 @@
static inline void hif_ramdump_handler(struct hif_opaque_softc *scn) {}
#endif
+#ifdef HIF_SNOC
+bool hif_is_target_register_access_allowed(struct hif_softc *hif_sc);
+#else
+static inline
+bool hif_is_target_register_access_allowed(struct hif_softc *hif_sc)
+{
+ return true;
+}
+#endif
#endif /* __HIF_MAIN_H__ */
diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/hif_napi.c b/drivers/staging/qca-wifi-host-cmn/hif/src/hif_napi.c
index 03699b6..b4ff1b3 100644
--- a/drivers/staging/qca-wifi-host-cmn/hif/src/hif_napi.c
+++ b/drivers/staging/qca-wifi-host-cmn/hif/src/hif_napi.c
@@ -821,12 +821,13 @@
{
int rc = 0; /* default: no work done, also takes care of error */
int normalized = 0;
- int bucket;
int cpu = smp_processor_id();
bool poll_on_right_cpu;
struct hif_softc *hif = HIF_GET_SOFTC(hif_ctx);
struct qca_napi_info *napi_info;
struct CE_state *ce_state = NULL;
+ unsigned bucket;
+ unsigned long long time_taken;
if (unlikely(NULL == hif)) {
HIF_ERROR("%s: hif context is NULL", __func__);
@@ -860,13 +861,17 @@
normalized = (rc / napi_info->scale);
if (normalized == 0)
normalized++;
- bucket = normalized / (QCA_NAPI_BUDGET / QCA_NAPI_NUM_BUCKETS);
- if (bucket >= QCA_NAPI_NUM_BUCKETS) {
- bucket = QCA_NAPI_NUM_BUCKETS - 1;
- HIF_ERROR("Bad bucket#(%d) > QCA_NAPI_NUM_BUCKETS(%d)",
- bucket, QCA_NAPI_NUM_BUCKETS);
+ /*
+ * msg bucket calculation uses non-normalized values (msgs),
+ * as msg bucket granularity is now 1 per message ( > budget)
+ */
+ bucket = rc % QCA_NAPI_MSG_BUCKETS;
+ if (bucket >= QCA_NAPI_MSG_BUCKETS) {
+ bucket = QCA_NAPI_MSG_BUCKETS - 1;
+ HIF_ERROR("Bad bucket#(%d) > QCA_NAPI_MSG_BUCKETS(%d)",
+ bucket, QCA_NAPI_MSG_BUCKETS);
}
- napi_info->stats[cpu].napi_budget_uses[bucket]++;
+ napi_info->stats[cpu].napi_msg_budget[bucket]++;
} else {
/* if ce_per engine reports 0, then poll should be terminated */
NAPI_DEBUG("%s:%d: nothing processed by CE. Completing NAPI",
@@ -874,6 +879,20 @@
}
ce_state = hif->ce_id_to_state[NAPI_ID2PIPE(napi_info->id)];
+
+ /*
+ * update the time histogram here, before re-enabling the interrupts
+ * as there is a risk that this function may be executed in parallel
+ */
+ time_taken = sched_clock() - ce_state->ce_service_start_time;
+ /* time limit is specified in usecs in the config file */
+ time_taken /= 1000; /* nsecs -> usecs, as in the cfg */
+ bucket = time_taken / (QCA_NAPI_TIME_MAX / QCA_NAPI_TIME_BUCKETS);
+ if (bucket >= QCA_NAPI_TIME_BUCKETS)
+ bucket = QCA_NAPI_TIME_BUCKETS - 1;
+ napi_info->stats[cpu].napi_time_budget[bucket]++;
+
+
/*
* Not using the API hif_napi_correct_cpu directly in the if statement
* below since the API may not get evaluated if put at the end if any
@@ -917,6 +936,18 @@
return rc;
}
+void hif_update_napi_max_poll_time(struct CE_state *ce_state,
+ struct qca_napi_info *napi_info,
+ int cpu_id)
+{
+ unsigned long long napi_poll_time = sched_clock() -
+ ce_state->ce_service_start_time;
+
+ if (napi_poll_time >
+ napi_info->stats[cpu_id].napi_max_poll_time)
+ napi_info->stats[cpu_id].napi_max_poll_time = napi_poll_time;
+}
+
#ifdef HELIUMPLUS
/**
*
@@ -954,16 +985,22 @@
return;
}
- if (unlikely(NULL == napi_data->napis[ce_id]))
- return;
-
ce_id = ce_state->id;
cpu_id = qdf_get_cpu();
+ if (unlikely(!napi_data->napis[ce_id])) {
+ HIF_INFO("%s: NAPI info is NULL for ce id: %d",
+ __func__, ce_id);
+ return;
+ }
+
if (time_limit_reached)
napi_data->napis[ce_id]->stats[cpu_id].time_limit_reached++;
else
napi_data->napis[ce_id]->stats[cpu_id].rxpkt_thresh_reached++;
+
+ hif_update_napi_max_poll_time(ce_state, napi_data->napis[ce_id],
+ cpu_id);
}
/**
diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/snoc/hif_io32_snoc.h b/drivers/staging/qca-wifi-host-cmn/hif/src/snoc/hif_io32_snoc.h
index f2a1816..3d440b5 100644
--- a/drivers/staging/qca-wifi-host-cmn/hif/src/snoc/hif_io32_snoc.h
+++ b/drivers/staging/qca-wifi-host-cmn/hif/src/snoc/hif_io32_snoc.h
@@ -47,6 +47,10 @@
uint32_t offset;
offset = HOST_IE_ADDRESS + CE_BASE_ADDRESS(ce_id);
+ if (!TARGET_REGISTER_ACCESS_ALLOW(scn)) {
+ HIF_ERROR("%s: target access is not allowed", __func__);
+ return;
+ }
hif_write32_mb(scn->mem + offset, 1);
}
@@ -56,7 +60,16 @@
uint32_t offset;
offset = HOST_IE_ADDRESS + CE_BASE_ADDRESS(ce_id);
+ if (!TARGET_REGISTER_ACCESS_ALLOW(scn)) {
+ HIF_ERROR("%s: target access is not allowed", __func__);
+ return;
+ }
hif_write32_mb(scn->mem + offset, 0);
+
+ if (!TARGET_REGISTER_ACCESS_ALLOW(scn)) {
+ HIF_ERROR("%s: target access is not allowed", __func__);
+ return;
+ }
hif_read32_mb(scn->mem + offset);
}
#endif
diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/snoc/if_snoc.c b/drivers/staging/qca-wifi-host-cmn/hif/src/snoc/if_snoc.c
index dfc78ad..5052fa5 100644
--- a/drivers/staging/qca-wifi-host-cmn/hif/src/snoc/if_snoc.c
+++ b/drivers/staging/qca-wifi-host-cmn/hif/src/snoc/if_snoc.c
@@ -458,3 +458,18 @@
return 0;
}
+/**
+ * hif_is_target_register_access_allowed(): Check target register access allow
+ * @scn: HIF Context
+ *
+ * This function helps to check whether target register access is allowed or not
+ *
+ * Return: true if target access is allowed else false
+ */
+bool hif_is_target_register_access_allowed(struct hif_softc *scn)
+{
+ if (hif_is_recovery_in_progress(scn))
+ return hif_is_target_ready(scn);
+ else
+ return true;
+}
diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_nbuf.c b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_nbuf.c
index c23bc5b..06539c5 100644
--- a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_nbuf.c
+++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_nbuf.c
@@ -190,11 +190,15 @@
{
struct sk_buff *skb;
unsigned long offset;
+ int flags = GFP_KERNEL;
if (align)
size += (align - 1);
- skb = dev_alloc_skb(size);
+ if (in_interrupt() || irqs_disabled() || in_atomic())
+ flags = GFP_ATOMIC;
+
+ skb = __netdev_alloc_skb(NULL, size, flags);
if (!skb)
return NULL;
diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_api.h b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_api.h
index cc6056d..2d3614c 100644
--- a/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_api.h
+++ b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_api.h
@@ -740,6 +740,19 @@
struct qdf_mac_addr multicast_addr,
bool clearList);
+/**
+ * wmi_unified_multiple_add_clear_mcbc_filter_cmd() - send multiple mcast
+ * filter command to fw
+ * @wmi_hdl: wmi handle
+ * @vdev_id: vdev id
+ * @filter_param: mcast filter params
+ *
+ * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure
+ */
+QDF_STATUS wmi_unified_multiple_add_clear_mcbc_filter_cmd(void *wmi_hdl,
+ uint8_t vdev_id,
+ struct mcast_filter_params *filter_param);
+
QDF_STATUS wmi_unified_send_gtk_offload_cmd(void *wmi_hdl, uint8_t vdev_id,
struct gtk_offload_params *params,
bool enable_offload,
@@ -890,8 +903,7 @@
#ifndef WMI_NON_TLV_SUPPORT
QDF_STATUS wmi_unified_send_roam_scan_offload_ap_cmd(void *wmi_hdl,
- wmi_ap_profile *ap_profile_p,
- uint32_t vdev_id);
+ struct ap_profile_params *ap_profile);
#endif
QDF_STATUS wmi_unified_roam_scan_offload_scan_period(void *wmi_hdl,
diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_param.h b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_param.h
index ef23ad0..71177e3 100644
--- a/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_param.h
+++ b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_param.h
@@ -98,6 +98,7 @@
#define WMI_MSEC_TO_USEC(msec) (msec * 1000) /* msec to usec */
#define WMI_NLO_FREQ_THRESH 1000 /* in MHz */
+#define WMI_SVC_MSG_MAX_SIZE 1536
#define MAX_UTF_EVENT_LENGTH 2048
#define MAX_WMI_UTF_LEN 252
#define MAX_WMI_QVIT_LEN 252
@@ -1237,6 +1238,63 @@
};
/**
+ * struct tx_send_params - TX parameters
+ * @pwr: Tx frame transmission power
+ * @mcs_mask: Modulation and coding index mask for transmission
+ * bit 0 -> CCK 1 Mbps rate is allowed
+ * bit 1 -> CCK 2 Mbps rate is allowed
+ * bit 2 -> CCK 5.5 Mbps rate is allowed
+ * bit 3 -> CCK 11 Mbps rate is allowed
+ * bit 4 -> OFDM BPSK modulation, 1/2 coding rate is allowed
+ * bit 5 -> OFDM BPSK modulation, 3/4 coding rate is allowed
+ * bit 6 -> OFDM QPSK modulation, 1/2 coding rate is allowed
+ * bit 7 -> OFDM QPSK modulation, 3/4 coding rate is allowed
+ * bit 8 -> OFDM 16-QAM modulation, 1/2 coding rate is allowed
+ * bit 9 -> OFDM 16-QAM modulation, 3/4 coding rate is allowed
+ * bit 10 -> OFDM 64-QAM modulation, 2/3 coding rate is allowed
+ * bit 11 -> OFDM 64-QAM modulation, 3/4 coding rate is allowed
+ * @nss_mask: Spatial streams permitted
+ * bit 0: if set, Nss = 1 (non-MIMO) is permitted
+ * bit 1: if set, Nss = 2 (2x2 MIMO) is permitted
+ * bit 2: if set, Nss = 3 (3x3 MIMO) is permitted
+ * bit 3: if set, Nss = 4 (4x4 MIMO) is permitted
+ * bit 4: if set, Nss = 5 (5x5 MIMO) is permitted
+ * bit 5: if set, Nss = 6 (6x6 MIMO) is permitted
+ * bit 6: if set, Nss = 7 (7x7 MIMO) is permitted
+ * bit 7: if set, Nss = 8 (8x8 MIMO) is permitted
+ * If no bits are set, target will choose what NSS type to use
+ * @retry_limit: Maximum number of retries before ACK
+ * @chain_mask: Chains to be used for transmission
+ * @bw_mask: Bandwidth to be used for transmission
+ * bit 0 -> 5MHz
+ * bit 1 -> 10MHz
+ * bit 2 -> 20MHz
+ * bit 3 -> 40MHz
+ * bit 4 -> 80MHz
+ * bit 5 -> 160MHz
+ * bit 6 -> 80_80MHz
+ * @preamble_type: Preamble types for transmission
+ * bit 0: if set, OFDM
+ * bit 1: if set, CCK
+ * bit 2: if set, HT
+ * bit 3: if set, VHT
+ * bit 4: if set, HE
+ * @frame_type: Data or Management frame
+ * Data:1 Mgmt:0
+ */
+struct tx_send_params {
+ uint32_t pwr:8,
+ mcs_mask:12,
+ nss_mask:8,
+ retry_limit:4;
+ uint32_t chain_mask:8,
+ bw_mask:7,
+ preamble_type:5,
+ frame_type:1,
+ reserved:11;
+};
+
+/**
* struct wmi_mgmt_params - wmi mgmt cmd paramters
* @tx_frame: management tx frame
* @frm_len: frame length
@@ -1248,6 +1306,9 @@
* @wmi_desc: command descriptor
* @desc_id: descriptor id relyaed back by target
* @macaddr - macaddr of peer
+ * @qdf_ctx: qdf context for qdf_nbuf_map
+ * @tx_param: TX send parameters
+ * @tx_params_valid: Flag that indicates if TX params are valid
*/
struct wmi_mgmt_params {
void *tx_frame;
@@ -1258,6 +1319,8 @@
uint16_t desc_id;
uint8_t *macaddr;
void *qdf_ctx;
+ struct tx_send_params tx_param;
+ bool tx_params_valid;
};
/**
@@ -1830,7 +1893,11 @@
* @initial_dense_status: dense status detected by host
* @traffic_threshold: dense roam RSSI threshold
* @bg_scan_bad_rssi_thresh: Bad RSSI threshold to perform bg scan
+ * @roam_bad_rssi_thresh_offset_2g: Offset from Bad RSSI threshold for 2G to 5G Roam
* @bg_scan_client_bitmap: Bitmap used to identify the client scans to snoop
+ * @flags: Flags for Background Roaming
+ * Bit 0 : BG roaming enabled when we connect to 2G AP only and roaming to 5G AP only.
+ * Bit 1-31: Reserved
*/
struct roam_offload_scan_rssi_params {
int8_t rssi_thresh;
@@ -1854,7 +1921,170 @@
int initial_dense_status;
int traffic_threshold;
int8_t bg_scan_bad_rssi_thresh;
+ uint8_t roam_bad_rssi_thresh_offset_2g;
uint32_t bg_scan_client_bitmap;
+ int32_t rssi_thresh_offset_5g;
+ uint32_t flags;
+};
+
+/**
+ * struct ap_profile - Structure ap profile to match candidate
+ * @flags: flags
+ * @rssi_threshold: the rssi of the candidate AP should be higher by this
+ * threshold than the rssi of the currently associated AP
+ * @ssid: ssid value to be matched
+ * @rsn_authmode: security params to be matched
+ * @rsn_ucastcipherset: unicast cipher set
+ * @rsn_mcastcipherset: mcast/group cipher set
+ * @rsn_mcastmgmtcipherset: mcast/group management frames cipher set
+ * @rssi_abs_thresh: the value of the candidate AP should be higher than this
+ * absolute RSSI threshold. Zero means no absolute minimum
+ * RSSI is required. units are the offset from the noise
+ * floor in dB
+ */
+struct ap_profile {
+ uint32_t flags;
+ uint32_t rssi_threshold;
+ struct mac_ssid ssid;
+ uint32_t rsn_authmode;
+ uint32_t rsn_ucastcipherset;
+ uint32_t rsn_mcastcipherset;
+ uint32_t rsn_mcastmgmtcipherset;
+ uint32_t rssi_abs_thresh;
+};
+
+/**
+ * struct rssi_scoring - rssi scoring param to shortlist selected AP
+ * @best_rssi_threshold: Roamable AP RSSI equal or better than this threshold,
+ * full rssi score 100. Units in dBm.
+ * @good_rssi_threshold: Below threshold, scoring linear percentage between
+ * rssi_good_pnt and 100. Units in dBm.
+ * @bad_rssi_threshold: Between good and bad rssi threshold, scoring linear
+ * % between rssi_bad_pcnt and rssi_good_pct in dBm.
+ * @good_rssi_pcnt: Used to assign scoring percentage of each slot between
+ * best to good rssi threshold. Units in percentage.
+ * @bad_rssi_pcnt: Used to assign scoring percentage of each slot between good
+ * to bad rssi threshold. Units in percentage.
+ * @good_bucket_size : bucket size of slot in good zone
+ * @bad_bucket_size : bucket size of slot in bad zone
+ * @rssi_pref_5g_rssi_thresh: Below rssi threshold, 5G AP have given preference
+ * of band percentage. Units in dBm.
+ */
+struct rssi_scoring {
+ int32_t best_rssi_threshold;
+ int32_t good_rssi_threshold;
+ int32_t bad_rssi_threshold;
+ uint32_t good_rssi_pcnt;
+ uint32_t bad_rssi_pcnt;
+ uint32_t good_bucket_size;
+ uint32_t bad_bucket_size;
+ int32_t rssi_pref_5g_rssi_thresh;
+};
+
+/**
+ * struct param_slot_scoring - define % score for differents slots for a
+ * scoring param.
+ * @num_slot: number of slots in which the param will be divided.
+ * Max 15. index 0 is used for 'not_present'. Num_slot will
+ * equally divide 100. e.g, if num_slot = 4 slot 0 = 0-25%, slot
+ * 1 = 26-50% slot 2 = 51-75%, slot 3 = 76-100%
+ * @score_pcnt3_to_0: Contains score percentage for slot 0-3
+ * BITS 0-7 :- the scoring pcnt when not present
+ * BITS 8-15 :- SLOT_1
+ * BITS 16-23 :- SLOT_2
+ * BITS 24-31 :- SLOT_3
+ * @score_pcnt7_to_4: Contains score percentage for slot 4-7
+ * BITS 0-7 :- SLOT_4
+ * BITS 8-15 :- SLOT_5
+ * BITS 16-23 :- SLOT_6
+ * BITS 24-31 :- SLOT_7
+ * @score_pcnt11_to_8: Contains score percentage for slot 8-11
+ * BITS 0-7 :- SLOT_8
+ * BITS 8-15 :- SLOT_9
+ * BITS 16-23 :- SLOT_10
+ * BITS 24-31 :- SLOT_11
+ * @score_pcnt15_to_12: Contains score percentage for slot 12-15
+ * BITS 0-7 :- SLOT_12
+ * BITS 8-15 :- SLOT_13
+ * BITS 16-23 :- SLOT_14
+ * BITS 24-31 :- SLOT_15
+ */
+struct param_slot_scoring {
+ uint32_t num_slot;
+ uint32_t score_pcnt3_to_0;
+ uint32_t score_pcnt7_to_4;
+ uint32_t score_pcnt11_to_8;
+ uint32_t score_pcnt15_to_12;
+};
+
+/**
+ * struct scoring_param - scoring param to shortlist selected AP
+ * @disable_bitmap: Each bit will be either allow(0)/disallow(1) to
+ * consider the roam score param.
+ * @rssi_weightage: RSSI weightage out of total score in %
+ * @ht_weightage: HT weightage out of total score in %.
+ * @vht_weightage: VHT weightage out of total score in %.
+ * @he_weightage: 11ax weightage out of total score in %.
+ * @bw_weightage: Bandwidth weightage out of total score in %.
+ * @band_weightage: Band(2G/5G) weightage out of total score in %.
+ * @nss_weightage: NSS(1x1 / 2x2)weightage out of total score in %.
+ * @esp_qbss_weightage: ESP/QBSS weightage out of total score in %.
+ * @beamforming_weightage: Beamforming weightage out of total score in %.
+ * @pcl_weightage: PCL weightage out of total score in %.
+ * @oce_wan_weightage: OCE WAN metrics weightage out of total score in %.
+ * @bw_index_score: channel BW scoring percentage information.
+ * BITS 0-7 :- It contains scoring percentage of 20MHz BW
+ * BITS 8-15 :- It contains scoring percentage of 40MHz BW
+ * BITS 16-23 :- It contains scoring percentage of 80MHz BW
+ * BITS 24-31 :- It contains scoring percentage of 160MHz BW
+ * The value of each index must be 0-100
+ * @band_index_score: band scoring percentage information.
+ * BITS 0-7 :- It contains scoring percentage of 2G
+ * BITS 8-15 :- It contains scoring percentage of 5G
+ * BITS 16-23 :- reserved
+ * BITS 24-31 :- reserved
+ * The value of each index must be 0-100
+ * @nss_index_score: NSS scoring percentage information.
+ * BITS 0-7 :- It contains scoring percentage of 1x1
+ * BITS 8-15 :- It contains scoring percentage of 2x2
+ * BITS 16-23 :- It contains scoring percentage of 3x3
+ * BITS 24-31 :- It contains scoring percentage of 4x4
+ * The value of each index must be 0-100
+ * @rssi_scoring: RSSI scoring information.
+ * @esp_qbss_scoring: ESP/QBSS scoring percentage information
+ * @oce_wan_scoring: OCE WAN metrics percentage information
+*/
+struct scoring_param {
+ uint32_t disable_bitmap;
+ int32_t rssi_weightage;
+ int32_t ht_weightage;
+ int32_t vht_weightage;
+ int32_t he_weightage;
+ int32_t bw_weightage;
+ int32_t band_weightage;
+ int32_t nss_weightage;
+ int32_t esp_qbss_weightage;
+ int32_t beamforming_weightage;
+ int32_t pcl_weightage;
+ int32_t oce_wan_weightage;
+ uint32_t bw_index_score;
+ uint32_t band_index_score;
+ uint32_t nss_index_score;
+ struct rssi_scoring rssi_scoring;
+ struct param_slot_scoring esp_qbss_scoring;
+ struct param_slot_scoring oce_wan_scoring;
+};
+
+/**
+ * struct ap_profile_params - ap profile params
+ * @vdev_id: vdev id
+ * @profile: ap profile to match candidate
+ * @param: scoring params to shortlist candidates
+ */
+struct ap_profile_params {
+ uint8_t vdev_id;
+ struct ap_profile profile;
+ struct scoring_param param;
};
/**
@@ -3069,6 +3299,18 @@
};
/**
+ * struct mcast_filter_params - mcast filter parameters
+ * @multicast_addr_cnt: num of addresses
+ * @multicast_addr: address array
+ * @action: operation to perform
+ */
+struct mcast_filter_params {
+ uint32_t multicast_addr_cnt;
+ struct qdf_mac_addr multicast_addr[WMI_MAX_NUM_MULTICAST_ADDRESS];
+ uint8_t action;
+};
+
+/**
* struct flashing_req_params - led flashing parameter
* @reqId: request id
* @pattern_id: pattern identifier. 0: disconnected 1: connected
@@ -3293,7 +3535,6 @@
/**
* struct roam_scan_filter_params - Structure holding roaming scan
* parameters
- * @len: length
* @op_bitmap: bitmap to determine reason of roaming
* @session_id: vdev id
* @num_bssid_black_list: The number of BSSID's that we should
@@ -3323,7 +3564,6 @@
*/
struct roam_scan_filter_params {
- uint32_t len;
uint32_t op_bitmap;
uint8_t session_id;
uint32_t num_bssid_black_list;
diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_priv.h b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_priv.h
index dbdc212..73e38e6 100644
--- a/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_priv.h
+++ b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_priv.h
@@ -511,8 +511,7 @@
struct roam_offload_scan_params *roam_req);
QDF_STATUS (*send_roam_scan_offload_ap_profile_cmd)(wmi_unified_t wmi_handle,
- wmi_ap_profile * ap_profile_p,
- uint32_t vdev_id);
+ struct ap_profile_params *ap_profile);
QDF_STATUS (*send_pktlog_wmi_send_cmd)(wmi_unified_t wmi_handle,
WMI_PKTLOG_EVENT pktlog_event,
@@ -577,6 +576,10 @@
struct qdf_mac_addr multicast_addr,
bool clearList);
+QDF_STATUS (*send_multiple_add_clear_mcbc_filter_cmd)(wmi_unified_t wmi_handle,
+ uint8_t vdev_id,
+ struct mcast_filter_params *filter_param);
+
QDF_STATUS (*send_gtk_offload_cmd)(wmi_unified_t wmi_handle, uint8_t vdev_id,
struct gtk_offload_params *params,
bool enable_offload,
diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_tlv.h b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_tlv.h
index e1e8c33..51246bcd 100644
--- a/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_tlv.h
+++ b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_tlv.h
@@ -430,6 +430,10 @@
struct qdf_mac_addr multicast_addr,
bool clearList);
+QDF_STATUS send_multiple_add_clear_mcbc_filter_cmd_tlv(wmi_unified_t wmi_handle,
+ uint8_t vdev_id,
+ struct mcast_filter_params *filter_param);
+
QDF_STATUS send_gtk_offload_cmd_tlv(wmi_unified_t wmi_handle, uint8_t vdev_id,
struct gtk_offload_params *params,
bool enable_offload,
@@ -572,8 +576,7 @@
uint32_t command, uint32_t vdev_id);
QDF_STATUS send_roam_scan_offload_ap_profile_cmd_tlv(wmi_unified_t wmi_handle,
- wmi_ap_profile *ap_profile_p,
- uint32_t vdev_id);
+ struct ap_profile_params *ap_profile);
QDF_STATUS send_roam_scan_offload_scan_period_cmd_tlv(wmi_unified_t wmi_handle,
uint32_t scan_period,
diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified.c b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified.c
index 2eef15a..729777e 100644
--- a/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified.c
+++ b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified.c
@@ -1593,9 +1593,8 @@
id = WMI_GET_FIELD(qdf_nbuf_data(evt_buf), WMI_CMD_HDR, COMMANDID);
idx = wmi_unified_get_event_handler_ix(wmi_handle, id);
if (qdf_unlikely(idx == A_ERROR)) {
- qdf_print
- ("%s :event handler is not registered: event id 0x%x\n",
- __func__, id);
+ WMI_LOGD("%s :event handler is not registered: event id 0x%x\n",
+ __func__, id);
qdf_nbuf_free(evt_buf);
return;
}
diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_api.c b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_api.c
index ff2c9a8..cdb188e 100644
--- a/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_api.c
+++ b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_api.c
@@ -2583,6 +2583,19 @@
return QDF_STATUS_E_FAILURE;
}
+QDF_STATUS wmi_unified_multiple_add_clear_mcbc_filter_cmd(void *wmi_hdl,
+ uint8_t vdev_id,
+ struct mcast_filter_params *filter_param)
+{
+ wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl;
+
+ if (wmi_handle->ops->send_multiple_add_clear_mcbc_filter_cmd)
+ return wmi_handle->ops->send_multiple_add_clear_mcbc_filter_cmd(
+ wmi_handle, vdev_id, filter_param);
+
+ return QDF_STATUS_E_FAILURE;
+}
+
/**
* wmi_unified_send_gtk_offload_cmd() - send GTK offload command to fw
* @wmi_handle: wmi handle
@@ -3372,22 +3385,20 @@
/**
* wmi_unified_send_roam_scan_offload_ap_cmd() - set roam ap profile in fw
* @wmi_hdl: wmi handle
- * @ap_profile_p: ap profile
- * @vdev_id: vdev id
+ * @ap_profile: ap profile params
*
* Send WMI_ROAM_AP_PROFILE to firmware
*
* Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure
*/
QDF_STATUS wmi_unified_send_roam_scan_offload_ap_cmd(void *wmi_hdl,
- wmi_ap_profile *ap_profile_p,
- uint32_t vdev_id)
+ struct ap_profile_params *ap_profile)
{
wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl;
if (wmi_handle->ops->send_roam_scan_offload_ap_profile_cmd)
- return wmi_handle->ops->send_roam_scan_offload_ap_profile_cmd(wmi_handle,
- ap_profile_p, vdev_id);
+ return wmi_handle->ops->send_roam_scan_offload_ap_profile_cmd(
+ wmi_handle, ap_profile);
return QDF_STATUS_E_FAILURE;
}
diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_tlv.c b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_tlv.c
index 24571bb..a3f4bca 100644
--- a/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_tlv.c
+++ b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_tlv.c
@@ -1810,6 +1810,48 @@
return qdf_status;
}
#endif
+
+/**
+ * populate_tx_send_params() - Populate TX param TLV for mgmt and offchan tx
+ *
+ * @bufp: Pointer to buffer
+ * @param: Pointer to tx param
+ *
+ * Return: QDF_STATUS_SUCCESS for success and QDF_STATUS_E_FAILURE for failure
+ */
+static inline QDF_STATUS populate_tx_send_params(uint8_t *bufp,
+ struct tx_send_params param)
+{
+ wmi_tx_send_params *tx_param;
+ QDF_STATUS status = QDF_STATUS_SUCCESS;
+
+ if (!bufp) {
+ status = QDF_STATUS_E_FAILURE;
+ return status;
+ }
+ tx_param = (wmi_tx_send_params *)bufp;
+ WMITLV_SET_HDR(&tx_param->tlv_header,
+ WMITLV_TAG_STRUC_wmi_tx_send_params,
+ WMITLV_GET_STRUCT_TLVLEN(wmi_tx_send_params));
+ WMI_TX_SEND_PARAM_PWR_SET(tx_param->tx_param_dword0, param.pwr);
+ WMI_TX_SEND_PARAM_MCS_MASK_SET(tx_param->tx_param_dword0,
+ param.mcs_mask);
+ WMI_TX_SEND_PARAM_NSS_MASK_SET(tx_param->tx_param_dword0,
+ param.nss_mask);
+ WMI_TX_SEND_PARAM_RETRY_LIMIT_SET(tx_param->tx_param_dword0,
+ param.retry_limit);
+ WMI_TX_SEND_PARAM_CHAIN_MASK_SET(tx_param->tx_param_dword1,
+ param.chain_mask);
+ WMI_TX_SEND_PARAM_BW_MASK_SET(tx_param->tx_param_dword1,
+ param.bw_mask);
+ WMI_TX_SEND_PARAM_PREAMBLE_SET(tx_param->tx_param_dword1,
+ param.preamble_type);
+ WMI_TX_SEND_PARAM_FRAME_TYPE_SET(tx_param->tx_param_dword1,
+ param.frame_type);
+
+ return status;
+}
+
/**
* send_mgmt_cmd_tlv() - WMI scan start function
* @wmi_handle : handle to WMI.
@@ -1826,14 +1868,15 @@
uint64_t dma_addr;
void *qdf_ctx = param->qdf_ctx;
uint8_t *bufp;
- QDF_STATUS status;
+ QDF_STATUS status = QDF_STATUS_SUCCESS;
int32_t bufp_len = (param->frm_len < mgmt_tx_dl_frm_len) ? param->frm_len :
mgmt_tx_dl_frm_len;
cmd_len = sizeof(wmi_mgmt_tx_send_cmd_fixed_param) +
- WMI_TLV_HDR_SIZE + roundup(bufp_len, sizeof(uint32_t));
+ WMI_TLV_HDR_SIZE +
+ roundup(bufp_len, sizeof(uint32_t));
- buf = wmi_buf_alloc(wmi_handle, cmd_len);
+ buf = wmi_buf_alloc(wmi_handle, sizeof(wmi_tx_send_params) + cmd_len);
if (!buf) {
WMI_LOGE("%s:wmi_buf_alloc failed", __func__);
return QDF_STATUS_E_NOMEM;
@@ -1870,10 +1913,22 @@
#endif
cmd->frame_len = param->frm_len;
cmd->buf_len = bufp_len;
+ cmd->tx_params_valid = param->tx_params_valid;
wmi_mgmt_cmd_record(wmi_handle, WMI_MGMT_TX_SEND_CMDID,
bufp, cmd->vdev_id, cmd->chanfreq);
+ bufp += roundup(bufp_len, sizeof(uint32_t));
+ if (param->tx_params_valid) {
+ status = populate_tx_send_params(bufp, param->tx_param);
+ if (status != QDF_STATUS_SUCCESS) {
+ WMI_LOGE("%s: Populate TX send params failed",
+ __func__);
+ goto err1;
+ }
+ cmd_len += sizeof(wmi_tx_send_params);
+ }
+
if (wmi_unified_cmd_send(wmi_handle, buf, cmd_len,
WMI_MGMT_TX_SEND_CMDID)) {
WMI_LOGE("%s: Failed to send mgmt Tx", __func__);
@@ -4743,6 +4798,8 @@
rssi_threshold_fp->hirssi_scan_delta =
roam_req->hi_rssi_scan_rssi_delta;
rssi_threshold_fp->hirssi_upper_bound = roam_req->hi_rssi_scan_rssi_ub;
+ rssi_threshold_fp->rssi_thresh_offset_5g =
+ roam_req->rssi_thresh_offset_5g;
buf_ptr += sizeof(wmi_roam_scan_rssi_threshold_fixed_param);
WMITLV_SET_HDR(buf_ptr,
@@ -4810,6 +4867,9 @@
roam_req->bg_scan_bad_rssi_thresh;
bg_scan_params->roam_bg_scan_client_bitmap =
roam_req->bg_scan_client_bitmap;
+ bg_scan_params->bad_rssi_thresh_offset_2g =
+ roam_req->roam_bad_rssi_thresh_offset_2g;
+ bg_scan_params->flags = roam_req->flags;
WMITLV_SET_HDR(&bg_scan_params->tlv_header,
WMITLV_TAG_STRUC_wmi_roam_bg_scan_roaming_param,
WMITLV_GET_STRUCT_TLVLEN
@@ -4984,8 +5044,21 @@
wmi_roam_lca_disallow_config_tlv_param *blist_param;
len = sizeof(wmi_roam_filter_fixed_param);
+
len += WMI_TLV_HDR_SIZE;
- len += roam_req->len;
+ if (roam_req->num_bssid_black_list)
+ len += roam_req->num_bssid_black_list * sizeof(wmi_mac_addr);
+ len += WMI_TLV_HDR_SIZE;
+ if (roam_req->num_ssid_white_list)
+ len += roam_req->num_ssid_white_list * sizeof(wmi_ssid);
+ len += 2 * WMI_TLV_HDR_SIZE;
+ if (roam_req->num_bssid_preferred_list) {
+ len += roam_req->num_bssid_preferred_list * sizeof(wmi_mac_addr);
+ len += roam_req->num_bssid_preferred_list * sizeof(A_UINT32);
+ }
+ if (roam_req->lca_disallow_config_present)
+ len += WMI_TLV_HDR_SIZE +
+ sizeof(wmi_roam_lca_disallow_config_tlv_param);
buf = wmi_buf_alloc(wmi_handle, len);
if (!buf) {
@@ -5073,7 +5146,9 @@
blist_param->rssi_channel_penalization =
roam_req->rssi_channel_penalization;
blist_param->num_disallowed_aps = roam_req->num_disallowed_aps;
- blist_param->disallow_lca_enable_source_bitmap = 0x1;
+ blist_param->disallow_lca_enable_source_bitmap =
+ (WMI_ROAM_LCA_DISALLOW_SOURCE_PER |
+ WMI_ROAM_LCA_DISALLOW_SOURCE_BACKGROUND);
buf_ptr += (sizeof(wmi_roam_lca_disallow_config_tlv_param));
}
@@ -8443,6 +8518,82 @@
}
/**
+ * send_multiple_add_clear_mcbc_filter_cmd_tlv() - send multiple mcast filter
+ * command to fw
+ * @wmi_handle: wmi handle
+ * @vdev_id: vdev id
+ * @filter_param: mcast filter params
+ *
+ * Return: QDF_STATUS_SUCCESS for success or error code
+ */
+QDF_STATUS send_multiple_add_clear_mcbc_filter_cmd_tlv(wmi_unified_t wmi_handle,
+ uint8_t vdev_id,
+ struct mcast_filter_params *filter_param)
+
+{
+ WMI_SET_MULTIPLE_MCAST_FILTER_CMD_fixed_param *cmd;
+ uint8_t *buf_ptr;
+ wmi_buf_t buf;
+ int err;
+ int i;
+ uint8_t *mac_addr_src_ptr = NULL;
+ wmi_mac_addr *mac_addr_dst_ptr;
+ uint32_t len = sizeof(*cmd) + WMI_TLV_HDR_SIZE +
+ sizeof(wmi_mac_addr) * filter_param->multicast_addr_cnt;
+
+ buf = wmi_buf_alloc(wmi_handle, len);
+ if (!buf) {
+ WMI_LOGE("Failed to allocate memory");
+ return QDF_STATUS_E_NOMEM;
+ }
+
+ buf_ptr = (A_UINT8 *) wmi_buf_data(buf);
+ cmd = (WMI_SET_MULTIPLE_MCAST_FILTER_CMD_fixed_param *)
+ wmi_buf_data(buf);
+ qdf_mem_zero(cmd, sizeof(*cmd));
+
+ WMITLV_SET_HDR(&cmd->tlv_header,
+ WMITLV_TAG_STRUC_wmi_set_multiple_mcast_filter_cmd_fixed_param,
+ WMITLV_GET_STRUCT_TLVLEN
+ (WMI_SET_MULTIPLE_MCAST_FILTER_CMD_fixed_param));
+ cmd->operation =
+ ((filter_param->action == 0) ? WMI_MULTIPLE_MCAST_FILTER_DELETE
+ : WMI_MULTIPLE_MCAST_FILTER_ADD);
+ cmd->vdev_id = vdev_id;
+ cmd->num_mcastaddrs = filter_param->multicast_addr_cnt;
+
+ buf_ptr += sizeof(*cmd);
+ WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_FIXED_STRUC,
+ sizeof(wmi_mac_addr) *
+ filter_param->multicast_addr_cnt);
+
+ if (filter_param->multicast_addr_cnt == 0)
+ goto send_cmd;
+
+ mac_addr_src_ptr = (uint8_t *)&filter_param->multicast_addr;
+ mac_addr_dst_ptr = (wmi_mac_addr *)
+ (buf_ptr + WMI_TLV_HDR_SIZE);
+
+ for (i = 0; i < filter_param->multicast_addr_cnt; i++) {
+ WMI_CHAR_ARRAY_TO_MAC_ADDR(mac_addr_src_ptr, mac_addr_dst_ptr);
+ mac_addr_src_ptr += ATH_MAC_LEN;
+ mac_addr_dst_ptr++;
+ }
+
+send_cmd:
+ err = wmi_unified_cmd_send(wmi_handle, buf,
+ len,
+ WMI_SET_MULTIPLE_MCAST_FILTER_CMDID);
+ if (err) {
+ WMI_LOGE("Failed to send set_param cmd");
+ wmi_buf_free(buf);
+ return QDF_STATUS_E_FAILURE;
+ }
+
+ return QDF_STATUS_SUCCESS;
+}
+
+/**
* send_gtk_offload_cmd_tlv() - send GTK offload command to fw
* @wmi_handle: wmi handle
* @vdev_id: vdev id
@@ -10054,6 +10205,13 @@
if (wmi_handle->events_logs_list)
qdf_mem_free(wmi_handle->events_logs_list);
+ if (num_of_diag_events_logs >
+ (WMI_SVC_MSG_MAX_SIZE / sizeof(uint32_t))) {
+ WMI_LOGE("%s: excess num of logs:%d", __func__,
+ num_of_diag_events_logs);
+ QDF_ASSERT(0);
+ return QDF_STATUS_E_INVAL;
+ }
/* Store the event list for run time enable/disable */
wmi_handle->events_logs_list = qdf_mem_malloc(num_of_diag_events_logs *
sizeof(uint32_t));
@@ -11133,17 +11291,18 @@
* Return: CDF status
*/
QDF_STATUS send_roam_scan_offload_ap_profile_cmd_tlv(wmi_unified_t wmi_handle,
- wmi_ap_profile *ap_profile_p,
- uint32_t vdev_id)
+ struct ap_profile_params *ap_profile)
{
wmi_buf_t buf = NULL;
QDF_STATUS status;
int len;
uint8_t *buf_ptr;
wmi_roam_ap_profile_fixed_param *roam_ap_profile_fp;
+ wmi_roam_cnd_scoring_param *score_param;
+ wmi_ap_profile *profile;
len = sizeof(wmi_roam_ap_profile_fixed_param) + sizeof(wmi_ap_profile);
-
+ len += sizeof(*score_param);
buf = wmi_buf_alloc(wmi_handle, len);
if (!buf) {
WMI_LOGE("%s : wmi_buf_alloc failed", __func__);
@@ -11157,14 +11316,143 @@
WMITLV_GET_STRUCT_TLVLEN
(wmi_roam_ap_profile_fixed_param));
/* fill in threshold values */
- roam_ap_profile_fp->vdev_id = vdev_id;
+ roam_ap_profile_fp->vdev_id = ap_profile->vdev_id;
roam_ap_profile_fp->id = 0;
buf_ptr += sizeof(wmi_roam_ap_profile_fixed_param);
- qdf_mem_copy(buf_ptr, ap_profile_p, sizeof(wmi_ap_profile));
- WMITLV_SET_HDR(buf_ptr,
+ profile = (wmi_ap_profile *)buf_ptr;
+ WMITLV_SET_HDR(&profile->tlv_header,
WMITLV_TAG_STRUC_wmi_ap_profile,
WMITLV_GET_STRUCT_TLVLEN(wmi_ap_profile));
+ profile->flags = ap_profile->profile.flags;
+ profile->rssi_threshold = ap_profile->profile.rssi_threshold;
+ profile->ssid.ssid_len = ap_profile->profile.ssid.length;
+ qdf_mem_copy(profile->ssid.ssid, ap_profile->profile.ssid.mac_ssid,
+ profile->ssid.ssid_len);
+ profile->rsn_authmode = ap_profile->profile.rsn_authmode;
+ profile->rsn_ucastcipherset = ap_profile->profile.rsn_ucastcipherset;
+ profile->rsn_mcastcipherset = ap_profile->profile.rsn_mcastcipherset;
+ profile->rsn_mcastmgmtcipherset =
+ ap_profile->profile.rsn_mcastmgmtcipherset;
+ profile->rssi_abs_thresh = ap_profile->profile.rssi_abs_thresh;
+
+ WMI_LOGD("AP profile: flags %x rssi_threshold %d ssid:%.*s authmode %d uc cipher %d mc cipher %d mc mgmt cipher %d rssi abs thresh %d",
+ profile->flags, profile->rssi_threshold,
+ profile->ssid.ssid_len, ap_profile->profile.ssid.mac_ssid,
+ profile->rsn_authmode, profile->rsn_ucastcipherset,
+ profile->rsn_mcastcipherset, profile->rsn_mcastmgmtcipherset,
+ profile->rssi_abs_thresh);
+
+ buf_ptr += sizeof(wmi_ap_profile);
+
+ score_param = (wmi_roam_cnd_scoring_param *)buf_ptr;
+ WMITLV_SET_HDR(&score_param->tlv_header,
+ WMITLV_TAG_STRUC_wmi_roam_cnd_scoring_param,
+ WMITLV_GET_STRUCT_TLVLEN(wmi_roam_cnd_scoring_param));
+ score_param->disable_bitmap = ap_profile->param.disable_bitmap;
+ score_param->rssi_weightage_pcnt =
+ ap_profile->param.rssi_weightage;
+ score_param->ht_weightage_pcnt = ap_profile->param.ht_weightage;
+ score_param->vht_weightage_pcnt = ap_profile->param.vht_weightage;
+ score_param->he_weightage_pcnt = ap_profile->param.he_weightage;
+ score_param->bw_weightage_pcnt = ap_profile->param.bw_weightage;
+ score_param->band_weightage_pcnt = ap_profile->param.band_weightage;
+ score_param->nss_weightage_pcnt = ap_profile->param.nss_weightage;
+ score_param->esp_qbss_weightage_pcnt =
+ ap_profile->param.esp_qbss_weightage;
+ score_param->beamforming_weightage_pcnt =
+ ap_profile->param.beamforming_weightage;
+ score_param->pcl_weightage_pcnt = ap_profile->param.pcl_weightage;
+ score_param->oce_wan_weightage_pcnt =
+ ap_profile->param.oce_wan_weightage;
+
+ WMI_LOGD("Score params weightage: disable_bitmap %x rssi %d ht %d vht %d he %d BW %d band %d NSS %d ESP %d BF %d PCL %d OCE WAN %d",
+ score_param->disable_bitmap, score_param->rssi_weightage_pcnt,
+ score_param->ht_weightage_pcnt,
+ score_param->vht_weightage_pcnt,
+ score_param->he_weightage_pcnt, score_param->bw_weightage_pcnt,
+ score_param->band_weightage_pcnt,
+ score_param->nss_weightage_pcnt,
+ score_param->esp_qbss_weightage_pcnt,
+ score_param->beamforming_weightage_pcnt,
+ score_param->pcl_weightage_pcnt,
+ score_param->oce_wan_weightage_pcnt);
+
+ score_param->bw_scoring.score_pcnt = ap_profile->param.bw_index_score;
+ score_param->band_scoring.score_pcnt =
+ ap_profile->param.band_index_score;
+ score_param->nss_scoring.score_pcnt =
+ ap_profile->param.nss_index_score;
+
+ WMI_LOGD("Params index score bitmask: bw_index_score %x band_index_score %x nss_index_score %x",
+ score_param->bw_scoring.score_pcnt,
+ score_param->band_scoring.score_pcnt,
+ score_param->nss_scoring.score_pcnt);
+
+ score_param->rssi_scoring.best_rssi_threshold =
+ (-1) * ap_profile->param.rssi_scoring.best_rssi_threshold;
+ score_param->rssi_scoring.good_rssi_threshold =
+ (-1) * ap_profile->param.rssi_scoring.good_rssi_threshold;
+ score_param->rssi_scoring.bad_rssi_threshold =
+ (-1) * ap_profile->param.rssi_scoring.bad_rssi_threshold;
+ score_param->rssi_scoring.good_rssi_pcnt =
+ ap_profile->param.rssi_scoring.good_rssi_pcnt;
+ score_param->rssi_scoring.bad_rssi_pcnt =
+ ap_profile->param.rssi_scoring.bad_rssi_pcnt;
+ score_param->rssi_scoring.good_bucket_size =
+ ap_profile->param.rssi_scoring.good_bucket_size;
+ score_param->rssi_scoring.bad_bucket_size =
+ ap_profile->param.rssi_scoring.bad_bucket_size;
+ score_param->rssi_scoring.rssi_pref_5g_rssi_thresh =
+ (-1) * ap_profile->param.rssi_scoring.rssi_pref_5g_rssi_thresh;
+
+ WMI_LOGD("Rssi scoring threshold: best RSSI %d good RSSI %d bad RSSI %d prefer 5g threshold %d",
+ score_param->rssi_scoring.best_rssi_threshold,
+ score_param->rssi_scoring.good_rssi_threshold,
+ score_param->rssi_scoring.bad_rssi_threshold,
+ score_param->rssi_scoring.rssi_pref_5g_rssi_thresh);
+ WMI_LOGD("Good RSSI score for each slot %d bad RSSI score for each slot %d good bucket %d bad bucket %d",
+ score_param->rssi_scoring.good_rssi_pcnt,
+ score_param->rssi_scoring.bad_rssi_pcnt,
+ score_param->rssi_scoring.good_bucket_size,
+ score_param->rssi_scoring.bad_bucket_size);
+
+ score_param->esp_qbss_scoring.num_slot =
+ ap_profile->param.esp_qbss_scoring.num_slot;
+ score_param->esp_qbss_scoring.score_pcnt3_to_0 =
+ ap_profile->param.esp_qbss_scoring.score_pcnt3_to_0;
+ score_param->esp_qbss_scoring.score_pcnt7_to_4 =
+ ap_profile->param.esp_qbss_scoring.score_pcnt7_to_4;
+ score_param->esp_qbss_scoring.score_pcnt11_to_8 =
+ ap_profile->param.esp_qbss_scoring.score_pcnt11_to_8;
+ score_param->esp_qbss_scoring.score_pcnt15_to_12 =
+ ap_profile->param.esp_qbss_scoring.score_pcnt15_to_12;
+
+ WMI_LOGD("ESP QBSS index weight: slots %d weight 0to3 %x weight 4to7 %x weight 8to11 %x weight 12to15 %x",
+ score_param->esp_qbss_scoring.num_slot,
+ score_param->esp_qbss_scoring.score_pcnt3_to_0,
+ score_param->esp_qbss_scoring.score_pcnt7_to_4,
+ score_param->esp_qbss_scoring.score_pcnt11_to_8,
+ score_param->esp_qbss_scoring.score_pcnt15_to_12);
+
+ score_param->oce_wan_scoring.num_slot =
+ ap_profile->param.oce_wan_scoring.num_slot;
+ score_param->oce_wan_scoring.score_pcnt3_to_0 =
+ ap_profile->param.oce_wan_scoring.score_pcnt3_to_0;
+ score_param->oce_wan_scoring.score_pcnt7_to_4 =
+ ap_profile->param.oce_wan_scoring.score_pcnt7_to_4;
+ score_param->oce_wan_scoring.score_pcnt11_to_8 =
+ ap_profile->param.oce_wan_scoring.score_pcnt11_to_8;
+ score_param->oce_wan_scoring.score_pcnt15_to_12 =
+ ap_profile->param.oce_wan_scoring.score_pcnt15_to_12;
+
+ WMI_LOGD("OCE WAN index weight: slots %d weight 0to3 %x weight 4to7 %x weight 8to11 %x weight 12to15 %x",
+ score_param->oce_wan_scoring.num_slot,
+ score_param->oce_wan_scoring.score_pcnt3_to_0,
+ score_param->oce_wan_scoring.score_pcnt7_to_4,
+ score_param->oce_wan_scoring.score_pcnt11_to_8,
+ score_param->oce_wan_scoring.score_pcnt15_to_12);
+
status = wmi_unified_cmd_send(wmi_handle, buf,
len, WMI_ROAM_AP_PROFILE);
if (QDF_IS_STATUS_ERROR(status)) {
@@ -13340,6 +13628,8 @@
send_enable_disable_packet_filter_cmd_tlv,
.send_config_packet_filter_cmd = send_config_packet_filter_cmd_tlv,
.send_add_clear_mcbc_filter_cmd = send_add_clear_mcbc_filter_cmd_tlv,
+ .send_multiple_add_clear_mcbc_filter_cmd =
+ send_multiple_add_clear_mcbc_filter_cmd_tlv,
.send_gtk_offload_cmd = send_gtk_offload_cmd_tlv,
.send_process_gtk_offload_getinfo_cmd =
send_process_gtk_offload_getinfo_cmd_tlv,
diff --git a/drivers/staging/qcacld-3.0/core/cds/inc/cds_api.h b/drivers/staging/qcacld-3.0/core/cds/inc/cds_api.h
index 2814a72..259249d 100644
--- a/drivers/staging/qcacld-3.0/core/cds/inc/cds_api.h
+++ b/drivers/staging/qcacld-3.0/core/cds/inc/cds_api.h
@@ -71,7 +71,8 @@
CDS_DRIVER_STATE_LOADING = BIT(1),
CDS_DRIVER_STATE_UNLOADING = BIT(2),
CDS_DRIVER_STATE_RECOVERING = BIT(3),
- CDS_DRIVER_STATE_BAD = BIT(4)
+ CDS_DRIVER_STATE_BAD = BIT(4),
+ CDS_DRIVER_STATE_FW_READY = BIT(5),
};
#define __CDS_IS_DRIVER_STATE(_state, _mask) (((_state) & (_mask)) == (_mask))
@@ -224,6 +225,18 @@
}
/**
+ * cds_is_target_ready() - Is target is in ready state
+ *
+ * Return: true if target is in ready state and false otherwise.
+ */
+static inline bool cds_is_target_ready(void)
+{
+ enum cds_driver_state state = cds_get_driver_state();
+
+ return __CDS_IS_DRIVER_STATE(state, CDS_DRIVER_STATE_FW_READY);
+}
+
+/**
* cds_set_recovery_in_progress() - Set recovery in progress
* @value: value to set
*
@@ -252,6 +265,20 @@
}
/**
+ * cds_set_target_ready() - Set target ready state
+ * @value: value to set
+ *
+ * Return: none
+ */
+static inline void cds_set_target_ready(uint8_t value)
+{
+ if (value)
+ cds_set_driver_state(CDS_DRIVER_STATE_FW_READY);
+ else
+ cds_clear_driver_state(CDS_DRIVER_STATE_FW_READY);
+}
+
+/**
* cds_set_load_in_progress() - Set load in progress
* @value: value to set
*
diff --git a/drivers/staging/qcacld-3.0/core/cds/inc/cds_config.h b/drivers/staging/qcacld-3.0/core/cds/inc/cds_config.h
index d648ee0..59119a8 100644
--- a/drivers/staging/qcacld-3.0/core/cds/inc/cds_config.h
+++ b/drivers/staging/qcacld-3.0/core/cds/inc/cds_config.h
@@ -156,6 +156,7 @@
uint16_t self_gen_frm_pwr;
enum cfg_sub_20_channel_width sub_20_channel_width;
bool flow_steering_enabled;
+ uint8_t max_msdus_per_rxinorderind;
bool self_recovery_enabled;
bool fw_timeout_crash;
diff --git a/drivers/staging/qcacld-3.0/core/cds/inc/cds_sched.h b/drivers/staging/qcacld-3.0/core/cds/inc/cds_sched.h
index f9cc4c4..d1f437a 100644
--- a/drivers/staging/qcacld-3.0/core/cds/inc/cds_sched.h
+++ b/drivers/staging/qcacld-3.0/core/cds/inc/cds_sched.h
@@ -602,5 +602,11 @@
* Return: None
*/
void cds_shutdown_notifier_purge(void);
-
+/**
+ * cds_shutdown_notifier_call() - Call shutdown notifier call back
+ *
+ * Call registered shutdown notifier call back to indicate about remove or
+ * shutdown.
+ */
+void cds_shutdown_notifier_call(void);
#endif /* #if !defined __CDS_SCHED_H */
diff --git a/drivers/staging/qcacld-3.0/core/cds/src/cds_api.c b/drivers/staging/qcacld-3.0/core/cds/src/cds_api.c
index 40e9599..9513cf9 100644
--- a/drivers/staging/qcacld-3.0/core/cds/src/cds_api.c
+++ b/drivers/staging/qcacld-3.0/core/cds/src/cds_api.c
@@ -456,11 +456,14 @@
"%s: HTCHandle is null!", __func__);
goto err_wma_close;
}
- if (htc_wait_target(HTCHandle)) {
+
+ qdf_status = htc_wait_target(HTCHandle);
+ if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_FATAL,
"%s: Failed to complete BMI phase", __func__);
- if (!cds_is_fw_down())
+ if (qdf_status != QDF_STATUS_E_NOMEM
+ && !cds_is_fw_down())
QDF_BUG(0);
goto err_wma_close;
@@ -1347,6 +1350,9 @@
case QDF_MODULE_ID_HIF:
p_cds_context->pHIFContext = context;
break;
+ case QDF_MODULE_ID_HDD:
+ p_cds_context->pHDDContext = context;
+ break;
default:
QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
"%s: Module ID %i does not have its context managed by CDS",
diff --git a/drivers/staging/qcacld-3.0/core/cds/src/cds_concurrency.c b/drivers/staging/qcacld-3.0/core/cds/src/cds_concurrency.c
index fb541cf..12070c1 100644
--- a/drivers/staging/qcacld-3.0/core/cds/src/cds_concurrency.c
+++ b/drivers/staging/qcacld-3.0/core/cds/src/cds_concurrency.c
@@ -285,32 +285,32 @@
[CDS_STA_24_1x1] = {
[CDS_STA_MODE] = {CDS_5G, CDS_SCC_CH, CDS_SCC_CH},
[CDS_SAP_MODE] = {CDS_5G, CDS_SCC_CH, CDS_SCC_CH_5G},
- [CDS_P2P_CLIENT_MODE] = {CDS_5G, CDS_SCC_CH, CDS_SCC_CH_5G},
- [CDS_P2P_GO_MODE] = {CDS_5G, CDS_SCC_CH, CDS_SCC_CH_5G},
+ [CDS_P2P_CLIENT_MODE] = {CDS_SCC_CH, CDS_SCC_CH, CDS_SCC_CH_5G},
+ [CDS_P2P_GO_MODE] = {CDS_SCC_CH, CDS_SCC_CH, CDS_SCC_CH_5G},
[CDS_IBSS_MODE] = {
CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
[CDS_STA_24_2x2] = {
[CDS_STA_MODE] = {CDS_5G, CDS_SCC_CH, CDS_SCC_CH},
[CDS_SAP_MODE] = {CDS_5G, CDS_SCC_CH, CDS_SCC_CH_5G},
- [CDS_P2P_CLIENT_MODE] = {CDS_5G, CDS_SCC_CH, CDS_SCC_CH_5G},
- [CDS_P2P_GO_MODE] = {CDS_5G, CDS_SCC_CH, CDS_SCC_CH_5G},
+ [CDS_P2P_CLIENT_MODE] = {CDS_SCC_CH, CDS_SCC_CH, CDS_SCC_CH_5G},
+ [CDS_P2P_GO_MODE] = {CDS_SCC_CH, CDS_SCC_CH, CDS_SCC_CH_5G},
[CDS_IBSS_MODE] = {
CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
[CDS_STA_5_1x1] = {
[CDS_STA_MODE] = {CDS_SCC_CH_5G, CDS_SCC_CH, CDS_SCC_CH_5G},
[CDS_SAP_MODE] = {CDS_SCC_CH_5G, CDS_SCC_CH, CDS_SCC_CH},
- [CDS_P2P_CLIENT_MODE] = {CDS_SCC_CH_5G, CDS_SCC_CH, CDS_SCC_CH},
- [CDS_P2P_GO_MODE] = {CDS_SCC_CH_5G, CDS_SCC_CH, CDS_SCC_CH},
+ [CDS_P2P_CLIENT_MODE] = {CDS_SCC_CH, CDS_SCC_CH, CDS_SCC_CH},
+ [CDS_P2P_GO_MODE] = {CDS_SCC_CH, CDS_SCC_CH, CDS_SCC_CH},
[CDS_IBSS_MODE] = {
CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
[CDS_STA_5_2x2] = {
[CDS_STA_MODE] = {CDS_SCC_CH_5G, CDS_SCC_CH, CDS_SCC_CH_5G},
[CDS_SAP_MODE] = {CDS_SCC_CH_5G, CDS_SCC_CH, CDS_SCC_CH},
- [CDS_P2P_CLIENT_MODE] = {CDS_SCC_CH_5G, CDS_SCC_CH, CDS_SCC_CH},
- [CDS_P2P_GO_MODE] = {CDS_SCC_CH_5G, CDS_SCC_CH, CDS_SCC_CH},
+ [CDS_P2P_CLIENT_MODE] = {CDS_SCC_CH, CDS_SCC_CH, CDS_SCC_CH},
+ [CDS_P2P_GO_MODE] = {CDS_SCC_CH, CDS_SCC_CH, CDS_SCC_CH},
[CDS_IBSS_MODE] = {
CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE, CDS_MAX_PCL_TYPE} },
diff --git a/drivers/staging/qcacld-3.0/core/cds/src/cds_sched.c b/drivers/staging/qcacld-3.0/core/cds/src/cds_sched.c
index 071aca7..5e2a182 100644
--- a/drivers/staging/qcacld-3.0/core/cds/src/cds_sched.c
+++ b/drivers/staging/qcacld-3.0/core/cds/src/cds_sched.c
@@ -1645,7 +1645,7 @@
* Call registered shutdown notifier call back to indicate about remove or
* shutdown.
*/
-static void cds_shutdown_notifier_call(void)
+void cds_shutdown_notifier_call(void)
{
struct shutdown_notifier *notifier;
unsigned long irq_flags;
@@ -1678,8 +1678,6 @@
int count = MAX_SSR_WAIT_ITERATIONS;
int r;
- cds_shutdown_notifier_call();
-
while (count) {
r = atomic_read(&ssr_protect_entry_count);
diff --git a/drivers/staging/qcacld-3.0/core/dp/htt/htt.c b/drivers/staging/qcacld-3.0/core/dp/htt/htt.c
index 1eb6c67..1bdfe6a 100644
--- a/drivers/staging/qcacld-3.0/core/dp/htt/htt.c
+++ b/drivers/staging/qcacld-3.0/core/dp/htt/htt.c
@@ -729,10 +729,14 @@
status = htc_connect_service(pdev->htc_pdev, &connect, &response);
if (status != QDF_STATUS_SUCCESS) {
- if (!cds_is_fw_down())
- QDF_BUG(0);
+ if (cds_is_fw_down())
+ return -EIO;
- return -EIO; /* failure */
+ if (status == QDF_STATUS_E_NOMEM ||
+ cds_is_self_recovery_enabled())
+ return qdf_status_to_os_return(status);
+
+ QDF_BUG(0);
}
htt_update_endpoint(pdev, service_id, response.Endpoint);
diff --git a/drivers/staging/qcacld-3.0/core/dp/htt/htt_h2t.c b/drivers/staging/qcacld-3.0/core/dp/htt/htt_h2t.c
index c8f3f9ff..540a7fd 100644
--- a/drivers/staging/qcacld-3.0/core/dp/htt/htt_h2t.c
+++ b/drivers/staging/qcacld-3.0/core/dp/htt/htt_h2t.c
@@ -307,6 +307,8 @@
struct htt_htc_pkt *pkt;
qdf_nbuf_t msg;
uint32_t *msg_word;
+ uint32_t msg_local;
+ struct cds_config_info *cds_cfg;
QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_INFO,
"Receive flow steering configuration, disable gEnableFlowSteering(=0) in ini if FW doesnot support it\n");
@@ -340,16 +342,28 @@
/* rewind beyond alignment pad to get to the HTC header reserved area */
qdf_nbuf_push_head(msg, HTC_HDR_ALIGNMENT_PADDING);
- *msg_word = 0;
- HTT_H2T_MSG_TYPE_SET(*msg_word, HTT_H2T_MSG_TYPE_RFS_CONFIG);
+ msg_local = 0;
+ HTT_H2T_MSG_TYPE_SET(msg_local, HTT_H2T_MSG_TYPE_RFS_CONFIG);
if (ol_cfg_is_flow_steering_enabled(pdev->ctrl_pdev)) {
- HTT_RX_RFS_CONFIG_SET(*msg_word, 1);
- QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_INFO,
- "Enable Rx flow steering\n");
+ HTT_RX_RFS_CONFIG_SET(msg_local, 1);
+ QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_INFO,
+ "Enable Rx flow steering");
} else {
QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_INFO,
- "Disable Rx flow steering\n");
+ "Disable Rx flow steering");
}
+ cds_cfg = cds_get_ini_config();
+ if (cds_cfg != NULL) {
+ msg_local |= ((cds_cfg->max_msdus_per_rxinorderind & 0xff)
+ << 16);
+ QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_INFO,
+ "Updated maxMSDUsPerRxInd");
+ }
+
+ *msg_word = msg_local;
+ QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_INFO,
+ "%s: Sending msg_word: 0x%08x",
+ __func__, *msg_word);
SET_HTC_PACKET_INFO_TX(&pkt->htc_pkt,
htt_h2t_send_complete_free_netbuf,
diff --git a/drivers/staging/qcacld-3.0/core/dp/htt/htt_rx.c b/drivers/staging/qcacld-3.0/core/dp/htt/htt_rx.c
index 89bbdc4..78190ab 100644
--- a/drivers/staging/qcacld-3.0/core/dp/htt/htt_rx.c
+++ b/drivers/staging/qcacld-3.0/core/dp/htt/htt_rx.c
@@ -120,7 +120,32 @@
#define RX_HASH_LOG(x) /* no-op */
#endif
+#if HTT_PADDR64
+#define NEXT_FIELD_OFFSET_IN32 2
+#else /* ! HTT_PADDR64 */
+#define NEXT_FIELD_OFFSET_IN32 1
+#endif /* HTT_PADDR64 */
+
#ifndef CONFIG_HL_SUPPORT
+
+/**
+ * htt_get_first_packet_after_wow_wakeup() - get first packet after wow wakeup
+ * @msg_word: pointer to rx indication message word
+ * @buf: pointer to buffer
+ *
+ * Return: None
+ */
+static void
+htt_get_first_packet_after_wow_wakeup(uint32_t *msg_word, qdf_nbuf_t buf)
+{
+ if (HTT_RX_IN_ORD_PADDR_IND_MSDU_INFO_GET(*msg_word) &
+ FW_MSDU_INFO_FIRST_WAKEUP_M) {
+ qdf_nbuf_mark_wakeup_frame(buf);
+ QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_INFO,
+ "%s: First packet after WOW Wakeup rcvd", __func__);
+ }
+}
+
/* De -initialization function of the rx buffer hash table. This function will
* free up the hash table which includes freeing all the pending rx buffers
*/
@@ -1558,15 +1583,9 @@
qdf_nbuf_unmap(pdev->osdev, buf, QDF_DMA_FROM_DEVICE);
#endif
- if (pdev->cfg.is_first_wakeup_packet) {
- if (HTT_RX_IN_ORD_PADDR_IND_MSDU_INFO_GET(*(curr_msdu + 1)) &
- FW_MSDU_INFO_FIRST_WAKEUP_M) {
- qdf_nbuf_mark_wakeup_frame(buf);
- QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_INFO,
- "%s: First packet after WOW Wakeup rcvd",
- __func__);
- }
- }
+ if (pdev->cfg.is_first_wakeup_packet)
+ htt_get_first_packet_after_wow_wakeup(
+ msg_word + NEXT_FIELD_OFFSET_IN32, buf);
msdu_hdr = (uint32_t *) qdf_nbuf_data(buf);
@@ -1587,11 +1606,6 @@
#endif
#define MIN(a, b) (((a) < (b)) ? (a) : (b))
-#if HTT_PADDR64
-#define NEXT_FIELD_OFFSET_IN32 2
-#else /* ! HTT_PADDR64 */
-#define NEXT_FIELD_OFFSET_IN32 1
-#endif /* HTT_PADDR64 */
#ifndef CONFIG_HL_SUPPORT
/**
@@ -2050,7 +2064,6 @@
last_frag = ((struct htt_rx_in_ord_paddr_ind_msdu_t *)
msg_word)->msdu_info;
-#undef NEXT_FIELD_OFFSET_IN32
/* Handle amsdu packet */
if (!last_frag) {
/*
@@ -2210,11 +2223,6 @@
qdf_nbuf_data_addr(msdu),
sizeof(qdf_nbuf_data(msdu)), QDF_RX));
-#if HTT_PADDR64
-#define NEXT_FIELD_OFFSET_IN32 2
-#else /* ! HTT_PADDR64 */
-#define NEXT_FIELD_OFFSET_IN32 1
-#endif /* HTT_PADDR64 */
qdf_nbuf_trim_tail(msdu,
HTT_RX_BUF_SIZE -
(RX_STD_DESC_SIZE +
@@ -2226,7 +2234,6 @@
*((uint8_t *) &rx_desc->fw_desc.u.val) =
HTT_RX_IN_ORD_PADDR_IND_FW_DESC_GET(*(msg_word +
NEXT_FIELD_OFFSET_IN32));
-#undef NEXT_FIELD_OFFSET_IN32
msdu_count--;
@@ -2239,6 +2246,10 @@
pdev->rx_pkt_dump_cb(msdu, peer_id, status);
}
+ if (pdev->cfg.is_first_wakeup_packet)
+ htt_get_first_packet_after_wow_wakeup(
+ msg_word + NEXT_FIELD_OFFSET_IN32, msdu);
+
if (qdf_unlikely((*((u_int8_t *) &rx_desc->fw_desc.u.val)) &
FW_RX_DESC_ANY_ERR_M)) {
uint8_t tid =
diff --git a/drivers/staging/qcacld-3.0/core/dp/txrx/ol_tx.c b/drivers/staging/qcacld-3.0/core/dp/txrx/ol_tx.c
index 370fcf3..6f74305 100644
--- a/drivers/staging/qcacld-3.0/core/dp/txrx/ol_tx.c
+++ b/drivers/staging/qcacld-3.0/core/dp/txrx/ol_tx.c
@@ -563,6 +563,7 @@
}
htt_tx_desc = tx_desc->htt_tx_desc;
+ qdf_mem_zero(tx_desc->htt_frag_desc, sizeof(struct msdu_ext_desc_t));
/* Make sure frags num is set to 0 */
/*
diff --git a/drivers/staging/qcacld-3.0/core/hdd/inc/wlan_hdd_cfg.h b/drivers/staging/qcacld-3.0/core/hdd/inc/wlan_hdd_cfg.h
index f27b630..47e27d8 100644
--- a/drivers/staging/qcacld-3.0/core/hdd/inc/wlan_hdd_cfg.h
+++ b/drivers/staging/qcacld-3.0/core/hdd/inc/wlan_hdd_cfg.h
@@ -1976,6 +1976,36 @@
/*
* <ini>
+ * roam_bad_rssi_thresh_offset_2g - RSSI threshold offset for 2G to 5G roam
+ * @Min: 0
+ * @Max: 86
+ * @Default: 40
+ *
+ * If the DUT is connected to an AP with weak signal in 2G band, then the
+ * bad RSSI offset for 2g would be used as offset from the bad RSSI
+ * threshold configured and then use the resulting rssi for an opportunity
+ * to use the scan results from other scan clients and try to roam to
+ * 5G Band ONLY if there is a better AP available in the environment.
+ *
+ * For example if the roam_bg_scan_bad_rssi_thresh is -76 and
+ * roam_bad_rssi_thresh_offset_2g is 40 then the difference of -36 would be
+ * used as a trigger to roam to a 5G AP if DUT initially connected to a 2G AP
+ *
+ * Related: roam_bg_scan_bad_rssi_thresh
+ *
+ * Supported Feature: Roaming
+ *
+ * Usage: External
+ *
+ * </ini>
+ */
+#define CFG_ROAM_BG_SCAN_BAD_RSSI_OFFSET_2G_NAME "roam_bad_rssi_thresh_offset_2g"
+#define CFG_ROAM_BG_SCAN_BAD_RSSI_OFFSET_2G_MIN (0)
+#define CFG_ROAM_BG_SCAN_BAD_RSSI_OFFSET_2G_MAX (86)
+#define CFG_ROAM_BG_SCAN_BAD_RSSI_OFFSET_2G_DEFAULT (40)
+
+/*
+ * <ini>
* roamscan_adaptive_dwell_mode - Sets dwell time adaptive mode
* @Min: 0
* @Max: 4
@@ -4157,6 +4187,39 @@
#define CFG_NEIGHBOR_LOOKUP_RSSI_THRESHOLD_MAX (120)
#define CFG_NEIGHBOR_LOOKUP_RSSI_THRESHOLD_DEFAULT (78)
+/*
+ * <ini>
+ * lookup_threshold_5g_offset - Lookup Threshold offset for 5G band
+ * @Min: -120
+ * @Max: +120
+ * @Default: 0
+ *
+ * This ini is used to set the 5G band lookup threshold for roaming.
+ * It depends on another INI which is gNeighborLookupThreshold.
+ * gNeighborLookupThreshold is a legacy INI item which will be used to
+ * set the RSSI lookup threshold for both 2G and 5G bands. If the
+ * user wants to setup a different threshold for a 5G band, then user
+ * can use this offset value which will be summed up to the value of
+ * gNeighborLookupThreshold and used for 5G
+ * e.g: gNeighborLookupThreshold = -76dBm
+ * lookup_threshold_5g_offset = 6dBm
+ * Then the 5G band will be configured to -76+6 = -70dBm
+ * A default value of Zero to lookup_threshold_5g_offset will keep the
+ * thresholds same for both 2G and 5G bands
+ *
+ * Related: gNeighborLookupThreshold
+ *
+ * Supported Feature: Roaming
+ *
+ * Usage: Internal/External
+ *
+ * </ini>
+ */
+#define CFG_5G_RSSI_THRESHOLD_OFFSET_NAME "lookup_threshold_5g_offset"
+#define CFG_5G_RSSI_THRESHOLD_OFFSET_MIN (-120)
+#define CFG_5G_RSSI_THRESHOLD_OFFSET_MAX (120)
+#define CFG_5G_RSSI_THRESHOLD_OFFSET_DEFAULT (0)
+
#define CFG_DELAY_BEFORE_VDEV_STOP_NAME "gDelayBeforeVdevStop"
#define CFG_DELAY_BEFORE_VDEV_STOP_MIN (2)
#define CFG_DELAY_BEFORE_VDEV_STOP_MAX (200)
@@ -8092,6 +8155,20 @@
#define CFG_FLOW_STEERING_ENABLED_DEFAULT (0)
/*
+ * Max number of MSDUs per HTT RX IN ORDER INDICATION msg.
+ * Note that this has a direct impact on the size of source CE rings.
+ * It is possible to go below 8, but would require testing; so we are
+ * restricting the lower limit to 8 artificially
+ *
+ * It is recommended that this value is a POWER OF 2.
+ *
+ * Values lower than 8 are for experimental purposes only.
+ */
+#define CFG_MAX_MSDUS_PER_RXIND_NAME "maxMSDUsPerRxInd"
+#define CFG_MAX_MSDUS_PER_RXIND_MIN (4)
+#define CFG_MAX_MSDUS_PER_RXIND_MAX (32)
+#define CFG_MAX_MSDUS_PER_RXIND_DEFAULT (CFG_MAX_MSDUS_PER_RXIND_MAX)
+/*
* In static display use case when APPS is in stand alone power save mode enable
* active offload mode which helps FW to filter out MC/BC data packets to avoid
* APPS wake up and save more power.
@@ -9128,6 +9205,49 @@
#define CFG_RX_MODE_DEFAULT (CFG_ENABLE_RX_THREAD | CFG_ENABLE_NAPI)
#endif
+/*
+ * <ini>
+ * ce_service_max_yield_time - Control to set ce service max yield time (in usec)
+ *
+ * @Min: 500
+ * @Max: 10000
+ * @Default: 10000
+ *
+ * This ini is used to set ce service max yield time (in usec)
+ *
+ * Supported Feature: NAPI
+ *
+ * Usage: Internal
+ *
+ * </ini>
+ */
+#define CFG_CE_SERVICE_MAX_YIELD_TIME_NAME "ce_service_max_yield_time"
+#define CFG_CE_SERVICE_MAX_YIELD_TIME_MIN (500)
+#define CFG_CE_SERVICE_MAX_YIELD_TIME_MAX (10000)
+#define CFG_CE_SERVICE_MAX_YIELD_TIME_DEFAULT (10000)
+
+/*
+ * <ini>
+ * ce_service_max_rx_ind_flush - Control to set ce service max rx ind flush
+ *
+ * @Min: 1
+ * @Max: 32
+ * @Default: 32
+ *
+ * This ini is used to set ce service max rx ind flush
+ *
+ * Supported Feature: NAPI
+ *
+ * Usage: Internal
+ *
+ * </ini>
+ */
+#define CFG_CE_SERVICE_MAX_RX_IND_FLUSH_NAME "ce_service_max_rx_ind_flush"
+#define CFG_CE_SERVICE_MAX_RX_IND_FLUSH_MIN (1)
+#define CFG_CE_SERVICE_MAX_RX_IND_FLUSH_MAX (32)
+#define CFG_CE_SERVICE_MAX_RX_IND_FLUSH_DEFAULT (32)
+
+
/* List of RPS CPU maps for different rx queues registered by WLAN driver
* Ref - Kernel/Documentation/networking/scaling.txt
* RPS CPU map for a particular RX queue, selects CPU(s) for bottom half
@@ -11314,6 +11434,647 @@
#define CFG_DTIM_1CHRX_ENABLE_MAX (1)
#define CFG_DTIM_1CHRX_ENABLE_DEFAULT (1)
+/*
+ * <ini>
+ * rssi_weightage - Rssi Weightage to calculate best candidate
+ * @Min: 0
+ * @Max: 100
+ * @Default: 25
+ *
+ * This ini is used to increase/decrease rssi weightage in best candidate
+ * selection. AP with better RSSI will get more weightage.
+ *
+ * Usage: External
+ *
+ * </ini>
+ */
+
+#define CFG_RSSI_WEIGHTAGE_NAME "rssi_weightage"
+#define CFG_RSSI_WEIGHTAGE_DEFAULT (25)
+#define CFG_RSSI_WEIGHTAGE_MIN (0)
+#define CFG_RSSI_WEIGHTAGE_MAX (100)
+/*
+ * <ini>
+ * ht_caps_weightage - HT caps weightage to calculate best candidate
+ * @Min: 0
+ * @Max: 100
+ * @Default: 7
+ *
+ * This ini is used to increase/decrease HT caps weightage in best candidate
+ * selection. If AP supports HT caps, AP will get additional Weightage with
+ * this param.
+ *
+ * Related: None
+ *
+ * Supported Feature: STA Candidate selection
+ *
+ * Usage: External
+ *
+ * </ini>
+ */
+
+#define CFG_HT_CAPABILITY_WEIGHTAGE_NAME "ht_caps_weightage"
+#define CFG_HT_CAPABILITY_WEIGHTAGE_DEFAULT (7)
+#define CFG_HT_CAPABILITY_WEIGHTAGE_MIN (0)
+#define CFG_HT_CAPABILITY_WEIGHTAGE_MAX (100)
+/*
+ * <ini>
+ * vht_caps_weightage - VHT caps Weightage to calculate best candidate
+ * @Min: 0
+ * @Max: 100
+ * @Default: 5
+ *
+ * This ini is used to increase/decrease VHT caps weightage in best candidate
+ * selection. If AP supports VHT caps, AP will get additional weightage with
+ * this param.
+ *
+ * Related: None
+ *
+ * Supported Feature: STA Candidate selection
+ *
+ * Usage: External
+ *
+ * </ini>
+ */
+
+#define CFG_VHT_CAPABILITY_WEIGHTAGE_NAME "vht_caps_weightage"
+#define CFG_VHT_CAPABILITY_WEIGHTAGE_DEFAULT (5)
+#define CFG_VHT_CAPABILITY_WEIGHTAGE_MIN (0)
+#define CFG_VHT_CAPABILITY_WEIGHTAGE_MAX (100)
+
+/*
+ * <ini>
+ * chan_width_weightage - Channel Width Weightage to calculate best candidate
+ * @Min: 0
+ * @Max: 100
+ * @Default: 5
+ *
+ * This ini is used to increase/decrease Channel Width weightage in best
+ * candidate selection. AP with Higher channel width will get higher weightage.
+ *
+ * Related: None
+ *
+ * Supported Feature: STA Candidate selection
+ *
+ * Usage: External
+ *
+ * </ini>
+ */
+#define CFG_CHAN_WIDTH_WEIGHTAGE_NAME "chan_width_weightage"
+#define CFG_CHAN_WIDTH_WEIGHTAGE_DEFAULT (5)
+#define CFG_CHAN_WIDTH_WEIGHTAGE_MIN (0)
+#define CFG_CHAN_WIDTH_WEIGHTAGE_MAX (100)
+
+/*
+ * <ini>
+ * chan_band_weightage - Channel Band preference for 5GHZ to
+ * calculate best candidate
+ * @Min: 0
+ * @Max: 100
+ * @Default: 5
+ *
+ * This ini is used to increase/decrease Channel Band Preference weightage
+ * in best candidate selection. 5GHZ AP get this additional boost compared
+ * to 2GHZ AP.
+ *
+ * Related: None
+ *
+ * Supported Feature: STA Candidate selection
+ *
+ * Usage: External
+ *
+ * </ini>
+ */
+#define CFG_CHAN_BAND_WEIGHTAGE_NAME "chan_band_weightage"
+#define CFG_CHAN_BAND_WEIGHTAGE_DEFAULT (5)
+#define CFG_CHAN_BAND_WEIGHTAGE_MIN (0)
+#define CFG_CHAN_BAND_WEIGHTAGE_MAX (100)
+
+/*
+ * <ini>
+ * nss_weightage - NSS Weightage to calculate best candidate
+ * @Min: 0
+ * @Max: 100
+ * @Default: 5
+ *
+ * This ini is used to increase/decrease NSS weightage in best candidate
+ * selection. If there are two APs, one AP supports 2x2 and another one
+ * supports 1x1 and the station supports 2x2, the first AP will get this
+ * additional weightage.
+ *
+ * Related: None
+ *
+ * Supported Feature: STA Candidate selection
+ *
+ * Usage: External
+ *
+ * </ini>
+ */
+#define CFG_NSS_WEIGHTAGE_NAME "nss_weightage"
+#define CFG_NSS_WEIGHTAGE_DEFAULT (5)
+#define CFG_NSS_WEIGHTAGE_MIN (0)
+#define CFG_NSS_WEIGHTAGE_MAX (100)
+
+/*
+ * <ini>
+ * beamforming_cap_weightage - Beam Forming Weightage to
+ * calculate best candidate
+ * @Min: 0
+ * @Max: 100
+ * @Default: 2
+ *
+ * This ini is used to increase/decrease Beam forming Weightage if
+ * an AP supports Beam forming or not. If the AP supports Beam forming,
+ * that AP will get an additional boost of this weightage
+ *
+ * Related: None
+ *
+ * Supported Feature: STA Candidate selection
+ *
+ * Usage: External
+ *
+ * </ini>
+ */
+#define CFG_BEAMFORMING_CAP_WEIGHTAGE_NAME "beamforming_cap_weightage"
+#define CFG_BEAMFORMING_CAP_WEIGHTAGE_DEFAULT (2)
+#define CFG_BEAMFORMING_CAP_WEIGHTAGE_MIN (0)
+#define CFG_BEAMFORMING_CAP_WEIGHTAGE_MAX (100)
+
+/*
+ * <ini>
+ * pcl_weightage - PCL Weightage to calculate best candidate
+ * @Min: 0
+ * @Max: 100
+ * @Default: 0
+ *
+ * This ini is used to increase/decrease PCL weightage in best candidate
+ * selection. If some APs are in the PCL list, those APs will get additional
+ * weightage.
+ *
+ * Related: None
+ *
+ * Supported Feature: STA Candidate selection
+ *
+ * Usage: External
+ *
+ * </ini>
+ */
+#define CFG_PCL_WEIGHT_WEIGHTAGE_NAME "pcl_weightage"
+#define CFG_PCL_WEIGHT_DEFAULT (0)
+#define CFG_PCL_WEIGHT_MIN (0)
+#define CFG_PCL_WEIGHT_MAX (100)
+
+/*
+ * <ini>
+ * channel_congestion_weightage - channel Congestion Weightage to
+ * calculate best candidate
+ * @Min: 0
+ * @Max: 100
+ * @Default: 5
+ *
+ * This ini is used to increase/decrease channel congestion weightage
+ * in candidate selection. Congestion is measured with the help of
+ * ESP/QBSS load.
+ *
+ * Related: None
+ *
+ * Supported Feature: STA Candidate selection
+ *
+ * Usage: External
+ *
+ * </ini>
+ */
+#define CFG_CHANNEL_CONGESTION_WEIGHTAGE_NAME "channel_congestion_weightage"
+#define CFG_CHANNEL_CONGESTION_WEIGHTAGE_DEFAULT (5)
+#define CFG_CHANNEL_CONGESTION_WEIGHTAGE_MIN (0)
+#define CFG_CHANNEL_CONGESTION_WEIGHTAGE_MAX (100)
+
+/*
+ * <ini>
+ * best_rssi_threshold - Best Rssi for score calculation
+ * @Min: 0
+ * @Max: 127
+ * @Default: 55
+ *
+ * This ini tells the limit for best RSSI. RSSI better than this limit
+ * is considered as best RSSI.
+ *
+ * Related: None
+ *
+ * Supported Feature: STA Candidate selection
+ *
+ * Usage: External
+ *
+ * </ini>
+ */
+#define CFG_BEST_RSSI_THRESHOLD_NAME "best_rssi_threshold"
+#define CFG_BEST_RSSI_THRESHOLD_DEFAULT (55)
+#define CFG_BEST_RSSI_THRESHOLD_MIN (0)
+#define CFG_BEST_RSSI_THRESHOLD_MAX (127)
+
+/*
+ * <ini>
+ * good_rssi_threshold - Good Rssi for score calculation
+ * @Min: 0
+ * @Max: 127
+ * @Default: 70
+ *
+ * This ini tells limit for good RSSI. RSSI better than this limit
+ * and less than best_rssi_threshold are considered as good rssi.
+ *
+ * Related: rssi_weightage
+ *
+ * Supported Feature: STA Candidate selection
+ *
+ * Usage: External
+ *
+ * </ini>
+ */
+#define CFG_GOOD_RSSI_THRESHOLD_NAME "good_rssi_threshold"
+#define CFG_GOOD_RSSI_THRESHOLD_DEFAULT (70)
+#define CFG_GOOD_RSSI_THRESHOLD_MIN (0)
+#define CFG_GOOD_RSSI_THRESHOLD_MAX (127)
+
+/*
+ * <ini>
+ * bad_rssi_threshold - Bad Rssi for score calculation
+ * @Min: 0
+ * @Max: 127
+ * @Default: 80
+ *
+ * This ini tells limit for Bad RSSI. RSSI greater than bad_rssi_threshold
+ * are considered as bad rssi.
+ *
+ * Related: rssi_weightage
+ *
+ * Supported Feature: STA Candidate selection
+ *
+ * Usage: External
+ *
+ * </ini>
+ */
+#define CFG_BAD_RSSI_THRESHOLD_NAME "bad_rssi_threshold"
+#define CFG_BAD_RSSI_THRESHOLD_DEFAULT (80)
+#define CFG_BAD_RSSI_THRESHOLD_MIN (0)
+#define CFG_BAD_RSSI_THRESHOLD_MAX (127)
+
+/*
+ * <ini>
+ * good_rssi_pcnt - Percent Score to Good RSSI out of total RSSI score.
+ * @Min: 0
+ * @Max: 100
+ * @Default: 80
+ *
+ * This ini tells about how much percent should be given to good RSSI
+ * out of RSSI weightage.
+ *
+ * Related: rssi_weightage
+ *
+ * Supported Feature: STA Candidate selection
+ *
+ * Usage: External
+ *
+ * </ini>
+ */
+#define CFG_GOOD_RSSI_PCNT_NAME "good_rssi_pcnt"
+#define CFG_GOOD_RSSI_PCNT_DEFAULT (80)
+#define CFG_GOOD_RSSI_PCNT_MIN (0)
+#define CFG_GOOD_RSSI_PCNT_MAX (100)
+
+/*
+ * <ini>
+ * bad_rssi_pcnt - Percent Score to BAD RSSI out of total RSSI score.
+ * @Min: 0
+ * @Max: 100
+ * @Default: 25
+ *
+ * This ini tells about how much percent should be given to bad RSSI
+ * out of RSSI weightage.
+ *
+ * Related: rssi_weightage
+ *
+ * Supported Feature: STA Candidate selection
+ *
+ * Usage: External
+ *
+ * </ini>
+ */
+#define CFG_BAD_RSSI_PCNT_NAME "bad_rssi_pcnt"
+#define CFG_BAD_RSSI_PCNT_DEFAULT (25)
+#define CFG_BAD_RSSI_PCNT_MIN (0)
+#define CFG_BAD_RSSI_PCNT_MAX (100)
+
+/*
+ * <ini>
+ * good_rssi_bucket_size - Bucket size between best and good RSSI to score.
+ * @Min: 1
+ * @Max: 100
+ * @Default: 5
+ *
+ * This ini tells about bucket size for scoring between best and good RSSI.
+ * Below Best RSSI, 100% score will be given. Between best and good rssi, rssi
+ * is divided in buckets and score will be assigned bucket wise.
+ *
+ * Related: rssi_weightage
+ *
+ * Supported Feature: STA Candidate selection
+ *
+ * Usage: External
+ *
+ * </ini>
+ */
+#define CFG_GOOD_RSSI_BUCKET_SIZE_NAME "good_rssi_bucket_size"
+#define CFG_GOOD_RSSI_BUCKET_SIZE_DEFAULT (5)
+#define CFG_GOOD_RSSI_BUCKET_SIZE_MIN (1)
+#define CFG_GOOD_RSSI_BUCKET_SIZE_MAX (100)
+
+/*
+ * <ini>
+ * bad_rssi_bucket_size - Bucket size between good and bad RSSI to score.
+ * @Min: 1
+ * @Max: 100
+ * @Default: 5
+ *
+ * This ini tells about bucket size for scoring between good and bad RSSI.
+ * Between good and bad rssi, rssi is divided in buckets and score will
+ * be assigned bucket wise.
+ *
+ * Related: rssi_weightage
+ *
+ * Supported Feature: STA Candidate selection
+ *
+ * Usage: External
+ *
+ * </ini>
+ */
+#define CFG_BAD_RSSI_BUCKET_SIZE_NAME "bad_rssi_bucket_size"
+#define CFG_BAD_RSSI_BUCKET_SIZE_DEFAULT (5)
+#define CFG_BAD_RSSI_BUCKET_SIZE_MIN (1)
+#define CFG_BAD_RSSI_BUCKET_SIZE_MAX (100)
+
+/*
+ * <ini>
+ * rssi_pref_5g_rssi_thresh - A RSSI threshold above which 5 GHz is not favored
+ * @Min: 0
+ * @Max: 127
+ * @Default: 76
+ *
+ * 5G AP are given chan_band_weightage. This ini tells about RSSI threshold
+ * above which 5GHZ is not favored.
+ *
+ * Related: rssi_weightage
+ *
+ * Supported Feature: STA Candidate selection
+ *
+ * Usage: External
+ *
+ * </ini>
+ */
+#define CFG_RSSI_PERF_5G_THRESHOLD_NAME "rssi_pref_5g_rssi_thresh"
+#define CFG_RSSI_PERF_5G_THRESHOLD_DEFAULT (76)
+#define CFG_RSSI_PERF_5G_THRESHOLD_MIN (0)
+#define CFG_RSSI_PERF_5G_THRESHOLD_MAX (127)
+
+/*
+ * <ini>
+ * bandwidth_weight_per_index - Bandwidth weight per index
+ * @Min: 0x00000000
+ * @Max: 0x64646464
+ * @Default: 0x3C322314
+ *
+ * For Bandwidth chan_width_weightage(5) is given. In this weightage
+ * this ini divide individual index weight as per bandwidth.
+ * Indexes are defined in this way.
+ * 0 Index : 20 MHz - Def 20%
+ * 1 Index : 40 MHz - Def 35%
+ * 2 Index : 80 MHz - Def 50%
+ * 3 Index : 160 MHz - Def 60%
+ *
+ * These percentage values are stored in HEX. For any index max weight can be
+ * 100 so Max value for each index will be 64.
+ *
+ * Related: chan_width_weightage
+ *
+ * Supported Feature: STA Candidate selection
+ *
+ * Usage: External
+ *
+ * </ini>
+ */
+#define CFG_BAND_WIDTH_WEIGHT_PER_INDEX_NAME "bandwidth_weight_per_index"
+#define CFG_BAND_WIDTH_WEIGHT_PER_INDEX_DEFAULT (0x3C322314)
+#define CFG_BAND_WIDTH_WEIGHT_PER_INDEX_MIN (0x00000000)
+#define CFG_BAND_WIDTH_WEIGHT_PER_INDEX_MAX (0x64646464)
+/*
+ * <ini>
+ * nss_weight_per_index - nss weight per index
+ * @Min: 0x00000000
+ * @Max: 0x64646464
+ * @Default: 0x64645046
+ *
+ * For NSS nss_weightage(5) is given. In this weightage
+ * this ini divide individual index weight as per NSS supported.
+ * Indexes are defined in this way.
+ * 0 Index : 1X1 - Def 70%
+ * 1 Index : 2X2 - Def 80%
+ * 2 Index : 3X3 - Def 100%
+ * 3 Index : 4X4 - Def 100%
+ *
+ * These percentage values are stored in HEX. For any index max weight can be
+ * 100 so Max value for each index will be 64.
+ *
+ * Related: nss_weightage
+ *
+ * Supported Feature: STA Candidate selection
+ *
+ * Usage: External
+ *
+ * </ini>
+ */
+#define CFG_NSS_WEIGHT_PER_INDEX_NAME "nss_weight_per_index"
+#define CFG_NSS_WEIGHT_PER_INDEX_DEFAULT (0x64645046)
+#define CFG_NSS_WEIGHT_PER_INDEX_MIN (0x00000000)
+#define CFG_NSS_WEIGHT_PER_INDEX_MAX (0x64646464)
+
+/*
+ * <ini>
+ * band_weight_per_index - Band weight per index
+ * @Min: 0x00000000
+ * @Max: 0x64646464
+ * @Default: 0x0000644B
+ *
+ * For Band chan_band_weightage(5) is given. In this weightage
+ * this ini divide individual index weight as per Band supported.
+ * Indexes are defined in this way.
+ * 0 Index : 2.4GHz - Def 75%
+ * 1 Index : 5GHz - Def 100%
+ * 2 Index : Reserved
+ * 3 Index : Reserved
+ *
+ * These percentage values are stored in HEX. For any index max weight can be
+ * 100 so Max value for each index will be 64.
+ *
+ * Related: chan_band_weightage, rssi_pref_5g_rssi_thresh
+ *
+ * Supported Feature: STA Candidate selection
+ *
+ * Usage: External
+ *
+ * </ini>
+ */
+#define CFG_BAND_WEIGHT_PER_INDEX_NAME "band_weight_per_index"
+#define CFG_BAND_WEIGHT_PER_INDEX_DEFAULT (0x0000644B)
+#define CFG_BAND_WEIGHT_PER_INDEX_MIN (0x00000000)
+#define CFG_BAND_WEIGHT_PER_INDEX_MAX (0x64646464)
+
+/*
+ * <ini>
+ * num_esp_qbss_slots - number of slots in which the esp/qbss load will
+ * be divided
+ * @Min: 1
+ * @Max: 15
+ * @Default: 4
+ *
+ * number of slots in which the esp/qbss load will be divided.
+ * Max 15. Index 0 is used for 'not_present'. num_slot will
+ * equally divide 100. e.g., if num_slot = 4: slot 1 = 0-25%, slot
+ * 2 = 26-50%, slot 3 = 51-75%, slot 4 = 76-100%
+ *
+ * Usage: External
+ *
+ * </ini>
+ */
+#define CFG_ESP_QBSS_SLOTS_NAME "num_esp_qbss_slots"
+#define CFG_ESP_QBSS_SLOTS_DEFAULT (4)
+#define CFG_ESP_QBSS_SLOTS_MIN (1)
+#define CFG_ESP_QBSS_SLOTS_MAX (15)
+
+/*
+ * <ini>
+ * esp_qbss_score_idx3_to_0 - esp/qbss load score for slots 0-3
+ * @Min: 0x00000000
+ * @Max: 0x64646464
+ * @Default: 0x19326432
+ *
+ * For esp/qbss load score weightage(5) is given. In this weightage
+ * this ini divide individual index weight as num_esp_qbss_slots.
+ * Indexes are defined in this way.
+ * BITS 0-7 : the scoring when ESP QBSS not present
+ * BITS 8-15 : SLOT_1
+ * BITS 16-23 : SLOT_2
+ * BITS 24-31 : SLOT_3
+ *
+ * These percentage values are stored in HEX. For any slot max weight can be
+ * 100 so Max value for each slot will be 0x64.
+ *
+ * Related: channel_congestion_weightage
+ *
+ * Supported Feature: STA Candidate selection
+ *
+ * Usage: External
+ *
+ * </ini>
+ */
+#define CFG_ESP_QBSS_SCORE_IDX3_TO_0_NAME "esp_qbss_score_idx3_to_0"
+#define CFG_ESP_QBSS_SCORE_IDX3_TO_0_DEFAULT (0x19326432)
+#define CFG_ESP_QBSS_SCORE_IDX3_TO_0_MIN (0x00000000)
+#define CFG_ESP_QBSS_SCORE_IDX3_TO_0_MAX (0x64646464)
+
+/*
+ * <ini>
+ * esp_qbss_score_idx7_to_4 - esp/qbss load score for slots 4-7
+ * @Min: 0x00000000
+ * @Max: 0x64646464
+ * @Default: 0x00000019
+ *
+ * For esp/qbss load score weightage(5) is given. In this weightage
+ * this ini divide individual index weight as num_esp_qbss_slots.
+ * Indexes are defined in this way.
+ * BITS 0-7 : SLOT_4
+ * BITS 8-15 : SLOT_5
+ * BITS 16-23 : SLOT_6
+ * BITS 24-31 : SLOT_7
+ *
+ * These percentage values are stored in HEX. For any slot max weight can be
+ * 100 so Max value for each slot will be 0x64.
+ *
+ * Related: num_esp_qbss_slots, channel_congestion_weightage
+ *
+ * Supported Feature: STA Candidate selection
+ *
+ * Usage: External
+ *
+ * </ini>
+ */
+#define CFG_ESP_QBSS_SCORE_IDX7_TO_4_NAME "esp_qbss_score_idx7_to_4"
+#define CFG_ESP_QBSS_SCORE_IDX7_TO_4_DEFAULT (0x00000019)
+#define CFG_ESP_QBSS_SCORE_IDX7_TO_4_MIN (0x00000000)
+#define CFG_ESP_QBSS_SCORE_IDX7_TO_4_MAX (0x64646464)
+
+
+/*
+ * <ini>
+ * esp_qbss_score_idx11_to_8 - esp/qbss score for index 8-11
+ * @Min: 0x00000000
+ * @Max: 0x64646464
+ * @Default: 0x00000000
+ *
+ * For esp/qbss load score weightage(5) is given. In this weightage
+ * this ini divide individual index weight as num_esp_qbss_slots.
+ * Indexes are defined in this way.
+ * BITS 0-7 : SLOT_8
+ * BITS 8-15 : SLOT_9
+ * BITS 16-23 : SLOT_10
+ * BITS 24-31 : SLOT_11
+ *
+ * These percentage values are stored in HEX. For any slot max weight can be
+ * 100 so Max value for each slot will be 0x64.
+ *
+ * Related: num_esp_qbss_slots, channel_congestion_weightage
+ *
+ * Supported Feature: STA Candidate selection
+ *
+ * Usage: External
+ *
+ * </ini>
+ */
+#define CFG_ESP_QBSS_SCORE_IDX11_TO_8_NAME "esp_qbss_score_idx11_to_8"
+#define CFG_ESP_QBSS_SCORE_IDX11_TO_8_DEFAULT (0x00000000)
+#define CFG_ESP_QBSS_SCORE_IDX11_TO_8_MIN (0x00000000)
+#define CFG_ESP_QBSS_SCORE_IDX11_TO_8_MAX (0x64646464)
+
+
+/*
+ * <ini>
+ * esp_qbss_score_idx15_to_12 - esp/qbss score for index 12-15
+ * @Min: 0x00000000
+ * @Max: 0x64646464
+ * @Default: 0x00000000
+ *
+ * For esp/qbss load score weightage(5) is given. In this weightage
+ * this ini divide individual index weight as num_esp_qbss_slots.
+ * Indexes are defined in this way.
+ * BITS 0-7 : SLOT_12
+ * BITS 8-15 : SLOT_13
+ * BITS 16-23 : SLOT_14
+ * BITS 24-31 : SLOT_15
+ *
+ * These percentage values are stored in HEX. For any slot max weight can be
+ * 100 so Max value for each slot will be 0x64.
+ *
+ * Related: num_esp_qbss_slots, channel_congestion_weightage
+ *
+ * Supported Feature: STA Candidate selection
+ *
+ * Usage: External
+ *
+ * </ini>
+ */
+#define CFG_ESP_QBSS_SCORE_IDX15_TO_12_NAME "esp_qbss_score_idx15_to_12"
+#define CFG_ESP_QBSS_SCORE_IDX15_TO_12_DEFAULT (0x00000000)
+#define CFG_ESP_QBSS_SCORE_IDX15_TO_12_MIN (0x00000000)
+#define CFG_ESP_QBSS_SCORE_IDX15_TO_12_MAX (0x64646464)
+
/*---------------------------------------------------------------------------
Type declarations
-------------------------------------------------------------------------*/
@@ -11897,6 +12658,7 @@
bool tso_enable;
bool lro_enable;
bool flow_steering_enable;
+ uint8_t max_msdus_per_rxinorderind;
bool active_mode_offload;
bool bpf_packet_filter_enable;
/* parameter for defer timer for enabling TDLS on p2p listen */
@@ -11908,6 +12670,8 @@
#endif
uint8_t dot11p_mode;
uint8_t rx_mode;
+ uint32_t ce_service_max_yield_time;
+ uint8_t ce_service_max_rx_ind_flush;
uint8_t cpu_map_list[CFG_RPS_RX_QUEUE_CPU_MAP_LIST_LEN];
#ifdef FEATURE_WLAN_EXTSCAN
bool extscan_enabled;
@@ -11956,6 +12720,7 @@
bool ignore_peer_ht_opmode;
uint32_t roam_dense_min_aps;
int8_t roam_bg_scan_bad_rssi_thresh;
+ uint8_t roam_bad_rssi_thresh_offset_2g;
uint32_t roam_bg_scan_client_bitmap;
bool enable_edca_params;
uint32_t edca_vo_cwmin;
@@ -12091,7 +12856,6 @@
bool is_force_1x1;
uint16_t num_11b_tx_chains;
uint16_t num_11ag_tx_chains;
-
/* LCA(Last connected AP) disallow configs */
uint32_t disallow_duration;
uint32_t rssi_channel_penalization;
@@ -12102,6 +12866,32 @@
uint8_t upper_brssi_thresh;
uint8_t lower_brssi_thresh;
bool enable_dtim_1chrx;
+ int8_t rssi_thresh_offset_5g;
+ uint8_t rssi_weightage;
+ uint8_t ht_caps_weightage;
+ uint8_t vht_caps_weightage;
+ uint8_t chan_width_weightage;
+ uint8_t chan_band_weightage;
+ uint8_t nss_weightage;
+ uint8_t beamforming_cap_weightage;
+ uint8_t pcl_weightage;
+ uint8_t channel_congestion_weightage;
+ uint32_t bandwidth_weight_per_index;
+ uint32_t nss_weight_per_index;
+ uint32_t band_weight_per_index;
+ uint32_t best_rssi_threshold;
+ uint32_t good_rssi_threshold;
+ uint32_t bad_rssi_threshold;
+ uint32_t good_rssi_pcnt;
+ uint32_t bad_rssi_pcnt;
+ uint32_t good_rssi_bucket_size;
+ uint32_t bad_rssi_bucket_size;
+ uint32_t rssi_pref_5g_rssi_thresh;
+ uint8_t num_esp_qbss_slots;
+ uint32_t esp_qbss_score_slots3_to_0;
+ uint32_t esp_qbss_score_slots7_to_4;
+ uint32_t esp_qbss_score_slots11_to_8;
+ uint32_t esp_qbss_score_slots15_to_12;
};
#define VAR_OFFSET(_Struct, _Var) (offsetof(_Struct, _Var))
diff --git a/drivers/staging/qcacld-3.0/core/hdd/inc/wlan_hdd_main.h b/drivers/staging/qcacld-3.0/core/hdd/inc/wlan_hdd_main.h
index 51e67bd..16a4843 100644
--- a/drivers/staging/qcacld-3.0/core/hdd/inc/wlan_hdd_main.h
+++ b/drivers/staging/qcacld-3.0/core/hdd/inc/wlan_hdd_main.h
@@ -2502,4 +2502,10 @@
*/
void hdd_dp_trace_init(struct hdd_config *config);
+/**
+ * hdd_pld_ipa_uc_shutdown_pipes() - Disconnect IPA WDI pipes during PDR
+ *
+ * Return: None
+ */
+void hdd_pld_ipa_uc_shutdown_pipes(void);
#endif /* end #if !defined(WLAN_HDD_MAIN_H) */
diff --git a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_cfg.c b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_cfg.c
index 0fa392f..cda41b9 100644
--- a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_cfg.c
+++ b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_cfg.c
@@ -1351,6 +1351,14 @@
CFG_NEIGHBOR_LOOKUP_RSSI_THRESHOLD_MAX,
cb_notify_set_neighbor_lookup_rssi_threshold, 0),
+ REG_VARIABLE(CFG_5G_RSSI_THRESHOLD_OFFSET_NAME,
+ WLAN_PARAM_SignedInteger, struct hdd_config,
+ rssi_thresh_offset_5g,
+ VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
+ CFG_5G_RSSI_THRESHOLD_OFFSET_DEFAULT,
+ CFG_5G_RSSI_THRESHOLD_OFFSET_MIN,
+ CFG_5G_RSSI_THRESHOLD_OFFSET_MAX),
+
REG_DYNAMIC_VARIABLE(CFG_OPPORTUNISTIC_SCAN_THRESHOLD_DIFF_NAME,
WLAN_PARAM_Integer,
struct hdd_config, nOpportunisticThresholdDiff,
@@ -3578,6 +3586,13 @@
CFG_FLOW_STEERING_ENABLED_MIN,
CFG_FLOW_STEERING_ENABLED_MAX),
+ REG_VARIABLE(CFG_MAX_MSDUS_PER_RXIND_NAME, WLAN_PARAM_Integer,
+ struct hdd_config, max_msdus_per_rxinorderind,
+ VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
+ CFG_MAX_MSDUS_PER_RXIND_DEFAULT,
+ CFG_MAX_MSDUS_PER_RXIND_MIN,
+ CFG_MAX_MSDUS_PER_RXIND_MAX),
+
REG_VARIABLE(CFG_ACTIVE_MODE_OFFLOAD, WLAN_PARAM_Integer,
struct hdd_config, active_mode_offload,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
@@ -3903,6 +3918,14 @@
CFG_ROAM_BG_SCAN_CLIENT_BITMAP_MIN,
CFG_ROAM_BG_SCAN_CLIENT_BITMAP_MAX),
+ REG_VARIABLE(CFG_ROAM_BG_SCAN_BAD_RSSI_OFFSET_2G_NAME,
+ WLAN_PARAM_Integer, struct hdd_config,
+ roam_bad_rssi_thresh_offset_2g,
+ VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
+ CFG_ROAM_BG_SCAN_BAD_RSSI_OFFSET_2G_DEFAULT,
+ CFG_ROAM_BG_SCAN_BAD_RSSI_OFFSET_2G_MIN,
+ CFG_ROAM_BG_SCAN_BAD_RSSI_OFFSET_2G_MAX),
+
REG_VARIABLE(CFG_ENABLE_FATAL_EVENT_TRIGGER, WLAN_PARAM_Integer,
struct hdd_config, enable_fatal_event,
VAR_FLAGS_OPTIONAL |
@@ -4155,6 +4178,20 @@
CFG_RX_MODE_MIN,
CFG_RX_MODE_MAX),
+ REG_VARIABLE(CFG_CE_SERVICE_MAX_YIELD_TIME_NAME, WLAN_PARAM_Integer,
+ struct hdd_config, ce_service_max_yield_time,
+ VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
+ CFG_CE_SERVICE_MAX_YIELD_TIME_DEFAULT,
+ CFG_CE_SERVICE_MAX_YIELD_TIME_MIN,
+ CFG_CE_SERVICE_MAX_YIELD_TIME_MAX),
+
+ REG_VARIABLE(CFG_CE_SERVICE_MAX_RX_IND_FLUSH_NAME, WLAN_PARAM_Integer,
+ struct hdd_config, ce_service_max_rx_ind_flush,
+ VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
+ CFG_CE_SERVICE_MAX_RX_IND_FLUSH_DEFAULT,
+ CFG_CE_SERVICE_MAX_RX_IND_FLUSH_MIN,
+ CFG_CE_SERVICE_MAX_RX_IND_FLUSH_MAX),
+
REG_VARIABLE_STRING(CFG_RPS_RX_QUEUE_CPU_MAP_LIST_NAME,
WLAN_PARAM_String,
struct hdd_config, cpu_map_list,
@@ -4744,6 +4781,183 @@
CFG_DTIM_1CHRX_ENABLE_DEFAULT,
CFG_DTIM_1CHRX_ENABLE_MIN,
CFG_DTIM_1CHRX_ENABLE_MAX),
+
+ REG_VARIABLE(CFG_RSSI_WEIGHTAGE_NAME, WLAN_PARAM_Integer,
+ struct hdd_config, rssi_weightage,
+ VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
+ CFG_RSSI_WEIGHTAGE_DEFAULT,
+ CFG_RSSI_WEIGHTAGE_MIN,
+ CFG_RSSI_WEIGHTAGE_MAX),
+
+ REG_VARIABLE(CFG_HT_CAPABILITY_WEIGHTAGE_NAME, WLAN_PARAM_Integer,
+ struct hdd_config, ht_caps_weightage,
+ VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
+ CFG_HT_CAPABILITY_WEIGHTAGE_DEFAULT,
+ CFG_HT_CAPABILITY_WEIGHTAGE_MIN,
+ CFG_HT_CAPABILITY_WEIGHTAGE_MAX),
+
+ REG_VARIABLE(CFG_VHT_CAPABILITY_WEIGHTAGE_NAME, WLAN_PARAM_Integer,
+ struct hdd_config, vht_caps_weightage,
+ VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
+ CFG_VHT_CAPABILITY_WEIGHTAGE_DEFAULT,
+ CFG_VHT_CAPABILITY_WEIGHTAGE_MIN,
+ CFG_VHT_CAPABILITY_WEIGHTAGE_MAX),
+
+ REG_VARIABLE(CFG_CHAN_WIDTH_WEIGHTAGE_NAME, WLAN_PARAM_Integer,
+ struct hdd_config, chan_width_weightage,
+ VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
+ CFG_CHAN_WIDTH_WEIGHTAGE_DEFAULT,
+ CFG_CHAN_WIDTH_WEIGHTAGE_MIN,
+ CFG_CHAN_WIDTH_WEIGHTAGE_MAX),
+
+ REG_VARIABLE(CFG_CHAN_BAND_WEIGHTAGE_NAME, WLAN_PARAM_Integer,
+ struct hdd_config, chan_band_weightage,
+ VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
+ CFG_CHAN_BAND_WEIGHTAGE_DEFAULT,
+ CFG_CHAN_BAND_WEIGHTAGE_MIN,
+ CFG_CHAN_BAND_WEIGHTAGE_MAX),
+
+ REG_VARIABLE(CFG_NSS_WEIGHTAGE_NAME, WLAN_PARAM_Integer,
+ struct hdd_config, nss_weightage,
+ VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
+ CFG_NSS_WEIGHTAGE_DEFAULT,
+ CFG_NSS_WEIGHTAGE_MIN,
+ CFG_NSS_WEIGHTAGE_MAX),
+
+ REG_VARIABLE(CFG_BEAMFORMING_CAP_WEIGHTAGE_NAME, WLAN_PARAM_Integer,
+ struct hdd_config, beamforming_cap_weightage,
+ VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
+ CFG_BEAMFORMING_CAP_WEIGHTAGE_DEFAULT,
+ CFG_BEAMFORMING_CAP_WEIGHTAGE_MIN,
+ CFG_BEAMFORMING_CAP_WEIGHTAGE_MAX),
+
+ REG_VARIABLE(CFG_PCL_WEIGHT_WEIGHTAGE_NAME, WLAN_PARAM_Integer,
+ struct hdd_config, pcl_weightage,
+ VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
+ CFG_PCL_WEIGHT_DEFAULT,
+ CFG_PCL_WEIGHT_MIN,
+ CFG_PCL_WEIGHT_MAX),
+
+ REG_VARIABLE(CFG_CHANNEL_CONGESTION_WEIGHTAGE_NAME, WLAN_PARAM_Integer,
+ struct hdd_config, channel_congestion_weightage,
+ VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
+ CFG_CHANNEL_CONGESTION_WEIGHTAGE_DEFAULT,
+ CFG_CHANNEL_CONGESTION_WEIGHTAGE_MIN,
+ CFG_CHANNEL_CONGESTION_WEIGHTAGE_MAX),
+
+ REG_VARIABLE(CFG_BEST_RSSI_THRESHOLD_NAME, WLAN_PARAM_Integer,
+ struct hdd_config, best_rssi_threshold,
+ VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
+ CFG_BEST_RSSI_THRESHOLD_DEFAULT,
+ CFG_BEST_RSSI_THRESHOLD_MIN,
+ CFG_BEST_RSSI_THRESHOLD_MAX),
+
+ REG_VARIABLE(CFG_GOOD_RSSI_THRESHOLD_NAME, WLAN_PARAM_Integer,
+ struct hdd_config, good_rssi_threshold,
+ VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
+ CFG_GOOD_RSSI_THRESHOLD_DEFAULT,
+ CFG_GOOD_RSSI_THRESHOLD_MIN,
+ CFG_GOOD_RSSI_THRESHOLD_MAX),
+
+ REG_VARIABLE(CFG_BAD_RSSI_THRESHOLD_NAME, WLAN_PARAM_Integer,
+ struct hdd_config, bad_rssi_threshold,
+ VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
+ CFG_BAD_RSSI_THRESHOLD_DEFAULT,
+ CFG_BAD_RSSI_THRESHOLD_MIN,
+ CFG_BAD_RSSI_THRESHOLD_MAX),
+
+ REG_VARIABLE(CFG_GOOD_RSSI_PCNT_NAME, WLAN_PARAM_Integer,
+ struct hdd_config, good_rssi_pcnt,
+ VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
+ CFG_GOOD_RSSI_PCNT_DEFAULT,
+ CFG_GOOD_RSSI_PCNT_MIN,
+ CFG_GOOD_RSSI_PCNT_MAX),
+
+ REG_VARIABLE(CFG_BAD_RSSI_PCNT_NAME, WLAN_PARAM_Integer,
+ struct hdd_config, bad_rssi_pcnt,
+ VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
+ CFG_BAD_RSSI_PCNT_DEFAULT,
+ CFG_BAD_RSSI_PCNT_MIN,
+ CFG_BAD_RSSI_PCNT_MAX),
+
+ REG_VARIABLE(CFG_GOOD_RSSI_BUCKET_SIZE_NAME, WLAN_PARAM_Integer,
+ struct hdd_config, good_rssi_bucket_size,
+ VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
+ CFG_GOOD_RSSI_BUCKET_SIZE_DEFAULT,
+ CFG_GOOD_RSSI_BUCKET_SIZE_MIN,
+ CFG_GOOD_RSSI_BUCKET_SIZE_MAX),
+
+ REG_VARIABLE(CFG_BAD_RSSI_BUCKET_SIZE_NAME, WLAN_PARAM_Integer,
+ struct hdd_config, bad_rssi_bucket_size,
+ VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
+ CFG_BAD_RSSI_BUCKET_SIZE_DEFAULT,
+ CFG_BAD_RSSI_BUCKET_SIZE_MIN,
+ CFG_BAD_RSSI_BUCKET_SIZE_MAX),
+
+ REG_VARIABLE(CFG_RSSI_PERF_5G_THRESHOLD_NAME, WLAN_PARAM_Integer,
+ struct hdd_config, rssi_pref_5g_rssi_thresh,
+ VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
+ CFG_RSSI_PERF_5G_THRESHOLD_DEFAULT,
+ CFG_RSSI_PERF_5G_THRESHOLD_MIN,
+ CFG_RSSI_PERF_5G_THRESHOLD_MAX),
+
+ REG_VARIABLE(CFG_BAND_WIDTH_WEIGHT_PER_INDEX_NAME,
+ WLAN_PARAM_HexInteger,
+ struct hdd_config, bandwidth_weight_per_index,
+ VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
+ CFG_BAND_WIDTH_WEIGHT_PER_INDEX_DEFAULT,
+ CFG_BAND_WIDTH_WEIGHT_PER_INDEX_MIN,
+ CFG_BAND_WIDTH_WEIGHT_PER_INDEX_MAX),
+
+ REG_VARIABLE(CFG_NSS_WEIGHT_PER_INDEX_NAME, WLAN_PARAM_HexInteger,
+ struct hdd_config, nss_weight_per_index,
+ VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
+ CFG_NSS_WEIGHT_PER_INDEX_DEFAULT,
+ CFG_NSS_WEIGHT_PER_INDEX_MIN,
+ CFG_NSS_WEIGHT_PER_INDEX_MAX),
+
+ REG_VARIABLE(CFG_BAND_WEIGHT_PER_INDEX_NAME, WLAN_PARAM_HexInteger,
+ struct hdd_config, band_weight_per_index,
+ VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
+ CFG_BAND_WEIGHT_PER_INDEX_DEFAULT,
+ CFG_BAND_WEIGHT_PER_INDEX_MIN,
+ CFG_BAND_WEIGHT_PER_INDEX_MAX),
+
+ REG_VARIABLE(CFG_ESP_QBSS_SLOTS_NAME, WLAN_PARAM_Integer,
+ struct hdd_config, num_esp_qbss_slots,
+ VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
+ CFG_ESP_QBSS_SLOTS_DEFAULT,
+ CFG_ESP_QBSS_SLOTS_MIN,
+ CFG_ESP_QBSS_SLOTS_MAX),
+
+ REG_VARIABLE(CFG_ESP_QBSS_SCORE_IDX3_TO_0_NAME, WLAN_PARAM_HexInteger,
+ struct hdd_config, esp_qbss_score_slots3_to_0,
+ VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
+ CFG_ESP_QBSS_SCORE_IDX3_TO_0_DEFAULT,
+ CFG_ESP_QBSS_SCORE_IDX3_TO_0_MIN,
+ CFG_ESP_QBSS_SCORE_IDX3_TO_0_MAX),
+
+ REG_VARIABLE(CFG_ESP_QBSS_SCORE_IDX7_TO_4_NAME, WLAN_PARAM_HexInteger,
+ struct hdd_config, esp_qbss_score_slots7_to_4,
+ VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
+ CFG_ESP_QBSS_SCORE_IDX7_TO_4_DEFAULT,
+ CFG_ESP_QBSS_SCORE_IDX7_TO_4_MIN,
+ CFG_ESP_QBSS_SCORE_IDX7_TO_4_MAX),
+
+ REG_VARIABLE(CFG_ESP_QBSS_SCORE_IDX11_TO_8_NAME, WLAN_PARAM_HexInteger,
+ struct hdd_config, esp_qbss_score_slots11_to_8,
+ VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
+ CFG_ESP_QBSS_SCORE_IDX11_TO_8_DEFAULT,
+ CFG_ESP_QBSS_SCORE_IDX11_TO_8_MIN,
+ CFG_ESP_QBSS_SCORE_IDX11_TO_8_MAX),
+
+ REG_VARIABLE(CFG_ESP_QBSS_SCORE_IDX15_TO_12_NAME, WLAN_PARAM_HexInteger,
+ struct hdd_config, esp_qbss_score_slots15_to_12,
+ VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
+ CFG_ESP_QBSS_SCORE_IDX15_TO_12_DEFAULT,
+ CFG_ESP_QBSS_SCORE_IDX15_TO_12_MIN,
+ CFG_ESP_QBSS_SCORE_IDX15_TO_12_MAX),
+
};
/**
@@ -5758,6 +5972,9 @@
hdd_debug("Name = [nNeighborLookupRssiThreshold] Value = [%u] ",
pHddCtx->config->nNeighborLookupRssiThreshold);
+ hdd_debug("Name = [%s] Value = [%d] ",
+ CFG_5G_RSSI_THRESHOLD_OFFSET_NAME,
+ pHddCtx->config->rssi_thresh_offset_5g);
hdd_debug("Name = [delay_before_vdev_stop] Value = [%u] ",
pHddCtx->config->delay_before_vdev_stop);
hdd_debug("Name = [nOpportunisticThresholdDiff] Value = [%u] ",
@@ -6032,6 +6249,12 @@
pHddCtx->config->tso_enable);
hdd_debug("Name = [LROEnable] value = [%d]",
pHddCtx->config->lro_enable);
+ hdd_debug("Name = [%s] value = [%d]",
+ CFG_FLOW_STEERING_ENABLED_NAME,
+ pHddCtx->config->flow_steering_enable);
+ hdd_debug("Name = [%s] value = [%d]",
+ CFG_MAX_MSDUS_PER_RXIND_NAME,
+ pHddCtx->config->max_msdus_per_rxinorderind);
hdd_debug("Name = [active_mode_offload] value = [%d]",
pHddCtx->config->active_mode_offload);
hdd_debug("Name = [gfine_time_meas_cap] value = [%u]",
@@ -6044,6 +6267,12 @@
pHddCtx->config->max_scan_count);
hdd_debug("Name = [%s] value = [%d]",
CFG_RX_MODE_NAME, pHddCtx->config->rx_mode);
+ hdd_debug("Name = [%s] value = [%d]",
+ CFG_CE_SERVICE_MAX_YIELD_TIME_NAME,
+ pHddCtx->config->ce_service_max_yield_time);
+ hdd_debug("Name = [%s] value = [%d]",
+ CFG_CE_SERVICE_MAX_RX_IND_FLUSH_NAME,
+ pHddCtx->config->ce_service_max_rx_ind_flush);
hdd_debug("Name = [%s] Value = [%u]",
CFG_CE_CLASSIFY_ENABLE_NAME,
pHddCtx->config->ce_classify_enabled);
@@ -6117,6 +6346,9 @@
CFG_ROAM_BG_SCAN_CLIENT_BITMAP_NAME,
pHddCtx->config->roam_bg_scan_client_bitmap);
hdd_debug("Name = [%s] Value = [%u]",
+ CFG_ROAM_BG_SCAN_BAD_RSSI_OFFSET_2G_NAME,
+ pHddCtx->config->roam_bad_rssi_thresh_offset_2g);
+ hdd_debug("Name = [%s] Value = [%u]",
CFG_MIN_REST_TIME_NAME,
pHddCtx->config->min_rest_time_conc);
hdd_debug("Name = [%s] Value = [%u]",
@@ -6343,7 +6575,86 @@
pHddCtx->config->lower_brssi_thresh);
hdd_debug("Name = [%s] value = [%u]",
CFG_DTIM_1CHRX_ENABLE_NAME,
- pHddCtx->config->enable_dtim_1chrx);
+ pHddCtx->config->enable_dtim_1chrx);
+ hdd_debug("Name = [%s] value = [%u]",
+ CFG_DOT11P_MODE_NAME,
+ pHddCtx->config->dot11p_mode);
+ hdd_debug("Name = [%s] value = [%u]",
+ CFG_RSSI_WEIGHTAGE_NAME,
+ pHddCtx->config->rssi_weightage);
+ hdd_debug("Name = [%s] value = [%u]",
+ CFG_HT_CAPABILITY_WEIGHTAGE_NAME,
+ pHddCtx->config->ht_caps_weightage);
+ hdd_debug("Name = [%s] value = [%u]",
+ CFG_VHT_CAPABILITY_WEIGHTAGE_NAME,
+ pHddCtx->config->vht_caps_weightage);
+ hdd_debug("Name = [%s] value = [%u]",
+ CFG_CHAN_WIDTH_WEIGHTAGE_NAME,
+ pHddCtx->config->chan_width_weightage);
+ hdd_debug("Name = [%s] value = [%u]",
+ CFG_CHAN_BAND_WEIGHTAGE_NAME,
+ pHddCtx->config->chan_band_weightage);
+ hdd_debug("Name = [%s] value = [%u]",
+ CFG_NSS_WEIGHTAGE_NAME,
+ pHddCtx->config->nss_weightage);
+ hdd_debug("Name = [%s] value = [%u]",
+ CFG_BEAMFORMING_CAP_WEIGHTAGE_NAME,
+ pHddCtx->config->beamforming_cap_weightage);
+ hdd_debug("Name = [%s] value = [%u]",
+ CFG_PCL_WEIGHT_WEIGHTAGE_NAME,
+ pHddCtx->config->pcl_weightage);
+ hdd_debug("Name = [%s] value = [%u]",
+ CFG_CHANNEL_CONGESTION_WEIGHTAGE_NAME,
+ pHddCtx->config->channel_congestion_weightage);
+ hdd_debug("Name = [%s] value = [%u]",
+ CFG_BAND_WIDTH_WEIGHT_PER_INDEX_NAME,
+ pHddCtx->config->bandwidth_weight_per_index);
+ hdd_debug("Name = [%s] value = [%u]",
+ CFG_NSS_WEIGHT_PER_INDEX_NAME,
+ pHddCtx->config->nss_weight_per_index);
+ hdd_debug("Name = [%s] value = [%u]",
+ CFG_BAND_WEIGHT_PER_INDEX_NAME,
+ pHddCtx->config->band_weight_per_index);
+ hdd_debug("Name = [%s] value = [%u]",
+ CFG_BEST_RSSI_THRESHOLD_NAME,
+ pHddCtx->config->best_rssi_threshold);
+ hdd_debug("Name = [%s] value = [%u]",
+ CFG_GOOD_RSSI_THRESHOLD_NAME,
+ pHddCtx->config->good_rssi_threshold);
+ hdd_debug("Name = [%s] value = [%u]",
+ CFG_BAD_RSSI_THRESHOLD_NAME,
+ pHddCtx->config->bad_rssi_threshold);
+ hdd_debug("Name = [%s] value = [%u]",
+ CFG_GOOD_RSSI_PCNT_NAME,
+ pHddCtx->config->good_rssi_pcnt);
+ hdd_debug("Name = [%s] value = [%u]",
+ CFG_BAD_RSSI_PCNT_NAME,
+ pHddCtx->config->bad_rssi_pcnt);
+ hdd_debug("Name = [%s] value = [%u]",
+ CFG_GOOD_RSSI_BUCKET_SIZE_NAME,
+ pHddCtx->config->good_rssi_bucket_size);
+ hdd_debug("Name = [%s] value = [%u]",
+ CFG_BAD_RSSI_BUCKET_SIZE_NAME,
+ pHddCtx->config->bad_rssi_bucket_size);
+ hdd_debug("Name = [%s] value = [%u]",
+ CFG_RSSI_PERF_5G_THRESHOLD_NAME,
+ pHddCtx->config->rssi_pref_5g_rssi_thresh);
+ hdd_debug("Name = [%s] value = [%u]",
+ CFG_ESP_QBSS_SLOTS_NAME,
+ pHddCtx->config->num_esp_qbss_slots);
+ hdd_debug("Name = [%s] value = [%u]",
+ CFG_ESP_QBSS_SCORE_IDX3_TO_0_NAME,
+ pHddCtx->config->esp_qbss_score_slots3_to_0);
+ hdd_debug("Name = [%s] value = [%u]",
+ CFG_ESP_QBSS_SCORE_IDX7_TO_4_NAME,
+ pHddCtx->config->esp_qbss_score_slots7_to_4);
+ hdd_debug("Name = [%s] value = [%u]",
+ CFG_ESP_QBSS_SCORE_IDX11_TO_8_NAME,
+ pHddCtx->config->esp_qbss_score_slots11_to_8);
+ hdd_debug("Name = [%s] value = [%u]",
+ CFG_ESP_QBSS_SCORE_IDX15_TO_12_NAME,
+ pHddCtx->config->esp_qbss_score_slots15_to_12);
+
}
/**
@@ -7593,6 +7904,125 @@
}
/**
+ * hdd_limit_max_per_index_score() - check that per index score doesn't exceed
+ * 100% (0x64). If it exceeds, cap it at 100%
+ *
+ * @per_index_score: per_index_score as input
+ *
+ * Return: per_index_score within the max limit
+ */
+static uint32_t hdd_limit_max_per_index_score(uint32_t per_index_score)
+{
+ uint8_t i, score;
+
+ for (i = 0; i < MAX_INDEX_PER_INI; i++) {
+ score = WLAN_GET_SCORE_PERCENTAGE(per_index_score, i);
+ if (score > MAX_INDEX_SCORE)
+ WLAN_SET_SCORE_PERCENTAGE(per_index_score,
+ MAX_INDEX_SCORE, i);
+ }
+
+ return per_index_score;
+}
+
+/**
+ * hdd_update_bss_score_params() - initialize the sme config for bss score params
+ *
+ * @config: pointer to config
+ * @score_params: bss score params
+ *
+ * Return: None
+ */
+static void hdd_update_bss_score_params(struct hdd_config *config,
+ struct sir_score_config *score_params)
+{
+ int total_weight;
+
+ score_params->weight_cfg.rssi_weightage = config->rssi_weightage;
+ score_params->weight_cfg.ht_caps_weightage = config->ht_caps_weightage;
+ score_params->weight_cfg.vht_caps_weightage =
+ config->vht_caps_weightage;
+ score_params->weight_cfg.chan_width_weightage =
+ config->chan_width_weightage;
+ score_params->weight_cfg.chan_band_weightage =
+ config->chan_band_weightage;
+ score_params->weight_cfg.nss_weightage = config->nss_weightage;
+ score_params->weight_cfg.beamforming_cap_weightage =
+ config->beamforming_cap_weightage;
+ score_params->weight_cfg.pcl_weightage = config->pcl_weightage;
+ score_params->weight_cfg.channel_congestion_weightage =
+ config->channel_congestion_weightage;
+
+ total_weight = score_params->weight_cfg.rssi_weightage +
+ score_params->weight_cfg.ht_caps_weightage +
+ score_params->weight_cfg.vht_caps_weightage +
+ score_params->weight_cfg.chan_width_weightage +
+ score_params->weight_cfg.chan_band_weightage +
+ score_params->weight_cfg.nss_weightage +
+ score_params->weight_cfg.beamforming_cap_weightage +
+ score_params->weight_cfg.pcl_weightage +
+ score_params->weight_cfg.channel_congestion_weightage;
+
+ if (total_weight > BEST_CANDIDATE_MAX_WEIGHT) {
+
+ hdd_err("total weight is greater than %d fallback to default values",
+ BEST_CANDIDATE_MAX_WEIGHT);
+
+ score_params->weight_cfg.rssi_weightage = RSSI_WEIGHTAGE;
+ score_params->weight_cfg.ht_caps_weightage =
+ HT_CAPABILITY_WEIGHTAGE;
+ score_params->weight_cfg.vht_caps_weightage = VHT_CAP_WEIGHTAGE;
+ score_params->weight_cfg.chan_width_weightage =
+ CHAN_WIDTH_WEIGHTAGE;
+ score_params->weight_cfg.chan_band_weightage =
+ CHAN_BAND_WEIGHTAGE;
+ score_params->weight_cfg.nss_weightage = NSS_WEIGHTAGE;
+ score_params->weight_cfg.beamforming_cap_weightage =
+ BEAMFORMING_CAP_WEIGHTAGE;
+ score_params->weight_cfg.pcl_weightage = PCL_WEIGHT;
+ score_params->weight_cfg.channel_congestion_weightage =
+ CHANNEL_CONGESTION_WEIGHTAGE;
+ }
+
+ score_params->bandwidth_weight_per_index =
+ hdd_limit_max_per_index_score(
+ config->bandwidth_weight_per_index);
+ score_params->nss_weight_per_index =
+ hdd_limit_max_per_index_score(config->nss_weight_per_index);
+ score_params->band_weight_per_index =
+ hdd_limit_max_per_index_score(config->band_weight_per_index);
+
+ score_params->rssi_score.best_rssi_threshold =
+ config->best_rssi_threshold;
+ score_params->rssi_score.good_rssi_threshold =
+ config->good_rssi_threshold;
+ score_params->rssi_score.bad_rssi_threshold =
+ config->bad_rssi_threshold;
+ score_params->rssi_score.good_rssi_pcnt = config->good_rssi_pcnt;
+ score_params->rssi_score.bad_rssi_pcnt = config->bad_rssi_pcnt;
+ score_params->rssi_score.good_rssi_bucket_size =
+ config->good_rssi_bucket_size;
+ score_params->rssi_score.bad_rssi_bucket_size =
+ config->bad_rssi_bucket_size;
+ score_params->rssi_score.rssi_pref_5g_rssi_thresh =
+ config->rssi_pref_5g_rssi_thresh;
+
+ score_params->esp_qbss_scoring.num_slot = config->num_esp_qbss_slots;
+ score_params->esp_qbss_scoring.score_pcnt3_to_0 =
+ hdd_limit_max_per_index_score(
+ config->esp_qbss_score_slots3_to_0);
+ score_params->esp_qbss_scoring.score_pcnt7_to_4 =
+ hdd_limit_max_per_index_score(
+ config->esp_qbss_score_slots7_to_4);
+ score_params->esp_qbss_scoring.score_pcnt11_to_8 =
+ hdd_limit_max_per_index_score(
+ config->esp_qbss_score_slots11_to_8);
+ score_params->esp_qbss_scoring.score_pcnt15_to_12 =
+ hdd_limit_max_per_index_score(
+ config->esp_qbss_score_slots15_to_12);
+}
+
+/**
* hdd_set_sme_config() -initializes the sme configuration parameters
*
* @pHddCtx: the pointer to hdd context
@@ -7763,6 +8193,8 @@
}
smeConfig->csrConfig.neighborRoamConfig.nNeighborLookupRssiThreshold =
pConfig->nNeighborLookupRssiThreshold;
+ smeConfig->csrConfig.neighborRoamConfig.rssi_thresh_offset_5g =
+ pConfig->rssi_thresh_offset_5g;
smeConfig->csrConfig.neighborRoamConfig.delay_before_vdev_stop =
pConfig->delay_before_vdev_stop;
smeConfig->csrConfig.neighborRoamConfig.nOpportunisticThresholdDiff =
@@ -7907,6 +8339,8 @@
pHddCtx->config->roam_bg_scan_bad_rssi_thresh;
smeConfig->csrConfig.roam_bg_scan_client_bitmap =
pHddCtx->config->roam_bg_scan_client_bitmap;
+ smeConfig->csrConfig.roam_bad_rssi_thresh_offset_2g =
+ pHddCtx->config->roam_bad_rssi_thresh_offset_2g;
smeConfig->csrConfig.obss_width_interval =
pHddCtx->config->obss_width_trigger_interval;
smeConfig->csrConfig.obss_active_dwelltime =
@@ -7987,6 +8421,10 @@
smeConfig->csrConfig.num_11ag_tx_chains =
pHddCtx->config->num_11ag_tx_chains;
+ hdd_update_bss_score_params(pHddCtx->config,
+ &smeConfig->csrConfig.bss_score_params);
+
+
status = sme_update_config(pHddCtx->hHal, smeConfig);
if (!QDF_IS_STATUS_SUCCESS(status))
hdd_err("sme_update_config() failure: %d", status);
diff --git a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_cfg80211.c b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_cfg80211.c
index 14bff0a..a17fa7f 100644
--- a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_cfg80211.c
+++ b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_cfg80211.c
@@ -236,6 +236,9 @@
HDD5GHZCHAN(5785, 157, 0),
HDD5GHZCHAN(5805, 161, 0),
HDD5GHZCHAN(5825, 165, 0),
+};
+
+static const struct ieee80211_channel hdd_channels_dot11p[] = {
HDD5GHZCHAN(5852, 170, 0),
HDD5GHZCHAN(5855, 171, 0),
HDD5GHZCHAN(5860, 172, 0),
@@ -11183,6 +11186,7 @@
{
int i, j;
hdd_context_t *pHddCtx = wiphy_priv(wiphy);
+ int len = 0;
ENTER();
@@ -11325,18 +11329,43 @@
(eHDD_DOT11_MODE_11b_ONLY != pCfg->dot11Mode) &&
(eHDD_DOT11_MODE_11g_ONLY != pCfg->dot11Mode))) {
wiphy->bands[HDD_NL80211_BAND_5GHZ] = &wlan_hdd_band_5_ghz;
- wiphy->bands[HDD_NL80211_BAND_5GHZ]->channels =
+
+ if (pCfg->dot11p_mode) {
+ wiphy->bands[HDD_NL80211_BAND_5GHZ]->channels =
+ qdf_mem_malloc(sizeof(hdd_channels_5_ghz) +
+ sizeof(hdd_channels_dot11p));
+ if (wiphy->bands[HDD_NL80211_BAND_5GHZ]->channels ==
+ NULL) {
+ hdd_err("Not enough memory to for channels");
+ goto mem_fail;
+ }
+ wiphy->bands[HDD_NL80211_BAND_5GHZ]->n_channels =
+ QDF_ARRAY_SIZE(hdd_channels_5_ghz) +
+ QDF_ARRAY_SIZE(hdd_channels_dot11p);
+
+ qdf_mem_copy(wiphy->bands[HDD_NL80211_BAND_5GHZ]->
+ channels, &hdd_channels_5_ghz[0],
+ sizeof(hdd_channels_5_ghz));
+ len = sizeof(hdd_channels_5_ghz);
+ qdf_mem_copy((char *)wiphy->
+ bands[HDD_NL80211_BAND_5GHZ]->channels
+ + len,
+ &hdd_channels_dot11p[0],
+ sizeof(hdd_channels_dot11p));
+
+ } else {
+ wiphy->bands[HDD_NL80211_BAND_5GHZ]->channels =
qdf_mem_malloc(sizeof(hdd_channels_5_ghz));
- if (wiphy->bands[HDD_NL80211_BAND_5GHZ]->channels == NULL) {
- hdd_err("Not enough memory to allocate channels");
- qdf_mem_free(wiphy->
- bands[HDD_NL80211_BAND_2GHZ]->channels);
- wiphy->bands[HDD_NL80211_BAND_2GHZ]->channels = NULL;
- return -ENOMEM;
+ if (wiphy->bands[HDD_NL80211_BAND_5GHZ]->channels ==
+ NULL) {
+ hdd_err("Not enough memory to for channels");
+ goto mem_fail;
+ }
+ qdf_mem_copy(wiphy->
+ bands[HDD_NL80211_BAND_5GHZ]->channels,
+ &hdd_channels_5_ghz[0],
+ sizeof(hdd_channels_5_ghz));
}
- qdf_mem_copy(wiphy->bands[HDD_NL80211_BAND_5GHZ]->channels,
- &hdd_channels_5_ghz[0],
- sizeof(hdd_channels_5_ghz));
}
for (i = 0; i < HDD_NUM_NL80211_BANDS; i++) {
@@ -11413,6 +11442,13 @@
EXIT();
return 0;
+
+mem_fail:
+ if (wiphy->bands[HDD_NL80211_BAND_2GHZ]->channels != NULL) {
+ qdf_mem_free(wiphy->bands[HDD_NL80211_BAND_2GHZ]->channels);
+ wiphy->bands[HDD_NL80211_BAND_2GHZ]->channels = NULL;
+ }
+ return -ENOMEM;
}
/**
diff --git a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_driver_ops.c b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_driver_ops.c
index 1becac1..260ceaf 100644
--- a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_driver_ops.c
+++ b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_driver_ops.c
@@ -123,6 +123,17 @@
}
/**
+ * hdd_is_target_ready() - API to query if target is in ready state
+ * @data: Private Data
+ *
+ * Return: bool
+ */
+static bool hdd_is_target_ready(void *data)
+{
+ return cds_is_target_ready();
+}
+
+/**
* hdd_hif_init_driver_state_callbacks() - API to initialize HIF callbacks
* @data: Private Data
* @cbk: HIF Driver State callbacks
@@ -140,6 +151,7 @@
cbk->is_recovery_in_progress = hdd_is_recovery_in_progress;
cbk->is_load_unload_in_progress = hdd_is_load_or_unload_in_progress;
cbk->is_driver_unloading = hdd_is_driver_unloading;
+ cbk->is_target_ready = hdd_is_target_ready;
}
/**
@@ -254,6 +266,11 @@
}
}
+ hif_set_ce_service_max_yield_time(cds_get_context(QDF_MODULE_ID_HIF),
+ hdd_ctx->config->ce_service_max_yield_time);
+ hif_set_ce_service_max_rx_ind_flush(cds_get_context(QDF_MODULE_ID_HIF),
+ hdd_ctx->config->ce_service_max_rx_ind_flush);
+
return 0;
err_hif_close:
@@ -1200,6 +1217,30 @@
wlan_hdd_notify_handler(state);
}
+static void wlan_hdd_purge_notifier(void)
+{
+ hdd_context_t *hdd_ctx;
+
+ ENTER();
+
+ hdd_ctx = cds_get_context(QDF_MODULE_ID_HDD);
+ if (!hdd_ctx) {
+ hdd_err("hdd context is NULL return!!");
+ return;
+ }
+
+ mutex_lock(&hdd_ctx->iface_change_lock);
+ if (QDF_TIMER_STATE_RUNNING ==
+ qdf_mc_timer_get_current_state(&hdd_ctx->iface_change_timer)) {
+ qdf_mc_timer_stop(&hdd_ctx->iface_change_timer);
+ }
+ cds_shutdown_notifier_call();
+ cds_shutdown_notifier_purge();
+ mutex_unlock(&hdd_ctx->iface_change_lock);
+
+ EXIT();
+}
+
/**
* wlan_hdd_pld_uevent() - update driver status
* @dev: device
@@ -1210,10 +1251,26 @@
static void wlan_hdd_pld_uevent(struct device *dev,
struct pld_uevent_data *uevent)
{
- if (uevent->uevent == PLD_RECOVERY)
+ ENTER();
+
+ switch (uevent->uevent) {
+ case PLD_RECOVERY:
cds_set_recovery_in_progress(true);
- else if (uevent->uevent == PLD_FW_DOWN)
+ hdd_pld_ipa_uc_shutdown_pipes();
+ wlan_hdd_purge_notifier();
+ break;
+ case PLD_FW_DOWN:
cds_set_fw_state(CDS_FW_STATE_DOWN);
+ cds_set_target_ready(false);
+ wlan_hdd_purge_notifier();
+ break;
+ case PLD_FW_READY:
+ cds_set_target_ready(true);
+ break;
+ }
+
+ EXIT();
+ return;
}
#ifdef FEATURE_RUNTIME_PM
diff --git a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_hostapd.c b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_hostapd.c
index f3aaf83..69b07bf 100644
--- a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_hostapd.c
+++ b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_hostapd.c
@@ -1799,8 +1799,7 @@
return QDF_STATUS_E_FAILURE;
}
#ifdef IPA_OFFLOAD
- if (!cds_is_driver_recovering() &&
- hdd_ipa_is_enabled(pHddCtx)) {
+ if (hdd_ipa_is_enabled(pHddCtx)) {
status = hdd_ipa_wlan_evt(pHostapdAdapter, staId,
HDD_IPA_CLIENT_DISCONNECT,
pSapEvent->sapevt.
diff --git a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_ipa.c b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_ipa.c
index 632cd56..eb8845e 100644
--- a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_ipa.c
+++ b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_ipa.c
@@ -1791,6 +1791,13 @@
/* RM PROD request sync return
* enable pipe immediately
*/
+ if (!hdd_ipa->ipa_pipes_down) {
+ HDD_IPA_LOG(QDF_TRACE_LEVEL_DEBUG,
+ "%s: IPA WDI Pipe already activated",
+ __func__);
+ return 0;
+ }
+
if (hdd_ipa_uc_enable_pipes(hdd_ipa)) {
HDD_IPA_LOG(QDF_TRACE_LEVEL_ERROR,
"%s: IPA WDI Pipe activation failed",
@@ -1798,6 +1805,10 @@
hdd_ipa->resource_loading = false;
return -EBUSY;
}
+ } else {
+ HDD_IPA_LOG(QDF_TRACE_LEVEL_INFO,
+ "%s: IPA WDI Pipe activation deferred",
+ __func__);
}
} else {
/* RM Disabled
diff --git a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_main.c b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_main.c
index 490c20f..1b2a0fc 100644
--- a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_main.c
+++ b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_main.c
@@ -368,6 +368,40 @@
return 0;
}
+/**
+ * hdd_wait_for_recovery_completion() - Wait for cds recovery completion
+ *
+ * Block the unloading of the driver (or) interface up until the
+ * cds recovery is completed
+ *
+ * Return: true for recovery completion else false
+ */
+static bool hdd_wait_for_recovery_completion(void)
+{
+ int retry = 0;
+
+ /* Wait for recovery to complete */
+ while (cds_is_driver_recovering()) {
+ if (retry == HDD_MOD_EXIT_SSR_MAX_RETRIES/2)
+ hdd_err("Recovery in progress; wait here!!!");
+ msleep(1000);
+ if (retry++ == HDD_MOD_EXIT_SSR_MAX_RETRIES) {
+ hdd_err("SSR never completed, error");
+ /*
+ * Trigger the bug_on in the internal builds, in the
+ * customer builds self-recovery will be enabled
+ * in those cases just return error.
+ */
+ if (cds_is_self_recovery_enabled())
+ return false;
+ QDF_BUG(0);
+ }
+ }
+
+ hdd_info("Recovery completed successfully!");
+ return true;
+}
+
static int __hdd_netdev_notifier_call(struct notifier_block *nb,
unsigned long state, void *data)
{
@@ -2023,6 +2057,16 @@
MTRACE(qdf_trace(QDF_MODULE_ID_HDD, TRACE_CODE_HDD_OPEN_REQUEST,
adapter->sessionId, adapter->device_mode));
+ /* Nothing to be done if device is unloading */
+ if (cds_is_driver_unloading()) {
+ hdd_err("Driver is unloading can not open the hdd");
+ return -EBUSY;
+ }
+
+ if (!hdd_wait_for_recovery_completion()) {
+ hdd_err("Recovery failed");
+ return -EIO;
+ }
mutex_lock(&hdd_init_deinit_lock);
/*
@@ -5531,6 +5575,7 @@
*/
static void hdd_context_destroy(hdd_context_t *hdd_ctx)
{
+ cds_set_context(QDF_MODULE_ID_HDD, NULL);
hdd_logging_sock_deactivate_svc(hdd_ctx);
@@ -5653,6 +5698,7 @@
}
wlan_destroy_bug_report_lock();
+ unregister_netdevice_notifier(&hdd_netdev_notifier);
/*
* Close the scheduler before calling cds_close to make sure
@@ -5681,9 +5727,6 @@
hdd_runtime_suspend_context_deinit(hdd_ctx);
hdd_close_all_adapters(hdd_ctx, false);
-
- unregister_netdevice_notifier(&hdd_netdev_notifier);
-
hdd_ipa_cleanup(hdd_ctx);
/* Free up RoC request queue and flush workqueue */
@@ -7530,8 +7573,6 @@
hdd_override_ini_config(hdd_ctx);
- ((cds_context_type *) (p_cds_context))->pHDDContext = (void *)hdd_ctx;
-
ret = hdd_context_init(hdd_ctx);
if (ret)
@@ -7563,6 +7604,8 @@
if (cds_is_multicast_logging())
wlan_logging_set_log_level();
+ cds_set_context(QDF_MODULE_ID_HDD, hdd_ctx);
+
skip_multicast_logging:
hdd_set_trace_level_for_each(hdd_ctx);
@@ -8007,6 +8050,8 @@
cds_cfg->max_station = hdd_ctx->config->maxNumberOfPeers;
cds_cfg->sub_20_channel_width = WLAN_SUB_20_CH_WIDTH_NONE;
cds_cfg->flow_steering_enabled = hdd_ctx->config->flow_steering_enable;
+ cds_cfg->max_msdus_per_rxinorderind =
+ hdd_ctx->config->max_msdus_per_rxinorderind;
cds_cfg->self_recovery_enabled = hdd_ctx->config->enableSelfRecovery;
cds_cfg->fw_timeout_crash = hdd_ctx->config->fw_timeout_crash;
cds_cfg->active_uc_bpf_mode = hdd_ctx->config->active_uc_bpf_mode;
@@ -10897,27 +10942,6 @@
return ret;
}
-/**
- * hdd_wait_for_recovery_completion() - Wait for cds recovery completion
- *
- * Block the unloading of the driver until the cds recovery is completed
- *
- * Return: None
- */
-static void hdd_wait_for_recovery_completion(void)
-{
- int retry = 0;
-
- /* Wait for recovery to complete */
- while (cds_is_driver_recovering()) {
- hdd_err("Recovery in progress; wait here!!!");
- msleep(1000);
- if (retry++ == HDD_MOD_EXIT_SSR_MAX_RETRIES) {
- hdd_err("SSR never completed, error");
- QDF_BUG(0);
- }
- }
-}
/**
* __hdd_module_exit - Module exit helper
@@ -11644,6 +11668,15 @@
return 0;
}
+void hdd_pld_ipa_uc_shutdown_pipes(void)
+{
+ hdd_context_t *hdd_ctx = cds_get_context(QDF_MODULE_ID_HDD);
+ if (!hdd_ctx)
+ return;
+
+ hdd_ipa_uc_force_pipe_shutdown(hdd_ctx);
+}
+
/* Register the module init/exit functions */
module_init(hdd_module_init);
module_exit(hdd_module_exit);
diff --git a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_napi.c b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_napi.c
index b2848e7..1a4b4c9 100644
--- a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_napi.c
+++ b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_napi.c
@@ -459,23 +459,17 @@
struct qca_napi_data *napid;
struct qca_napi_info *napii;
struct qca_napi_stat *napis;
- /*
- * Expecting each NAPI bucket item to need at max 5 numerals + space for
- * formatting. For example "10000 " Thus the array needs to have
- * (5 + 1) * QCA_NAPI_NUM_BUCKETS bytes of space. Leaving one space at
- * the end of the "buf" arrary for end of string char.
- */
- char buf[6 * QCA_NAPI_NUM_BUCKETS + 1] = {'\0'};
+#define MAX_SCRATCH_BUFFER_SIZE 1024
+ char buf[MAX_SCRATCH_BUFFER_SIZE] = {'\0'};
napid = hdd_napi_get_all();
if (NULL == napid) {
hdd_err("%s unable to retrieve napi structure", __func__);
return -EFAULT;
}
- qdf_print("[NAPI %u][BL %d]: scheds polls comps done t-lim p-lim corr napi-buckets(%d)",
+ qdf_print("[NAPI %u][BL %d]: scheds polls comps done t-lim p-lim corr max_time",
napid->napi_mode,
- hif_napi_cpu_blacklist(napid, BLACKLIST_QUERY),
- QCA_NAPI_NUM_BUCKETS);
+ hif_napi_cpu_blacklist(napid, BLACKLIST_QUERY));
for (i = 0; i < CE_COUNT_MAX; i++)
if (napid->ce_map & (0x01 << i)) {
@@ -485,17 +479,12 @@
for (j = 0; j < num_possible_cpus(); j++) {
napis = &(napii->stats[j]);
- n = 0;
- max = sizeof(buf);
- for (k = 0; k < QCA_NAPI_NUM_BUCKETS; k++) {
- n += scnprintf(
- buf + n, max - n,
- " %d",
- napis->napi_budget_uses[k]);
- }
- if (napis->napi_schedules != 0)
- qdf_print("NAPI[%2d]CPU[%d]: %7d %7d %7d %7d %5d %5d %5d %s",
+ if (napis->napi_schedules != 0) {
+ n = 0;
+ buf[0] = '\0';
+ max = sizeof(buf);
+ qdf_print("NAPI[%2d]CPU[%d]: %7d %7d %7d %7d %5d %5d %5d %9llu %s",
i, j,
napis->napi_schedules,
napis->napi_polls,
@@ -504,11 +493,35 @@
napis->time_limit_reached,
napis->rxpkt_thresh_reached,
napis->cpu_corrected,
+ napis->napi_max_poll_time,
buf);
+ n = 0;
+ for (k = 0; k < QCA_NAPI_TIME_BUCKETS;
+ k++) {
+ n += scnprintf(
+ buf + n, max - n,
+ " %d", napis->
+ napi_time_budget[k]);
+ }
+ qdf_print("NAPI[%2d]CPU[%d]: TIMES: %s",
+ i, j, buf);
+
+ n = 0;
+ for (k = 0; k < QCA_NAPI_MSG_BUCKETS;
+ k++) {
+ n += scnprintf(
+ buf + n, max - n,
+ " %d", napis->
+ napi_msg_budget[k]);
+ }
+ qdf_print("NAPI[%2d]CPU[%d]: MESGS: %s",
+ i, j, buf);
+ }
}
}
hif_napi_stats(napid);
return 0;
+#undef MAX_SCRATCH_BUFFER_SIZE
}
diff --git a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_p2p.c b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_p2p.c
index f2aae8f..34b9989 100644
--- a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_p2p.c
+++ b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_p2p.c
@@ -116,10 +116,13 @@
"TDLS Peer Traffic Response",
"TDLS Discovery Request"};
-static bool wlan_hdd_is_type_p2p_action(const u8 *buf)
+static bool wlan_hdd_is_type_p2p_action(const u8 *buf, uint32_t len)
{
const u8 *ouiPtr;
+ if (len < WLAN_HDD_PUBLIC_ACTION_FRAME_SUB_TYPE_OFFSET + 1)
+ return false;
+
if (buf[WLAN_HDD_PUBLIC_ACTION_FRAME_CATEGORY_OFFSET] !=
WLAN_HDD_PUBLIC_ACTION_FRAME)
return false;
@@ -140,11 +143,11 @@
return true;
}
-static bool hdd_p2p_is_action_type_rsp(const u8 *buf)
+static bool hdd_p2p_is_action_type_rsp(const u8 *buf, uint32_t len)
{
enum action_frm_type actionFrmType;
- if (wlan_hdd_is_type_p2p_action(buf)) {
+ if (wlan_hdd_is_type_p2p_action(buf, len)) {
actionFrmType =
buf[WLAN_HDD_PUBLIC_ACTION_FRAME_SUB_TYPE_OFFSET];
if (actionFrmType != WLAN_HDD_INVITATION_REQ
@@ -1813,8 +1816,8 @@
hdd_remain_on_chan_ctx_t *pRemainChanCtx;
hdd_context_t *pHddCtx = WLAN_HDD_GET_CTX(pAdapter);
uint16_t extendedWait = 0;
- uint8_t type = WLAN_HDD_GET_TYPE_FRM_FC(buf[0]);
- uint8_t subType = WLAN_HDD_GET_SUBTYPE_FRM_FC(buf[0]);
+ uint8_t type;
+ uint8_t subType;
enum action_frm_type actionFrmType;
bool noack = 0;
int status;
@@ -1823,9 +1826,18 @@
uint16_t current_freq;
uint8_t home_ch = 0;
bool enb_random_mac = false;
+ uint32_t mgmt_hdr_len = sizeof(struct ieee80211_hdr_3addr);
ENTER();
+ if (len < mgmt_hdr_len + 1) {
+ hdd_err("Invalid Length");
+ return -EINVAL;
+ }
+
+ type = WLAN_HDD_GET_TYPE_FRM_FC(buf[0]);
+ subType = WLAN_HDD_GET_SUBTYPE_FRM_FC(buf[0]);
+
if (QDF_GLOBAL_FTM_MODE == hdd_get_conparam()) {
hdd_err("Command not allowed in FTM mode");
return -EINVAL;
@@ -1844,19 +1856,24 @@
if (0 != status)
return status;
- hdd_debug("Device_mode %s(%d) type: %d, wait: %d, offchan: %d, category: %d, actionId: %d",
+ hdd_debug("Device_mode %s(%d) type: %d, wait: %d, offchan: %d",
hdd_device_mode_to_string(pAdapter->device_mode),
- pAdapter->device_mode, type, wait, offchan,
- buf[WLAN_HDD_PUBLIC_ACTION_FRAME_BODY_OFFSET +
- WLAN_HDD_PUBLIC_ACTION_FRAME_CATEGORY_OFFSET],
- buf[WLAN_HDD_PUBLIC_ACTION_FRAME_BODY_OFFSET +
- WLAN_HDD_PUBLIC_ACTION_FRAME_ACTION_OFFSET]);
+ pAdapter->device_mode, type, wait, offchan);
+
+ if (type == SIR_MAC_MGMT_FRAME && subType == SIR_MAC_MGMT_ACTION &&
+ len > IEEE80211_MIN_ACTION_SIZE)
+ hdd_debug("category: %d, actionId: %d",
+ buf[WLAN_HDD_PUBLIC_ACTION_FRAME_BODY_OFFSET +
+ WLAN_HDD_PUBLIC_ACTION_FRAME_CATEGORY_OFFSET],
+ buf[WLAN_HDD_PUBLIC_ACTION_FRAME_BODY_OFFSET +
+ WLAN_HDD_PUBLIC_ACTION_FRAME_ACTION_OFFSET]);
#ifdef WLAN_FEATURE_P2P_DEBUG
if ((type == SIR_MAC_MGMT_FRAME) &&
(subType == SIR_MAC_MGMT_ACTION) &&
wlan_hdd_is_type_p2p_action(&buf
- [WLAN_HDD_PUBLIC_ACTION_FRAME_BODY_OFFSET])) {
+ [WLAN_HDD_PUBLIC_ACTION_FRAME_BODY_OFFSET],
+ len - mgmt_hdr_len)) {
actionFrmType = buf[WLAN_HDD_PUBLIC_ACTION_FRAME_TYPE_OFFSET];
if (actionFrmType >= MAX_P2P_ACTION_FRAME_TYPE) {
hdd_debug("[P2P] unknown[%d] ---> OTA to " MAC_ADDRESS_STR,
@@ -2000,7 +2017,8 @@
if ((type == SIR_MAC_MGMT_FRAME) &&
(subType == SIR_MAC_MGMT_ACTION) &&
hdd_p2p_is_action_type_rsp(&buf
- [WLAN_HDD_PUBLIC_ACTION_FRAME_BODY_OFFSET])
+ [WLAN_HDD_PUBLIC_ACTION_FRAME_BODY_OFFSET],
+ len - mgmt_hdr_len)
&& cfgState->remain_on_chan_ctx
&& cfgState->current_freq == chan->center_freq) {
if (QDF_TIMER_STATE_RUNNING ==
@@ -2184,7 +2202,8 @@
if ((type == SIR_MAC_MGMT_FRAME) &&
(subType == SIR_MAC_MGMT_ACTION) &&
wlan_hdd_is_type_p2p_action(&buf
- [WLAN_HDD_PUBLIC_ACTION_FRAME_BODY_OFFSET])) {
+ [WLAN_HDD_PUBLIC_ACTION_FRAME_BODY_OFFSET],
+ len - mgmt_hdr_len)) {
actionFrmType =
buf[WLAN_HDD_PUBLIC_ACTION_FRAME_TYPE_OFFSET];
hdd_debug("Tx Action Frame %u", actionFrmType);
diff --git a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_power.c b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_power.c
index d5c98f1..51a8ef7 100644
--- a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_power.c
+++ b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_power.c
@@ -1454,6 +1454,7 @@
if (true == pHddCtx->isMcThreadSuspended) {
complete(&cds_sched_context->ResumeMcEvent);
pHddCtx->isMcThreadSuspended = false;
+ pHddCtx->isWiphySuspended = false;
}
#ifdef QCA_CONFIG_SMP
if (true == pHddCtx->is_ol_rx_thread_suspended) {
diff --git a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_regulatory.c b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_regulatory.c
index 3eada3c..c0a1dc2 100644
--- a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_regulatory.c
+++ b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_regulatory.c
@@ -74,10 +74,11 @@
static const struct ieee80211_regdomain
hdd_world_regrules_60_61_62 = {
- .n_reg_rules = 4,
+ .n_reg_rules = 5,
.alpha2 = "00",
.reg_rules = {
REG_RULE_2412_2462,
+ REG_RULE_2467_2472,
REG_RULE_5180_5320,
REG_RULE_5500_5700,
REG_RULE_5745_5825,
diff --git a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_wmm.c b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_wmm.c
index 77ed25b9..a5b51c6 100644
--- a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_wmm.c
+++ b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_wmm.c
@@ -63,6 +63,7 @@
#include <cds_sched.h>
#include "sme_api.h"
+#define WLAN_HDD_HIPRI_TOS 0xc0
#define WLAN_HDD_MAX_DSCP 0x3f
#define HDD_WMM_UP_TO_AC_MAP_SIZE 8
@@ -1411,6 +1412,22 @@
return QDF_STATUS_SUCCESS;
}
+static inline unsigned char hdd_wmm_check_ip_proto(unsigned char ip_proto,
+ unsigned char ip_tos,
+ bool *is_hipri)
+{
+ switch (ip_proto) {
+ case IPPROTO_ICMP:
+ case IPPROTO_ICMPV6:
+ *is_hipri = true;
+ return WLAN_HDD_HIPRI_TOS;
+
+ default:
+ *is_hipri = false;
+ return ip_tos;
+ }
+}
+
/**
* hdd_wmm_classify_pkt() - Function which will classify an OS packet
* into a WMM AC based on DSCP
@@ -1418,7 +1435,7 @@
* @adapter: adapter upon which the packet is being transmitted
* @skb: pointer to network buffer
* @user_pri: user priority of the OS packet
- * @is_eapol: eapol packet flag
+ * @is_hipri: high priority packet flag
*
* Return: None
*/
@@ -1426,7 +1443,7 @@
void hdd_wmm_classify_pkt(hdd_adapter_t *adapter,
struct sk_buff *skb,
sme_QosWmmUpType *user_pri,
- bool *is_eapol)
+ bool *is_hipri)
{
unsigned char dscp;
unsigned char tos;
@@ -1453,14 +1470,16 @@
if (eth_hdr->eth_II.h_proto == htons(ETH_P_IP)) {
/* case 1: Ethernet II IP packet */
ip_hdr = (struct iphdr *)&pkt[sizeof(eth_hdr->eth_II)];
- tos = ip_hdr->tos;
+ tos = hdd_wmm_check_ip_proto(ip_hdr->protocol, ip_hdr->tos,
+ is_hipri);
#ifdef HDD_WMM_DEBUG
hdd_info("Ethernet II IP Packet, tos is %d", tos);
#endif /* HDD_WMM_DEBUG */
-
} else if (eth_hdr->eth_II.h_proto == htons(ETH_P_IPV6)) {
ipv6hdr = ipv6_hdr(skb);
- tos = ntohs(*(const __be16 *)ipv6hdr) >> 4;
+ tos = hdd_wmm_check_ip_proto(
+ ipv6hdr->nexthdr, ntohs(*(const __be16 *)ipv6hdr) >> 4,
+ is_hipri);
#ifdef HDD_WMM_DEBUG
hdd_info("Ethernet II IPv6 Packet, tos is %d", tos);
#endif /* HDD_WMM_DEBUG */
@@ -1471,7 +1490,8 @@
(eth_hdr->eth_8023.h_proto == htons(ETH_P_IP))) {
/* case 2: 802.3 LLC/SNAP IP packet */
ip_hdr = (struct iphdr *)&pkt[sizeof(eth_hdr->eth_8023)];
- tos = ip_hdr->tos;
+ tos = hdd_wmm_check_ip_proto(ip_hdr->protocol, ip_hdr->tos,
+ is_hipri);
#ifdef HDD_WMM_DEBUG
hdd_info("802.3 LLC/SNAP IP Packet, tos is %d", tos);
#endif /* HDD_WMM_DEBUG */
@@ -1484,7 +1504,8 @@
ip_hdr =
(struct iphdr *)
&pkt[sizeof(eth_hdr->eth_IIv)];
- tos = ip_hdr->tos;
+ tos = hdd_wmm_check_ip_proto(ip_hdr->protocol,
+ ip_hdr->tos, is_hipri);
#ifdef HDD_WMM_DEBUG
hdd_info("Ethernet II VLAN tagged IP Packet, tos is %d",
tos);
@@ -1504,30 +1525,39 @@
ip_hdr =
(struct iphdr *)
&pkt[sizeof(eth_hdr->eth_8023v)];
- tos = ip_hdr->tos;
+ tos = hdd_wmm_check_ip_proto(ip_hdr->protocol,
+ ip_hdr->tos, is_hipri);
#ifdef HDD_WMM_DEBUG
hdd_info("802.3 LLC/SNAP VLAN tagged IP Packet, tos is %d",
tos);
#endif /* HDD_WMM_DEBUG */
} else {
/* default */
+ *is_hipri = false;
+ tos = 0;
#ifdef HDD_WMM_DEBUG
hdd_warn("VLAN tagged Unhandled Protocol, using default tos");
#endif /* HDD_WMM_DEBUG */
- tos = 0;
}
+ } else if (eth_hdr->eth_II.h_proto == htons(HDD_ETHERTYPE_802_1_X)) {
+ *is_hipri = true;
+ tos = WLAN_HDD_HIPRI_TOS;
+#ifdef HDD_WMM_DEBUG
+ hdd_info("802.1x packet, tos is %d", tos);
+#endif /* HDD_WMM_DEBUG */
+ } else if (skb->protocol == htons(ETH_P_ARP)) {
+ *is_hipri = true;
+ tos = WLAN_HDD_HIPRI_TOS;
+#ifdef HDD_WMM_DEBUG
+ hdd_info("ARP packet, tos is %d", tos);
+#endif /* HDD_WMM_DEBUG */
} else {
/* default */
+ *is_hipri = false;
+ tos = 0;
#ifdef HDD_WMM_DEBUG
hdd_warn("Unhandled Protocol, using default tos");
#endif /* HDD_WMM_DEBUG */
- /* Give the highest priority to 802.1x packet */
- if (eth_hdr->eth_II.h_proto ==
- htons(HDD_ETHERTYPE_802_1_X)) {
- tos = 0xC0;
- *is_eapol = true;
- } else
- tos = 0;
}
dscp = (tos >> 2) & 0x3f;
@@ -1557,20 +1587,20 @@
/**
* hdd_get_queue_index() - get queue index
* @up: user priority
- * @is_eapol: is_eapol flag
+ * @is_hipri: high priority packet flag
*
* Return: queue_index
*/
static
-uint16_t hdd_get_queue_index(uint16_t up, bool is_eapol)
+uint16_t hdd_get_queue_index(u16 up, bool is_hipri)
{
- if (qdf_unlikely(is_eapol == true))
+ if (qdf_unlikely(is_hipri))
return HDD_LINUX_AC_HI_PRIO;
return __hdd_get_queue_index(up);
}
#else
static
-uint16_t hdd_get_queue_index(uint16_t up, bool is_eapol)
+uint16_t hdd_get_queue_index(u16 up, bool is_hipri)
{
return __hdd_get_queue_index(up);
}
@@ -1600,7 +1630,7 @@
uint16_t queueIndex;
hdd_adapter_t *adapter = (hdd_adapter_t *) netdev_priv(dev);
hdd_context_t *hddctx = WLAN_HDD_GET_CTX(adapter);
- bool is_eapol = false;
+ bool is_hipri = false;
int status = 0;
status = wlan_hdd_validate_context(hddctx);
@@ -1610,9 +1640,9 @@
}
/* Get the user priority from IP header */
- hdd_wmm_classify_pkt(adapter, skb, &up, &is_eapol);
+ hdd_wmm_classify_pkt(adapter, skb, &up, &is_hipri);
skb->priority = up;
- queueIndex = hdd_get_queue_index(skb->priority, is_eapol);
+ queueIndex = hdd_get_queue_index(skb->priority, is_hipri);
return queueIndex;
}
@@ -1630,9 +1660,9 @@
{
sme_QosWmmUpType up = SME_QOS_WMM_UP_BE;
uint16_t queueIndex;
- hdd_adapter_t *pAdapter = WLAN_HDD_GET_PRIV_PTR(dev);
- bool is_eapol = false;
- hdd_context_t *hdd_ctx = WLAN_HDD_GET_CTX(pAdapter);
+ hdd_adapter_t *adapter = WLAN_HDD_GET_PRIV_PTR(dev);
+ bool is_hipri = false;
+ hdd_context_t *hdd_ctx = WLAN_HDD_GET_CTX(adapter);
int status;
status = wlan_hdd_validate_context(hdd_ctx);
@@ -1642,9 +1672,9 @@
}
/* Get the user priority from IP header */
- hdd_wmm_classify_pkt(pAdapter, skb, &up, &is_eapol);
+ hdd_wmm_classify_pkt(adapter, skb, &up, &is_hipri);
skb->priority = up;
- queueIndex = hdd_get_queue_index(skb->priority, is_eapol);
+ queueIndex = hdd_get_queue_index(skb->priority, is_hipri);
return queueIndex;
}
diff --git a/drivers/staging/qcacld-3.0/core/mac/inc/ani_global.h b/drivers/staging/qcacld-3.0/core/mac/inc/ani_global.h
index 7b202b6..4cf7bc3 100644
--- a/drivers/staging/qcacld-3.0/core/mac/inc/ani_global.h
+++ b/drivers/staging/qcacld-3.0/core/mac/inc/ani_global.h
@@ -964,7 +964,6 @@
bool sta_prefer_80MHz_over_160MHz;
enum country_src reg_hint_src;
uint32_t rx_packet_drop_counter;
- struct candidate_chan_info candidate_channel_info[QDF_MAX_NUM_CHAN];
bool snr_monitor_enabled;
/* channel information callback */
void (*chan_info_cb)(struct scan_chan_info *chan_info);
diff --git a/drivers/staging/qcacld-3.0/core/mac/inc/qwlan_version.h b/drivers/staging/qcacld-3.0/core/mac/inc/qwlan_version.h
index 207cd02..4af1080 100644
--- a/drivers/staging/qcacld-3.0/core/mac/inc/qwlan_version.h
+++ b/drivers/staging/qcacld-3.0/core/mac/inc/qwlan_version.h
@@ -40,10 +40,10 @@
#define QWLAN_VERSION_MAJOR 5
#define QWLAN_VERSION_MINOR 1
-#define QWLAN_VERSION_PATCH 2
-#define QWLAN_VERSION_EXTRA "Y"
-#define QWLAN_VERSION_BUILD 21
+#define QWLAN_VERSION_PATCH 3
+#define QWLAN_VERSION_EXTRA "K"
+#define QWLAN_VERSION_BUILD 01
-#define QWLAN_VERSIONSTR "5.1.2.21Y.1"
+#define QWLAN_VERSIONSTR "5.1.3.01K"
#endif /* QWLAN_VERSION_H */
diff --git a/drivers/staging/qcacld-3.0/core/mac/inc/sir_api.h b/drivers/staging/qcacld-3.0/core/mac/inc/sir_api.h
index ebb3350..fd829e0 100644
--- a/drivers/staging/qcacld-3.0/core/mac/inc/sir_api.h
+++ b/drivers/staging/qcacld-3.0/core/mac/inc/sir_api.h
@@ -805,6 +805,9 @@
#ifdef WLAN_FEATURE_FILS_SK
struct fils_ind_elements fils_info_element;
#endif
+ uint8_t air_time_fraction;
+ uint8_t nss;
+ uint16_t reservedPadding3;
/* Please keep the structure 4 bytes aligned above the ieFields */
uint32_t ieFields[1];
@@ -3111,18 +3114,6 @@
#endif /* FEATURE_WLAN_SCAN_PNO */
-/**
- * struct candidate_chan_info - channel information for candidate
- * @channel_num: channel number.
- * @other_ap_count: other ap count on candidate channel.
- * @max_rssi_on_channel: Max best rssi on candiate channel
- */
-struct candidate_chan_info {
- uint8_t channel_num;
- uint8_t other_ap_count;
- int8_t max_rssi_on_channel;
-};
-
/*
* ALLOWED_ACTION_FRAMES_BITMAP
*
@@ -3271,6 +3262,7 @@
* @is_5g_pref_enabled: 5GHz BSSID preference feature enable/disable.
* @bg_scan_bad_rssi_thresh: Bad RSSI threshold to perform bg scan.
* @bg_scan_client_bitmap: Bitmap to identify the client scans to snoop.
+ * @bad_rssi_thresh_offset_2g: Offset from Bad RSSI threshold for 2G to 5G Roam
*
* This structure holds all the key parameters related to
* initial connection and also roaming connections.
@@ -3300,9 +3292,130 @@
int initial_dense_status;
int traffic_threshold;
int8_t bg_scan_bad_rssi_thresh;
+ uint8_t roam_bad_rssi_thresh_offset_2g;
uint32_t bg_scan_client_bitmap;
};
+
+#define RSSI_WEIGHTAGE 30
+#define HT_CAPABILITY_WEIGHTAGE 7
+#define VHT_CAP_WEIGHTAGE 5
+#define HE_CAP_WEIGHTAGE 2
+#define CHAN_WIDTH_WEIGHTAGE 5
+#define CHAN_BAND_WEIGHTAGE 5
+#define NSS_WEIGHTAGE 5
+#define BEAMFORMING_CAP_WEIGHTAGE 2
+#define PCL_WEIGHT 10
+#define CHANNEL_CONGESTION_WEIGHTAGE 5
+#define OCE_WAN_WEIGHTAGE 2
+#define RESERVED_WEIGHT 22
+#define BEST_CANDIDATE_MAX_WEIGHT 100
+#define MAX_INDEX_SCORE 100
+#define MAX_INDEX_PER_INI 4
+
+
+#define WLAN_GET_BITS(_val, _index, _num_bits) \
+ (((_val) >> (_index)) & ((1 << (_num_bits)) - 1))
+
+#define WLAN_SET_BITS(_var, _index, _num_bits, _val) do { \
+ (_var) &= ~(((1 << (_num_bits)) - 1) << (_index)); \
+ (_var) |= (((_val) & ((1 << (_num_bits)) - 1)) << (_index)); \
+ } while (0)
+
+#define WLAN_GET_SCORE_PERCENTAGE(value32, bw_index) \
+ WLAN_GET_BITS(value32, (8 * (bw_index)), 8)
+#define WLAN_SET_SCORE_PERCENTAGE(value32, score_pcnt, bw_index) \
+ WLAN_SET_BITS(value32, (8 * (bw_index)), 8, score_pcnt)
+
+
+
+/**
+ * struct sir_weight_config - weight params to
+ * calculate best candidate
+ * @rssi_weightage: RSSI weightage
+ * @ht_caps_weightage: HT caps weightage
+ * @vht_caps_weightage: VHT caps weightage
+ * @he_caps_weightage: HE caps weightage
+ * @chan_width_weightage: Channel width weightage
+ * @chan_band_weightage: Channel band weightage
+ * @nss_weightage: NSS weightage
+ * @beamforming_cap_weightage: Beamforming caps weightage
+ * @pcl_weightage: PCL weightage
+ * @channel_congestion_weightage: channel congestion weightage
+ * @oce_wan_weightage: OCE WAN metrics weightage
+ */
+struct sir_weight_config {
+ uint8_t rssi_weightage;
+ uint8_t ht_caps_weightage;
+ uint8_t vht_caps_weightage;
+ uint8_t he_caps_weightage;
+ uint8_t chan_width_weightage;
+ uint8_t chan_band_weightage;
+ uint8_t nss_weightage;
+ uint8_t beamforming_cap_weightage;
+ uint8_t pcl_weightage;
+ uint8_t channel_congestion_weightage;
+ uint8_t oce_wan_weightage;
+};
+
+struct sir_rssi_cfg_score {
+ uint32_t best_rssi_threshold;
+ uint32_t good_rssi_threshold;
+ uint32_t bad_rssi_threshold;
+ uint32_t good_rssi_pcnt;
+ uint32_t bad_rssi_pcnt;
+ uint32_t good_rssi_bucket_size;
+ uint32_t bad_rssi_bucket_size;
+ uint32_t rssi_pref_5g_rssi_thresh;
+};
+
+/**
+ * struct sir_per_slot_scoring - define % score for different slots for a
+ * scoring param.
+ * num_slot: number of slots in which the param will be divided.
+ * Max 15. index 0 is used for 'not_present'. Num_slot will
+ * equally divide 100. e.g, if num_slot = 4 slot 0 = 0-25%, slot
+ * 1 = 26-50% slot 2 = 51-75%, slot 3 = 76-100%
+ * score_pcnt3_to_0: Contains score percentage for slot 0-3
+ * BITS 0-7 :- the scoring pcnt when not present
+ * BITS 8-15 :- SLOT_1
+ * BITS 16-23 :- SLOT_2
+ * BITS 24-31 :- SLOT_3
+ * score_pcnt7_to_4: Contains score percentage for slot 4-7
+ * BITS 0-7 :- SLOT_4
+ * BITS 8-15 :- SLOT_5
+ * BITS 16-23 :- SLOT_6
+ * BITS 24-31 :- SLOT_7
+ * score_pcnt11_to_8: Contains score percentage for slot 8-11
+ * BITS 0-7 :- SLOT_8
+ * BITS 8-15 :- SLOT_9
+ * BITS 16-23 :- SLOT_10
+ * BITS 24-31 :- SLOT_11
+ * score_pcnt15_to_12: Contains score percentage for slot 12-15
+ * BITS 0-7 :- SLOT_12
+ * BITS 8-15 :- SLOT_13
+ * BITS 16-23 :- SLOT_14
+ * BITS 24-31 :- SLOT_15
+ */
+struct sir_per_slot_scoring {
+ uint32_t num_slot;
+ uint32_t score_pcnt3_to_0;
+ uint32_t score_pcnt7_to_4;
+ uint32_t score_pcnt11_to_8;
+ uint32_t score_pcnt15_to_12;
+};
+
+struct sir_score_config {
+ struct sir_weight_config weight_cfg;
+ struct sir_rssi_cfg_score rssi_score;
+ struct sir_per_slot_scoring esp_qbss_scoring;
+ struct sir_per_slot_scoring oce_wan_scoring;
+ uint32_t bandwidth_weight_per_index;
+ uint32_t nss_weight_per_index;
+ uint32_t band_weight_per_index;
+};
+
+
/**
* struct pmkid_mode_bits - Bit flags for PMKID usage in RSN IE
* @fw_okc: Opportunistic key caching enable in firmware
@@ -3335,6 +3448,7 @@
bool RoamScanOffloadEnabled;
bool MAWCEnabled;
int8_t LookupThreshold;
+ int8_t rssi_thresh_offset_5g;
uint8_t delay_before_vdev_stop;
uint8_t OpportunisticScanThresholdDiff;
uint8_t RoamRescanRssiDiff;
@@ -3387,6 +3501,7 @@
enum wmi_dwelltime_adaptive_mode roamscan_adaptive_dwell_mode;
tSirAddie assoc_ie;
struct lca_disallow_config_params lca_config_params;
+ struct scoring_param score_params;
} tSirRoamOffloadScanReq, *tpSirRoamOffloadScanReq;
typedef struct sSirRoamOffloadScanRsp {
diff --git a/drivers/staging/qcacld-3.0/core/mac/inc/sir_mac_prot_def.h b/drivers/staging/qcacld-3.0/core/mac/inc/sir_mac_prot_def.h
index b1fc79c..f02b5ab 100644
--- a/drivers/staging/qcacld-3.0/core/mac/inc/sir_mac_prot_def.h
+++ b/drivers/staging/qcacld-3.0/core/mac/inc/sir_mac_prot_def.h
@@ -553,6 +553,7 @@
#define SIR_MAC_MAX_NUM_OF_DEFAULT_KEYS 4
#define SIR_MAC_KEY_LENGTH 13 /* WEP Maximum key length size */
#define SIR_MAC_AUTH_CHALLENGE_LENGTH 253
+#define SIR_MAC_SAP_AUTH_CHALLENGE_LENGTH 128
#define SIR_MAC_WEP_IV_LENGTH 4
#define SIR_MAC_WEP_ICV_LENGTH 4
#define SIR_MAC_CHALLENGE_ID_LEN 2
diff --git a/drivers/staging/qcacld-3.0/core/mac/src/cfg/cfgUtil/dot11f.frms b/drivers/staging/qcacld-3.0/core/mac/src/cfg/cfgUtil/dot11f.frms
index d528454..dc2cfeb 100644
--- a/drivers/staging/qcacld-3.0/core/mac/src/cfg/cfgUtil/dot11f.frms
+++ b/drivers/staging/qcacld-3.0/core/mac/src/cfg/cfgUtil/dot11f.frms
@@ -2463,6 +2463,11 @@
version[4];
}
+IE ESP_information (EID_EXTN_ID_ELEMENT) OUI ( 0x0B )
+{
+ variable_data[0..96];
+}
+
IE fils_indication (EID_FILS_INDICATION)
{
// FILS Information element
@@ -3071,6 +3076,7 @@
OPTIE ESEVersion;
OPTIE MBO_IE;
OPTIE QCN_IE;
+ OPTIE ESP_information;
} // End frame Beacon.
// Ok, here's the story on Beacon1 & Beacon2. We presumably beacon a lot
@@ -3161,6 +3167,7 @@
OPTIE QComVendorIE;
OPTIE ESEVersion;
OPTIE QCN_IE;
+ OPTIE ESP_information;
}
// This frame is just Beacon with its Fixed Fields stripped out. It's handy
@@ -3228,6 +3235,7 @@
OPTIE QComVendorIE;
OPTIE MBO_IE;
OPTIE QCN_IE;
+ OPTIE ESP_information;
} // End frame BeaconIEs.
FRAME Disassociation // 7.3.3.3
@@ -3465,6 +3473,7 @@
OPTIE ESEVersion;
OPTIE MBO_IE;
OPTIE QCN_IE;
+ OPTIE ESP_information;
} // End frame ProbeResponse.
FRAME Authentication // 7.2.3.10
diff --git a/drivers/staging/qcacld-3.0/core/mac/src/include/dot11f.h b/drivers/staging/qcacld-3.0/core/mac/src/include/dot11f.h
index fe9bdfd..91eaf9b 100644
--- a/drivers/staging/qcacld-3.0/core/mac/src/include/dot11f.h
+++ b/drivers/staging/qcacld-3.0/core/mac/src/include/dot11f.h
@@ -35,7 +35,7 @@
*
*
* This file was automatically generated by 'framesc'
- * Fri Jun 9 14:23:47 2017 from the following file(s):
+ * Tue Jul 11 14:55:10 2017 from the following file(s):
*
* dot11f.frms
*
@@ -4316,6 +4316,46 @@
}; /* End extern "C". */
#endif /* C++ */
+/* EID 255 (0xff) Extended EID 11 (0x0b) */
+typedef struct sDot11fIEESP_information {
+ uint8_t present;
+ uint8_t num_variable_data;
+ uint8_t variable_data[96];
+} tDot11fIEESP_information;
+
+#define DOT11F_EID_ESP_INFORMATION (255)
+
+/* N.B. These #defines do *not* include the EID & length */
+#define DOT11F_IE_ESP_INFORMATION_MIN_LEN (0)
+
+#define DOT11F_IE_ESP_INFORMATION_MAX_LEN (96)
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* C++ */
+uint32_t dot11f_unpack_ie_ESP_information(
+ tpAniSirGlobal,
+ uint8_t *,
+ uint8_t,
+ tDot11fIEESP_information*,
+ bool);
+
+uint32_t dot11f_pack_ie_ESP_information(
+ tpAniSirGlobal,
+ tDot11fIEESP_information *,
+ uint8_t *,
+ uint32_t,
+ uint32_t*);
+
+uint32_t dot11f_get_packed_ie_ESP_information(
+ tpAniSirGlobal,
+ tDot11fIEESP_information *,
+ uint32_t*);
+
+#ifdef __cplusplus
+}; /* End extern "C". */
+#endif /* C++ */
+
/* EID 127 (0x7f) */
typedef struct sDot11fIEExtCap {
uint8_t present;
@@ -8591,6 +8631,7 @@
tDot11fIEESEVersion ESEVersion;
tDot11fIEMBO_IE MBO_IE;
tDot11fIEQCN_IE QCN_IE;
+ tDot11fIEESP_information ESP_information;
} tDot11fBeacon;
#define DOT11F_BEACON (6)
@@ -8687,6 +8728,7 @@
tDot11fIEQComVendorIE QComVendorIE;
tDot11fIEESEVersion ESEVersion;
tDot11fIEQCN_IE QCN_IE;
+ tDot11fIEESP_information ESP_information;
} tDot11fBeacon2;
#define DOT11F_BEACON2 (8)
@@ -8765,6 +8807,7 @@
tDot11fIEQComVendorIE QComVendorIE;
tDot11fIEMBO_IE MBO_IE;
tDot11fIEQCN_IE QCN_IE;
+ tDot11fIEESP_information ESP_information;
} tDot11fBeaconIEs;
#define DOT11F_BEACONIES (9)
@@ -9180,6 +9223,7 @@
tDot11fIEESEVersion ESEVersion;
tDot11fIEMBO_IE MBO_IE;
tDot11fIEQCN_IE QCN_IE;
+ tDot11fIEESP_information ESP_information;
} tDot11fProbeResponse;
#define DOT11F_PROBERESPONSE (22)
diff --git a/drivers/staging/qcacld-3.0/core/mac/src/include/parser_api.h b/drivers/staging/qcacld-3.0/core/mac/src/include/parser_api.h
index 7f34d3d..fb23b8a 100644
--- a/drivers/staging/qcacld-3.0/core/mac/src/include/parser_api.h
+++ b/drivers/staging/qcacld-3.0/core/mac/src/include/parser_api.h
@@ -192,6 +192,76 @@
struct public_key_identifier key_identifier;
};
#endif
+#define ESP_INFORMATION_LIST_LENGTH 3
+/*
+ * enum access_category: tells about access category in ESP parameter
+ * @ESP_AC_BK: ESP access category for background
+ * @ESP_AC_BE: ESP access category for best effort
+ * @ESP_AC_VI: ESP access category for video
+ * @ESP_AC_VO: ESP access category for Voice
+ */
+enum access_category {
+ ESP_AC_BK,
+ ESP_AC_BE,
+ ESP_AC_VI,
+ ESP_AC_VO,
+
+};
+/*
+ * struct sir_esp_info: structure for Esp information parameter
+ * @access_category: access category info
+ * @reserved: reserved
+ * @data_format: two bits in length and tells about data format
+ * i.e. 0 = No aggregation is expected to be performed for MSDUs or MPDUs with
+ * the Type subfield equal to Data for the corresponding AC
+ * 1 = A-MSDU aggregation is expected to be performed for MSDUs for the
+ * corresponding AC, but A-MPDU aggregation is not expected to be performed
+ * for MPDUs with the Type subfield equal to Data for the corresponding AC
+ * 2 = A-MPDU aggregation is expected to be performed for MPDUs with the Type
+ * subfield equal to Data for the corresponding AC, but A-MSDU aggregation is
+ * not expected to be performed for MSDUs for the corresponding AC
+ * 3 = A-MSDU aggregation is expected to be performed for MSDUs for the
+ * corresponding AC and A-MPDU aggregation is expected to be performed for
+ * MPDUs with the Type subfield equal to Data for the corresponding AC
+ * @ba_window_size: BA Window Size subfield is three bits in length and
+ * indicates the size of the Block Ack window that is
+ * expected for the corresponding access category
+ * @estimated_air_fraction: Estimated Air Time Fraction subfield is 8 bits in
+ * length and contains an unsigned integer that represents
+ * the predicted percentage of time, linearly scaled with 255 representing
+ * 100%, that a new STA joining the
+ * BSS will be allocated for PPDUs that contain only MPDUs with the Type
+ * subfield equal to Data of the
+ * corresponding access category for that STA.
+ * @ppdu_duration: Data PPDU Duration Target field is 8 bits in length and is
+ * an unsigned integer that indicates the
+ * expected target duration of PPDUs that contain only MPDUs with the Type
+ * subfield equal to Data for the
+ * corresponding access category in units of 50 μs.
+ */
+struct sir_esp_info {
+ uint8_t access_category:2;
+ uint8_t reserved:1;
+ uint8_t data_format:2;
+ uint8_t ba_window_size:3;
+ uint8_t estimated_air_fraction;
+ uint8_t ppdu_duration;
+};
+/*
+ * struct sir_esp_information: struct for ESP information
+ * @is_present: If ESP information is present or not
+ * @esp_info_AC_BK: ESP information related to BK category
+ * @esp_info_AC_BE: ESP information related to BE category
+ * @esp_info_AC_VI: ESP information related to VI category
+ * @esp_info_AC_VO: ESP information related to VO category
+ */
+struct sir_esp_information {
+ bool is_present;
+ struct sir_esp_info esp_info_AC_BK;
+ struct sir_esp_info esp_info_AC_BE;
+ struct sir_esp_info esp_info_AC_VI;
+ struct sir_esp_info esp_info_AC_VO;
+};
/* Structure common to Beacons & Probe Responses */
typedef struct sSirProbeRespBeacon {
@@ -285,6 +355,7 @@
#ifdef WLAN_FEATURE_FILS_SK
struct sir_fils_indication fils_ind;
#endif
+ struct sir_esp_information esp_information;
} tSirProbeRespBeacon, *tpSirProbeRespBeacon;
/* probe Request structure */
diff --git a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_p2p.c b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_p2p.c
index 6623123..e95ca17 100644
--- a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_p2p.c
+++ b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_p2p.c
@@ -470,7 +470,7 @@
TXRX_FRM_802_11_MGMT,
ANI_TXDIR_TODS, 7, lim_tx_complete,
frame, tx_flag, sme_session_id,
- channel_freq);
+ channel_freq, RATEID_DEFAULT);
if (!mb_msg->noack)
lim_p2p_action_cnf(mac_ctx,
@@ -485,7 +485,7 @@
ANI_TXDIR_TODS, 7, lim_tx_complete,
frame, lim_p2p_action_cnf, tx_flag,
sme_session_id, false,
- channel_freq);
+ channel_freq, RATEID_DEFAULT);
if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
pe_err("couldn't send action frame");
diff --git a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_process_auth_frame.c b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_process_auth_frame.c
index c119c95..d96293f 100644
--- a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_process_auth_frame.c
+++ b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_process_auth_frame.c
@@ -199,7 +199,7 @@
*/
if (!QDF_IS_STATUS_SUCCESS(cds_rand_get_bytes(0,
(uint8_t *) challenge_txt_arr,
- SIR_MAC_AUTH_CHALLENGE_LENGTH)))
+ SIR_MAC_SAP_AUTH_CHALLENGE_LENGTH)))
pe_err("Challenge text preparation failed");
challenge = auth_node->challengeText;
qdf_mem_copy(challenge, (uint8_t *)challenge_txt_arr,
@@ -212,10 +212,10 @@
rx_auth_frm_body->authTransactionSeqNumber + 1;
auth_frame->authStatusCode = eSIR_MAC_SUCCESS_STATUS;
auth_frame->type = SIR_MAC_CHALLENGE_TEXT_EID;
- auth_frame->length = SIR_MAC_AUTH_CHALLENGE_LENGTH;
+ auth_frame->length = SIR_MAC_SAP_AUTH_CHALLENGE_LENGTH;
qdf_mem_copy(auth_frame->challengeText,
auth_node->challengeText,
- SIR_MAC_AUTH_CHALLENGE_LENGTH);
+ SIR_MAC_SAP_AUTH_CHALLENGE_LENGTH);
lim_send_auth_mgmt_frame(mac_ctx, auth_frame,
mac_hdr->sa, LIM_NO_WEP_IN_FC,
pe_session);
@@ -272,7 +272,7 @@
{
tpDphHashNode sta_ds_ptr = NULL;
struct tLimPreAuthNode *auth_node;
- uint8_t challenge_txt_arr[SIR_MAC_AUTH_CHALLENGE_LENGTH];
+ uint8_t challenge_txt_arr[SIR_MAC_SAP_AUTH_CHALLENGE_LENGTH];
uint32_t maxnum_preauth;
uint16_t associd = 0;
@@ -869,7 +869,7 @@
*/
if (!qdf_mem_cmp(rx_auth_frm_body->challengeText,
auth_node->challengeText,
- SIR_MAC_AUTH_CHALLENGE_LENGTH)) {
+ SIR_MAC_SAP_AUTH_CHALLENGE_LENGTH)) {
/*
* Challenge match. STA is autheticated
* Delete Authentication response timer if running
@@ -1163,7 +1163,7 @@
goto free;
}
- if (frame_len < LIM_ENCR_AUTH_BODY_LEN) {
+ if (frame_len < LIM_ENCR_AUTH_BODY_LEN_SAP) {
/* Log error */
pe_err("Not enough size: %d to decry rx Auth frm",
frame_len);
diff --git a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_process_tdls.c b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_process_tdls.c
index dee1ecb..a697356 100644
--- a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_process_tdls.c
+++ b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_process_tdls.c
@@ -669,7 +669,7 @@
lim_mgmt_tdls_tx_complete,
HAL_USE_BD_RATE2_FOR_MANAGEMENT_FRAME |
HAL_USE_PEER_STA_REQUESTED_MASK,
- smeSessionId, false, 0);
+ smeSessionId, false, 0, RATEID_DEFAULT);
if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
pMac->lim.tdls_frm_session_id = NO_SESSION;
pe_err("could not send TDLS Discovery Request frame");
@@ -975,7 +975,8 @@
lim_tx_complete, pFrame,
lim_mgmt_tdls_tx_complete,
HAL_USE_SELF_STA_REQUESTED_MASK,
- smeSessionId, false, 0);
+ smeSessionId, false, 0,
+ RATEID_DEFAULT);
if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
pMac->lim.tdls_frm_session_id = NO_SESSION;
pe_err("could not send TDLS Discovery Response frame!");
@@ -1057,7 +1058,7 @@
lim_mgmt_tdls_tx_complete,
HAL_USE_BD_RATE2_FOR_MANAGEMENT_FRAME
| HAL_USE_PEER_STA_REQUESTED_MASK,
- smeSessionId, flag, 0);
+ smeSessionId, flag, 0, RATEID_DEFAULT);
}
#else
@@ -1077,7 +1078,7 @@
lim_mgmt_tdls_tx_complete,
HAL_USE_BD_RATE2_FOR_MANAGEMENT_FRAME
| HAL_USE_PEER_STA_REQUESTED_MASK,
- smeSessionId, false, 0);
+ smeSessionId, false, 0, RATEID_DEFAULT);
}
#endif
diff --git a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_prop_exts_utils.c b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_prop_exts_utils.c
index 9f8222c..9b09e6a 100644
--- a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_prop_exts_utils.c
+++ b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_prop_exts_utils.c
@@ -110,13 +110,14 @@
#endif
/**
- * lim_get_nss_supported_by_beacon() - finds out nss from beacom
+ * lim_get_nss_supported_by_sta_and_ap() - finds out nss from session
+ * and beacon from AP
* @bcn: beacon structure pointer
* @session: pointer to pe session
*
* Return: number of nss advertised by beacon
*/
-static uint8_t lim_get_nss_supported_by_beacon(tpSchBeaconStruct bcn,
+static uint8_t lim_get_nss_supported_by_sta_and_ap(tpSchBeaconStruct bcn,
tpPESession session)
{
if (session->vhtCapability && bcn->VHTCaps.present) {
@@ -196,7 +197,8 @@
if (mac_ctx->roam.configParam.is_force_1x1 &&
cfg_get_vendor_ie_ptr_from_oui(mac_ctx, SIR_MAC_VENDOR_AP_1_OUI,
SIR_MAC_VENDOR_AP_1_OUI_LEN, p_ie, ie_len) &&
- lim_get_nss_supported_by_beacon(beacon_struct, session) == 2 &&
+ lim_get_nss_supported_by_sta_and_ap(
+ beacon_struct, session) == 2 &&
mac_ctx->lteCoexAntShare &&
IS_24G_CH(session->currentOperChannel)) {
session->supported_nss_1x1 = true;
@@ -205,10 +207,10 @@
pe_debug("For special ap, NSS: %d", session->nss);
}
- if (session->nss > lim_get_nss_supported_by_beacon(beacon_struct,
+ if (session->nss > lim_get_nss_supported_by_sta_and_ap(beacon_struct,
session)) {
- session->nss = lim_get_nss_supported_by_beacon(beacon_struct,
- session);
+ session->nss = lim_get_nss_supported_by_sta_and_ap(
+ beacon_struct, session);
session->vdev_nss = session->nss;
}
diff --git a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_scan_result_utils.c b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_scan_result_utils.c
index 79d7acb..94e92d8 100644
--- a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_scan_result_utils.c
+++ b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_scan_result_utils.c
@@ -100,6 +100,37 @@
#endif
/**
+ * lim_get_nss_supported_by_ap() - finds out nss from AP
+ * @bcn: beacon structure pointer
+ *
+ * Return: number of nss advertised by AP
+ */
+static uint8_t lim_get_nss_supported_by_ap(tpSirProbeRespBeacon bcn)
+{
+ if (bcn->VHTCaps.present) {
+ if ((bcn->VHTCaps.rxMCSMap & 0xC0) != 0xC0)
+ return 4;
+
+ if ((bcn->VHTCaps.rxMCSMap & 0x30) != 0x30)
+ return 3;
+
+ if ((bcn->VHTCaps.rxMCSMap & 0x0C) != 0x0C)
+ return 2;
+ } else if (bcn->HTCaps.present) {
+ if (bcn->HTCaps.supportedMCSSet[3])
+ return 4;
+
+ if (bcn->HTCaps.supportedMCSSet[2])
+ return 3;
+
+ if (bcn->HTCaps.supportedMCSSet[1])
+ return 2;
+ }
+
+ return 1;
+}
+
+/**
* lim_collect_bss_description()
*
***FUNCTION:
@@ -186,12 +217,40 @@
/* VHT Parameters */
if (pBPR->VHTCaps.present) {
pBssDescr->vht_caps_present = 1;
- if (pBPR->VHTCaps.muBeamformerCap)
+ if (pBPR->VHTCaps.suBeamFormerCap)
pBssDescr->beacomforming_capable = 1;
}
if (pBPR->VHTOperation.present)
if (pBPR->VHTOperation.chanWidth == 1)
pBssDescr->chan_width = eHT_CHANNEL_WIDTH_80MHZ;
+ /*
+ * If the ESP metric is transmitting multiple airtime fractions, then
+ * follow the sequence AC_BE, AC_VI, AC_VO, AC_BK and pick whichever is
+ * the first one available
+ */
+ if (pBPR->esp_information.is_present) {
+ if (pBPR->esp_information.esp_info_AC_BE.access_category
+ == ESP_AC_BE)
+ pBssDescr->air_time_fraction =
+ pBPR->esp_information.esp_info_AC_BE.
+ estimated_air_fraction;
+ else if (pBPR->esp_information.esp_info_AC_VI.access_category
+ == ESP_AC_VI)
+ pBssDescr->air_time_fraction =
+ pBPR->esp_information.esp_info_AC_VI.
+ estimated_air_fraction;
+ else if (pBPR->esp_information.esp_info_AC_VO.access_category
+ == ESP_AC_VO)
+ pBssDescr->air_time_fraction =
+ pBPR->esp_information.esp_info_AC_VO.
+ estimated_air_fraction;
+ else if (pBPR->esp_information.esp_info_AC_BK.access_category
+ == ESP_AC_BK)
+ pBssDescr->air_time_fraction =
+ pBPR->esp_information.esp_info_AC_BK.
+ estimated_air_fraction;
+ }
+ pBssDescr->nss = lim_get_nss_supported_by_ap(pBPR);
if (!pBssDescr->beaconInterval) {
pe_warn("Beacon Interval is ZERO, making it to default 100 "
diff --git a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_security_utils.h b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_security_utils.h
index d4b760d..ee42580 100644
--- a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_security_utils.h
+++ b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_security_utils.h
@@ -44,6 +44,12 @@
SIR_MAC_WEP_IV_LENGTH + \
SIR_MAC_WEP_ICV_LENGTH)
+#define LIM_ENCR_AUTH_BODY_LEN_SAP (SIR_MAC_AUTH_FRAME_INFO_LEN + \
+ SIR_MAC_SAP_AUTH_CHALLENGE_LENGTH + \
+ SIR_MAC_CHALLENGE_ID_LEN + \
+ SIR_MAC_WEP_IV_LENGTH + \
+ SIR_MAC_WEP_ICV_LENGTH)
+
#define LIM_ENCR_AUTH_INFO_LEN (SIR_MAC_AUTH_FRAME_INFO_LEN +\
SIR_MAC_WEP_IV_LENGTH + \
SIR_MAC_WEP_ICV_LENGTH + \
diff --git a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_send_frames_host_roam.c b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_send_frames_host_roam.c
index 1f80e39..868ceaf 100644
--- a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_send_frames_host_roam.c
+++ b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_send_frames_host_roam.c
@@ -446,7 +446,7 @@
(uint16_t) (bytes + ft_ies_length),
TXRX_FRM_802_11_MGMT, ANI_TXDIR_TODS, 7,
lim_tx_complete, frame, tx_flag, sme_sessionid,
- 0);
+ 0, RATEID_DEFAULT);
MTRACE(qdf_trace(QDF_MODULE_ID_PE, TRACE_CODE_TX_COMPLETE,
pe_session->peSessionId, qdf_status));
if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
@@ -766,7 +766,8 @@
wma_tx_frame(pMac, pPacket,
(uint16_t) (sizeof(tSirMacMgmtHdr) + nPayload),
TXRX_FRM_802_11_MGMT, ANI_TXDIR_TODS, 7,
- lim_tx_complete, pFrame, txFlag, smeSessionId, 0);
+ lim_tx_complete, pFrame, txFlag, smeSessionId, 0,
+ RATEID_DEFAULT);
MTRACE(qdf_trace
(QDF_MODULE_ID_PE, TRACE_CODE_TX_COMPLETE,
psessionEntry->peSessionId, qdf_status));
diff --git a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_send_management_frames.c b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_send_management_frames.c
index 2b9d70c..cf4b2e4 100644
--- a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_send_management_frames.c
+++ b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_send_management_frames.c
@@ -449,7 +449,7 @@
(uint16_t) sizeof(tSirMacMgmtHdr) + payload,
TXRX_FRM_802_11_MGMT, ANI_TXDIR_TODS, 7,
lim_tx_complete, frame, txflag, sme_sessionid,
- 0);
+ 0, RATEID_DEFAULT);
if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
pe_err("could not send Probe Request frame!");
/* Pkt will be freed up by the callback */
@@ -852,7 +852,7 @@
TXRX_FRM_802_11_MGMT,
ANI_TXDIR_TODS,
7, lim_tx_complete, frame, tx_flag,
- sme_sessionid, 0);
+ sme_sessionid, 0, RATEID_DEFAULT);
/* Pkt will be freed up by the callback */
if (!QDF_IS_STATUS_SUCCESS(qdf_status))
@@ -1066,7 +1066,7 @@
TXRX_FRM_802_11_MGMT,
ANI_TXDIR_TODS,
7, lim_tx_complete, pFrame, txFlag,
- smeSessionId, 0);
+ smeSessionId, 0, RATEID_DEFAULT);
MTRACE(qdf_trace(QDF_MODULE_ID_PE, TRACE_CODE_TX_COMPLETE,
psessionEntry->peSessionId, qdf_status));
@@ -1406,7 +1406,7 @@
TXRX_FRM_802_11_MGMT,
ANI_TXDIR_TODS,
7, lim_tx_complete, frame, tx_flag,
- sme_session, 0);
+ sme_session, 0, RATEID_DEFAULT);
MTRACE(qdf_trace(QDF_MODULE_ID_PE, TRACE_CODE_TX_COMPLETE,
pe_session->peSessionId, qdf_status));
@@ -1553,7 +1553,7 @@
TXRX_FRM_802_11_MGMT,
ANI_TXDIR_TODS,
7, lim_tx_complete, pFrame, txFlag,
- smeSessionId, 0);
+ smeSessionId, 0, RATEID_DEFAULT);
MTRACE(qdf_trace(QDF_MODULE_ID_PE, TRACE_CODE_TX_COMPLETE,
psessionEntry->peSessionId, qdf_status));
/* Pkt will be freed up by the callback */
@@ -1632,6 +1632,7 @@
uint8_t *bcn_ie = NULL;
uint32_t bcn_ie_len = 0;
uint32_t aes_block_size_len = 0;
+ enum rateid min_rid = RATEID_DEFAULT;
if (NULL == pe_session) {
pe_err("pe_session is NULL");
@@ -2029,13 +2030,14 @@
MTRACE(qdf_trace(QDF_MODULE_ID_PE, TRACE_CODE_TX_MGMT,
pe_session->peSessionId, mac_hdr->fc.subType));
+ min_rid = lim_get_min_session_txrate(pe_session);
pe_debug("Sending Association Request length %d to ", bytes);
qdf_status = wma_tx_frameWithTxComplete(mac_ctx, packet,
(uint16_t) (sizeof(tSirMacMgmtHdr) + payload),
TXRX_FRM_802_11_MGMT, ANI_TXDIR_TODS, 7,
lim_tx_complete, frame,
lim_assoc_tx_complete_cnf,
- tx_flag, sme_sessionid, false, 0);
+ tx_flag, sme_sessionid, false, 0, min_rid);
MTRACE(qdf_trace(QDF_MODULE_ID_PE, TRACE_CODE_TX_COMPLETE,
pe_session->peSessionId, qdf_status));
@@ -2120,6 +2122,7 @@
uint8_t sme_sessionid = 0;
uint16_t ft_ies_length = 0, auth_ack_status;
bool challenge_req = false;
+ enum rateid min_rid = RATEID_DEFAULT;
if (NULL == session) {
pe_err("Error: psession Entry is NULL");
@@ -2212,7 +2215,8 @@
challenge_req = true;
body_len = SIR_MAC_AUTH_FRAME_INFO_LEN +
- SIR_MAC_AUTH_CHALLENGE_BODY_LEN;
+ SIR_MAC_SAP_AUTH_CHALLENGE_LENGTH +
+ SIR_MAC_CHALLENGE_ID_LEN;
frame_len = sizeof(tSirMacMgmtHdr) + body_len;
}
break;
@@ -2316,10 +2320,11 @@
*body = auth_frame->length;
body++;
qdf_mem_copy(body, auth_frame->challengeText,
- SIR_MAC_AUTH_CHALLENGE_LENGTH);
- body += SIR_MAC_AUTH_CHALLENGE_LENGTH;
+ SIR_MAC_SAP_AUTH_CHALLENGE_LENGTH);
+ body += SIR_MAC_SAP_AUTH_CHALLENGE_LENGTH;
- body_len -= SIR_MAC_AUTH_CHALLENGE_BODY_LEN;
+ body_len -= SIR_MAC_SAP_AUTH_CHALLENGE_LENGTH +
+ SIR_MAC_CHALLENGE_ID_LEN;
}
}
@@ -2387,12 +2392,15 @@
session->peSessionId, mac_hdr->fc.subType));
mac_ctx->auth_ack_status = LIM_AUTH_ACK_NOT_RCD;
+
+ min_rid = lim_get_min_session_txrate(session);
qdf_status = wma_tx_frameWithTxComplete(mac_ctx, packet,
(uint16_t)frame_len,
TXRX_FRM_802_11_MGMT, ANI_TXDIR_TODS,
7, lim_tx_complete, frame,
lim_auth_tx_complete_cnf,
- tx_flag, sme_sessionid, false, 0);
+ tx_flag, sme_sessionid, false, 0,
+ min_rid);
MTRACE(qdf_trace(QDF_MODULE_ID_PE, TRACE_CODE_TX_COMPLETE,
session->peSessionId, qdf_status));
if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
@@ -2732,7 +2740,8 @@
TXRX_FRM_802_11_MGMT,
ANI_TXDIR_TODS, 7, lim_tx_complete,
pFrame, lim_disassoc_tx_complete_cnf,
- txFlag, smeSessionId, false, 0);
+ txFlag, smeSessionId, false, 0,
+ RATEID_DEFAULT);
MTRACE(qdf_trace
(QDF_MODULE_ID_PE, TRACE_CODE_TX_COMPLETE,
psessionEntry->peSessionId, qdf_status));
@@ -2762,7 +2771,7 @@
ANI_TXDIR_TODS,
7,
lim_tx_complete, pFrame, txFlag,
- smeSessionId, 0);
+ smeSessionId, 0, RATEID_DEFAULT);
MTRACE(qdf_trace
(QDF_MODULE_ID_PE, TRACE_CODE_TX_COMPLETE,
psessionEntry->peSessionId, qdf_status));
@@ -2907,7 +2916,8 @@
TXRX_FRM_802_11_MGMT,
ANI_TXDIR_TODS, 7, lim_tx_complete,
pFrame, lim_deauth_tx_complete_cnf,
- txFlag, smeSessionId, false, 0);
+ txFlag, smeSessionId, false, 0,
+ RATEID_DEFAULT);
MTRACE(qdf_trace
(QDF_MODULE_ID_PE, TRACE_CODE_TX_COMPLETE,
psessionEntry->peSessionId, qdf_status));
@@ -2950,7 +2960,7 @@
wma_tx_frame(pMac, pPacket, (uint16_t) nBytes,
TXRX_FRM_802_11_MGMT, ANI_TXDIR_IBSS,
7, lim_tx_complete, pFrame, txFlag,
- smeSessionId, 0);
+ smeSessionId, 0, RATEID_DEFAULT);
} else {
#endif
/* Queue Disassociation frame in high priority WQ */
@@ -2958,7 +2968,7 @@
wma_tx_frame(pMac, pPacket, (uint16_t) nBytes,
TXRX_FRM_802_11_MGMT, ANI_TXDIR_TODS,
7, lim_tx_complete, pFrame, txFlag,
- smeSessionId, 0);
+ smeSessionId, 0, RATEID_DEFAULT);
#ifdef FEATURE_WLAN_TDLS
}
#endif
@@ -3088,7 +3098,7 @@
qdf_status =
wma_tx_frame(pMac, pPacket, (uint16_t) nBytes,
TXRX_FRM_802_11_MGMT, ANI_TXDIR_TODS, 7,
- lim_tx_complete, pFrame, 0, 0);
+ lim_tx_complete, pFrame, 0, 0, RATEID_DEFAULT);
MTRACE(qdf_trace
(QDF_MODULE_ID_PE, TRACE_CODE_TX_COMPLETE,
((psessionEntry) ? psessionEntry->peSessionId : NO_SESSION),
@@ -3188,7 +3198,7 @@
qdf_status =
wma_tx_frame(pMac, pPacket, (uint16_t) nBytes,
TXRX_FRM_802_11_MGMT, ANI_TXDIR_TODS, 7,
- lim_tx_complete, pFrame, 0, 0);
+ lim_tx_complete, pFrame, 0, 0, RATEID_DEFAULT);
MTRACE(qdf_trace
(QDF_MODULE_ID_PE, TRACE_CODE_TX_COMPLETE,
((psessionEntry) ? psessionEntry->peSessionId : NO_SESSION),
@@ -3291,7 +3301,7 @@
qdf_status =
wma_tx_frame(pMac, pPacket, (uint16_t) nBytes,
TXRX_FRM_802_11_MGMT, ANI_TXDIR_TODS, 7,
- lim_tx_complete, pFrame, 0, 0);
+ lim_tx_complete, pFrame, 0, 0, RATEID_DEFAULT);
MTRACE(qdf_trace
(QDF_MODULE_ID_PE, TRACE_CODE_TX_COMPLETE,
((psessionEntry) ? psessionEntry->peSessionId : NO_SESSION),
@@ -3421,7 +3431,7 @@
TXRX_FRM_802_11_MGMT,
ANI_TXDIR_TODS,
7, lim_tx_complete, pFrame, txFlag,
- smeSessionId, 0);
+ smeSessionId, 0, RATEID_DEFAULT);
MTRACE(qdf_trace(QDF_MODULE_ID_PE, TRACE_CODE_TX_COMPLETE,
psessionEntry->peSessionId, qdf_status));
if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
@@ -3548,7 +3558,8 @@
ANI_TXDIR_TODS,
7,
lim_tx_complete, frame,
- txFlag, sme_session_id, 0);
+ txFlag, sme_session_id, 0,
+ RATEID_DEFAULT);
MTRACE(qdf_trace(QDF_MODULE_ID_PE, TRACE_CODE_TX_COMPLETE,
session_entry->peSessionId, qdf_status));
if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
@@ -3692,7 +3703,7 @@
TXRX_FRM_802_11_MGMT, ANI_TXDIR_TODS,
7, lim_tx_complete, frame,
lim_oper_chan_change_confirm_tx_complete_cnf,
- tx_flag, sme_session_id, false, 0);
+ tx_flag, sme_session_id, false, 0, RATEID_DEFAULT);
MTRACE(qdf_trace(QDF_MODULE_ID_PE, TRACE_CODE_TX_COMPLETE,
session_entry->peSessionId, qdf_status));
@@ -3795,7 +3806,7 @@
TXRX_FRM_802_11_MGMT,
ANI_TXDIR_TODS,
7, lim_tx_complete, pFrame, txFlag,
- smeSessionId, 0);
+ smeSessionId, 0, RATEID_DEFAULT);
MTRACE(qdf_trace(QDF_MODULE_ID_PE, TRACE_CODE_TX_COMPLETE,
psessionEntry->peSessionId, qdf_status));
if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
@@ -3930,7 +3941,7 @@
TXRX_FRM_802_11_MGMT,
ANI_TXDIR_TODS,
7, lim_tx_complete, pFrame, txFlag,
- smeSessionId, 0);
+ smeSessionId, 0, RATEID_DEFAULT);
MTRACE(qdf_trace(QDF_MODULE_ID_PE, TRACE_CODE_TX_COMPLETE,
psessionEntry->peSessionId, qdf_status));
if (QDF_STATUS_SUCCESS != qdf_status) {
@@ -4077,7 +4088,7 @@
TXRX_FRM_802_11_MGMT,
ANI_TXDIR_TODS,
7, lim_tx_complete, pFrame, txFlag,
- smeSessionId, 0);
+ smeSessionId, 0, RATEID_DEFAULT);
MTRACE(qdf_trace(QDF_MODULE_ID_PE, TRACE_CODE_TX_COMPLETE,
psessionEntry->peSessionId, qdf_status));
if (QDF_STATUS_SUCCESS != qdf_status) {
@@ -4264,7 +4275,7 @@
TXRX_FRM_802_11_MGMT,
ANI_TXDIR_TODS,
7, lim_tx_complete, pFrame, txFlag,
- smeSessionId, 0);
+ smeSessionId, 0, RATEID_DEFAULT);
MTRACE(qdf_trace(QDF_MODULE_ID_PE, TRACE_CODE_TX_COMPLETE,
psessionEntry->peSessionId, qdf_status));
if (QDF_STATUS_SUCCESS != qdf_status) {
@@ -4400,7 +4411,7 @@
TXRX_FRM_802_11_MGMT,
ANI_TXDIR_TODS,
7, lim_tx_complete, pFrame, txFlag,
- smeSessionId, 0);
+ smeSessionId, 0, RATEID_DEFAULT);
if (QDF_STATUS_SUCCESS != qdf_status) {
pe_err("wma_tx_frame FAILED! Status [%d]", qdf_status);
nSirStatus = eSIR_FAILURE;
@@ -4532,7 +4543,7 @@
TXRX_FRM_802_11_MGMT,
ANI_TXDIR_TODS,
7, lim_tx_complete, pFrame, txFlag,
- smeSessionId, 0);
+ smeSessionId, 0, RATEID_DEFAULT);
MTRACE(qdf_trace(QDF_MODULE_ID_PE, TRACE_CODE_TX_COMPLETE,
psessionEntry->peSessionId, qdf_status));
if (QDF_STATUS_SUCCESS != qdf_status) {
diff --git a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_utils.c b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_utils.c
index 2ce7428..6e8b859 100644
--- a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_utils.c
+++ b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_utils.c
@@ -7245,3 +7245,64 @@
else
return false;
}
+
+uint8_t lim_get_min_session_txrate(tpPESession session)
+{
+ enum rateid rid = RATEID_DEFAULT;
+ uint8_t min_rate = SIR_MAC_RATE_54, curr_rate, i;
+ tSirMacRateSet *rateset = &session->rateSet;
+
+ if (!session)
+ return rid;
+
+ for (i = 0; i < rateset->numRates; i++) {
+ /* Ignore MSB - set to indicate basic rate */
+ curr_rate = rateset->rate[i] & 0x7F;
+ min_rate = (curr_rate < min_rate) ? curr_rate : min_rate;
+ }
+ pe_debug("supported min_rate: %0x(%d)", min_rate, min_rate);
+
+ switch (min_rate) {
+ case SIR_MAC_RATE_1:
+ rid = RATEID_1MBPS;
+ break;
+ case SIR_MAC_RATE_2:
+ rid = RATEID_2MBPS;
+ break;
+ case SIR_MAC_RATE_5_5:
+ rid = RATEID_5_5MBPS;
+ break;
+ case SIR_MAC_RATE_11:
+ rid = RATEID_11MBPS;
+ break;
+ case SIR_MAC_RATE_6:
+ rid = RATEID_6MBPS;
+ break;
+ case SIR_MAC_RATE_9:
+ rid = RATEID_9MBPS;
+ break;
+ case SIR_MAC_RATE_12:
+ rid = RATEID_12MBPS;
+ break;
+ case SIR_MAC_RATE_18:
+ rid = RATEID_18MBPS;
+ break;
+ case SIR_MAC_RATE_24:
+ rid = RATEID_24MBPS;
+ break;
+ case SIR_MAC_RATE_36:
+ rid = RATEID_36MBPS;
+ break;
+ case SIR_MAC_RATE_48:
+ rid = RATEID_48MBPS;
+ break;
+ case SIR_MAC_RATE_54:
+ rid = RATEID_54MBPS;
+ break;
+ default:
+ rid = RATEID_DEFAULT;
+ break;
+ }
+
+ return rid;
+}
diff --git a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_utils.h b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_utils.h
index ebd5037..0a842e5 100644
--- a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_utils.h
+++ b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_utils.h
@@ -742,4 +742,16 @@
bool lim_check_if_vendor_oui_match(tpAniSirGlobal mac_ctx,
uint8_t *oui, uint8_t oui_len,
uint8_t *ie, uint8_t ie_len);
+
+/**
+ * lim_get_min_session_txrate() - Get the minimum rate supported in the session
+ * @session: Pointer to PE session
+ *
+ * This API will find the minimum rate supported by the given PE session and
+ * return the enum rateid corresponding to the rate.
+ *
+ * Return: enum rateid
+ */
+uint8_t lim_get_min_session_txrate(tpPESession session);
+
#endif /* __LIM_UTILS_H */
diff --git a/drivers/staging/qcacld-3.0/core/mac/src/sys/legacy/src/utils/src/dot11f.c b/drivers/staging/qcacld-3.0/core/mac/src/sys/legacy/src/utils/src/dot11f.c
index 61e897b..366252c 100644
--- a/drivers/staging/qcacld-3.0/core/mac/src/sys/legacy/src/utils/src/dot11f.c
+++ b/drivers/staging/qcacld-3.0/core/mac/src/sys/legacy/src/utils/src/dot11f.c
@@ -33,7 +33,7 @@
*
*
* This file was automatically generated by 'framesc'
- * Fri Jun 9 14:23:47 2017 from the following file(s):
+ * Tue Jul 11 14:55:10 2017 from the following file(s):
*
* dot11f.frms
*
@@ -3047,6 +3047,31 @@
#define SigIeESEVersion (0x0033)
+uint32_t dot11f_unpack_ie_ESP_information(tpAniSirGlobal pCtx,
+ uint8_t *pBuf,
+ uint8_t ielen,
+ tDot11fIEESP_information *pDst,
+ bool append_ie)
+{
+ uint32_t status = DOT11F_PARSE_SUCCESS;
+ (void) pBuf; (void)ielen; /* Shutup the compiler */
+ if (pDst->present)
+ status = DOT11F_DUPLICATE_IE;
+ pDst->present = 1;
+ pDst->num_variable_data = (uint8_t)(ielen);
+ if (ielen > 96) {
+ pDst->present = 0;
+ return DOT11F_SKIPPED_BAD_IE;
+ }
+
+ DOT11F_MEMCPY(pCtx, pDst->variable_data, pBuf, (ielen));
+ (void)pCtx;
+ return status;
+} /* End dot11f_unpack_ie_ESP_information. */
+
+#define SigIeESP_information (0x0034)
+
+
uint32_t dot11f_unpack_ie_ext_cap(tpAniSirGlobal pCtx,
uint8_t *pBuf,
uint8_t ielen,
@@ -3074,7 +3099,7 @@
return status;
} /* End dot11f_unpack_ie_ext_cap. */
-#define SigIeExtCap (0x0034)
+#define SigIeExtCap (0x0035)
uint32_t dot11f_unpack_ie_ext_supp_rates(tpAniSirGlobal pCtx,
@@ -3107,7 +3132,7 @@
return status;
} /* End dot11f_unpack_ie_ext_supp_rates. */
-#define SigIeExtSuppRates (0x0035)
+#define SigIeExtSuppRates (0x0036)
uint32_t dot11f_unpack_ie_fh_param_set(tpAniSirGlobal pCtx,
@@ -3135,7 +3160,7 @@
return status;
} /* End dot11f_unpack_ie_fh_param_set. */
-#define SigIeFHParamSet (0x0036)
+#define SigIeFHParamSet (0x0037)
uint32_t dot11f_unpack_ie_fh_params(tpAniSirGlobal pCtx,
@@ -3157,7 +3182,7 @@
return status;
} /* End dot11f_unpack_ie_fh_params. */
-#define SigIeFHParams (0x0037)
+#define SigIeFHParams (0x0038)
uint32_t dot11f_unpack_ie_fh_patt_table(tpAniSirGlobal pCtx,
@@ -3194,7 +3219,7 @@
return status;
} /* End dot11f_unpack_ie_fh_patt_table. */
-#define SigIeFHPattTable (0x0038)
+#define SigIeFHPattTable (0x0039)
static const tFFDefn FFS_FTInfo[] = {
@@ -3254,7 +3279,7 @@
return status;
} /* End dot11f_unpack_ie_ft_info. */
-#define SigIeFTInfo (0x0039)
+#define SigIeFTInfo (0x003a)
uint32_t dot11f_unpack_ie_ht_caps(tpAniSirGlobal pCtx,
@@ -3348,7 +3373,7 @@
return status;
} /* End dot11f_unpack_ie_ht_caps. */
-#define SigIeHTCaps (0x003a)
+#define SigIeHTCaps (0x003b)
uint32_t dot11f_unpack_ie_ht_info(tpAniSirGlobal pCtx,
@@ -3408,7 +3433,7 @@
return status;
} /* End dot11f_unpack_ie_ht_info. */
-#define SigIeHTInfo (0x003b)
+#define SigIeHTInfo (0x003c)
uint32_t dot11f_unpack_ie_ibss_params(tpAniSirGlobal pCtx,
@@ -3427,7 +3452,7 @@
return status;
} /* End dot11f_unpack_ie_ibss_params. */
-#define SigIeIBSSParams (0x003c)
+#define SigIeIBSSParams (0x003d)
uint32_t dot11f_unpack_ie_link_identifier(tpAniSirGlobal pCtx,
@@ -3452,7 +3477,7 @@
return status;
} /* End dot11f_unpack_ie_link_identifier. */
-#define SigIeLinkIdentifier (0x003d)
+#define SigIeLinkIdentifier (0x003e)
uint32_t dot11f_unpack_ie_MBO_IE(tpAniSirGlobal pCtx,
@@ -3480,7 +3505,7 @@
return status;
} /* End dot11f_unpack_ie_MBO_IE. */
-#define SigIeMBO_IE (0x003e)
+#define SigIeMBO_IE (0x003f)
static const tFFDefn FFS_reportBeacon[] = {
@@ -3643,7 +3668,7 @@
return status;
} /* End dot11f_unpack_ie_measurement_report. */
-#define SigIeMeasurementReport (0x003f)
+#define SigIeMeasurementReport (0x0040)
static const tFFDefn FFS_measurement_requestBeacon[] = {
@@ -3836,7 +3861,7 @@
return status;
} /* End dot11f_unpack_ie_measurement_request. */
-#define SigIeMeasurementRequest (0x0040)
+#define SigIeMeasurementRequest (0x0041)
uint32_t dot11f_unpack_ie_mobility_domain(tpAniSirGlobal pCtx,
@@ -3862,7 +3887,7 @@
return status;
} /* End dot11f_unpack_ie_mobility_domain. */
-#define SigIeMobilityDomain (0x0041)
+#define SigIeMobilityDomain (0x0042)
static const tFFDefn FFS_NeighborReport[] = {
@@ -3949,7 +3974,7 @@
return status;
} /* End dot11f_unpack_ie_neighbor_report. */
-#define SigIeNeighborReport (0x0042)
+#define SigIeNeighborReport (0x0043)
uint32_t dot11f_unpack_ie_obss_scan_parameters(tpAniSirGlobal pCtx,
@@ -3986,7 +4011,7 @@
return status;
} /* End dot11f_unpack_ie_obss_scan_parameters. */
-#define SigIeOBSSScanParameters (0x0043)
+#define SigIeOBSSScanParameters (0x0044)
uint32_t dot11f_unpack_ie_operating_mode(tpAniSirGlobal pCtx,
@@ -4010,7 +4035,7 @@
return status;
} /* End dot11f_unpack_ie_operating_mode. */
-#define SigIeOperatingMode (0x0044)
+#define SigIeOperatingMode (0x0045)
static const tTLVDefn TLVS_P2PAssocReq[] = {
@@ -4042,7 +4067,7 @@
return status;
} /* End dot11f_unpack_ie_p2_p_assoc_req. */
-#define SigIeP2PAssocReq (0x0045)
+#define SigIeP2PAssocReq (0x0046)
static const tTLVDefn TLVS_P2PAssocRes[] = {
@@ -4071,7 +4096,7 @@
return status;
} /* End dot11f_unpack_ie_p2_p_assoc_res. */
-#define SigIeP2PAssocRes (0x0046)
+#define SigIeP2PAssocRes (0x0047)
static const tTLVDefn TLVS_P2PBeacon[] = {
@@ -4103,7 +4128,7 @@
return status;
} /* End dot11f_unpack_ie_p2_p_beacon. */
-#define SigIeP2PBeacon (0x0047)
+#define SigIeP2PBeacon (0x0048)
static const tTLVDefn TLVS_P2PBeaconProbeRes[] = {
@@ -4145,7 +4170,7 @@
return status;
} /* End dot11f_unpack_ie_p2_p_beacon_probe_res. */
-#define SigIeP2PBeaconProbeRes (0x0048)
+#define SigIeP2PBeaconProbeRes (0x0049)
static const tTLVDefn TLVS_P2PDeAuth[] = {
@@ -4171,7 +4196,7 @@
return status;
} /* End dot11f_unpack_ie_p2_p_de_auth. */
-#define SigIeP2PDeAuth (0x0049)
+#define SigIeP2PDeAuth (0x004a)
static const tTLVDefn TLVS_P2PDisAssoc[] = {
@@ -4197,7 +4222,7 @@
return status;
} /* End dot11f_unpack_ie_p2_p_dis_assoc. */
-#define SigIeP2PDisAssoc (0x004a)
+#define SigIeP2PDisAssoc (0x004b)
uint32_t dot11f_unpack_ie_p2_pie_opaque(tpAniSirGlobal pCtx,
@@ -4222,7 +4247,7 @@
return status;
} /* End dot11f_unpack_ie_p2_pie_opaque. */
-#define SigIeP2PIEOpaque (0x004b)
+#define SigIeP2PIEOpaque (0x004c)
static const tTLVDefn TLVS_P2PProbeReq[] = {
@@ -4261,7 +4286,7 @@
return status;
} /* End dot11f_unpack_ie_p2_p_probe_req. */
-#define SigIeP2PProbeReq (0x004c)
+#define SigIeP2PProbeReq (0x004d)
static const tTLVDefn TLVS_P2PProbeRes[] = {
@@ -4300,7 +4325,7 @@
return status;
} /* End dot11f_unpack_ie_p2_p_probe_res. */
-#define SigIeP2PProbeRes (0x004d)
+#define SigIeP2PProbeRes (0x004e)
uint32_t dot11f_unpack_ie_pti_control(tpAniSirGlobal pCtx,
@@ -4322,7 +4347,7 @@
return status;
} /* End dot11f_unpack_ie_pti_control. */
-#define SigIePTIControl (0x004e)
+#define SigIePTIControl (0x004f)
uint32_t dot11f_unpack_ie_pu_buffer_status(tpAniSirGlobal pCtx,
@@ -4347,7 +4372,7 @@
return status;
} /* End dot11f_unpack_ie_pu_buffer_status. */
-#define SigIePUBufferStatus (0x004f)
+#define SigIePUBufferStatus (0x0050)
uint32_t dot11f_unpack_ie_power_caps(tpAniSirGlobal pCtx,
@@ -4369,7 +4394,7 @@
return status;
} /* End dot11f_unpack_ie_power_caps. */
-#define SigIePowerCaps (0x0050)
+#define SigIePowerCaps (0x0051)
uint32_t dot11f_unpack_ie_power_constraints(tpAniSirGlobal pCtx,
@@ -4388,7 +4413,7 @@
return status;
} /* End dot11f_unpack_ie_power_constraints. */
-#define SigIePowerConstraints (0x0051)
+#define SigIePowerConstraints (0x0052)
uint32_t dot11f_unpack_ie_qbss_load(tpAniSirGlobal pCtx,
@@ -4413,7 +4438,7 @@
return status;
} /* End dot11f_unpack_ie_qbss_load. */
-#define SigIeQBSSLoad (0x0052)
+#define SigIeQBSSLoad (0x0053)
uint32_t dot11f_unpack_ie_QCN_IE(tpAniSirGlobal pCtx,
@@ -4432,7 +4457,7 @@
return status;
} /* End dot11f_unpack_ie_QCN_IE. */
-#define SigIeQCN_IE (0x0053)
+#define SigIeQCN_IE (0x0054)
uint32_t dot11f_unpack_ie_QComVendorIE(tpAniSirGlobal pCtx,
@@ -4454,7 +4479,7 @@
return status;
} /* End dot11f_unpack_ie_QComVendorIE. */
-#define SigIeQComVendorIE (0x0054)
+#define SigIeQComVendorIE (0x0055)
uint32_t dot11f_unpack_ie_qos_caps_ap(tpAniSirGlobal pCtx,
@@ -4479,7 +4504,7 @@
return status;
} /* End dot11f_unpack_ie_qos_caps_ap. */
-#define SigIeQOSCapsAp (0x0055)
+#define SigIeQOSCapsAp (0x0056)
uint32_t dot11f_unpack_ie_qos_caps_station(tpAniSirGlobal pCtx,
@@ -4506,7 +4531,7 @@
return status;
} /* End dot11f_unpack_ie_qos_caps_station. */
-#define SigIeQOSCapsStation (0x0056)
+#define SigIeQOSCapsStation (0x0057)
uint32_t dot11f_unpack_ie_qos_map_set(tpAniSirGlobal pCtx,
@@ -4531,7 +4556,7 @@
return status;
} /* End dot11f_unpack_ie_qos_map_set. */
-#define SigIeQosMapSet (0x0057)
+#define SigIeQosMapSet (0x0058)
uint32_t dot11f_unpack_ie_quiet(tpAniSirGlobal pCtx,
@@ -4559,7 +4584,7 @@
return status;
} /* End dot11f_unpack_ie_quiet. */
-#define SigIeQuiet (0x0058)
+#define SigIeQuiet (0x0059)
uint32_t dot11f_unpack_ie_rcpiie(tpAniSirGlobal pCtx,
@@ -4578,7 +4603,7 @@
return status;
} /* End dot11f_unpack_ie_rcpiie. */
-#define SigIeRCPIIE (0x0059)
+#define SigIeRCPIIE (0x005a)
static const tFFDefn FFS_RICDataDesc[] = {
@@ -4654,7 +4679,7 @@
return status;
} /* End dot11f_unpack_ie_ric_data_desc. */
-#define SigIeRICDataDesc (0x005a)
+#define SigIeRICDataDesc (0x005b)
uint32_t dot11f_unpack_ie_rsn(tpAniSirGlobal pCtx,
@@ -4746,7 +4771,7 @@
return status;
} /* End dot11f_unpack_ie_rsn. */
-#define SigIeRSN (0x005b)
+#define SigIeRSN (0x005c)
uint32_t dot11f_unpack_ie_rsniie(tpAniSirGlobal pCtx,
@@ -4765,7 +4790,7 @@
return status;
} /* End dot11f_unpack_ie_rsniie. */
-#define SigIeRSNIIE (0x005c)
+#define SigIeRSNIIE (0x005d)
uint32_t dot11f_unpack_ie_rsn_opaque(tpAniSirGlobal pCtx,
@@ -4790,7 +4815,7 @@
return status;
} /* End dot11f_unpack_ie_rsn_opaque. */
-#define SigIeRSNOpaque (0x005d)
+#define SigIeRSNOpaque (0x005e)
uint32_t dot11f_unpack_ie_supp_channels(tpAniSirGlobal pCtx,
@@ -4815,7 +4840,7 @@
return status;
} /* End dot11f_unpack_ie_supp_channels. */
-#define SigIeSuppChannels (0x005e)
+#define SigIeSuppChannels (0x005f)
uint32_t dot11f_unpack_ie_supp_operating_classes(tpAniSirGlobal pCtx,
@@ -4840,7 +4865,7 @@
return status;
} /* End dot11f_unpack_ie_supp_operating_classes. */
-#define SigIeSuppOperatingClasses (0x005f)
+#define SigIeSuppOperatingClasses (0x0060)
uint32_t dot11f_unpack_ie_supp_rates(tpAniSirGlobal pCtx,
@@ -4873,7 +4898,7 @@
return status;
} /* End dot11f_unpack_ie_supp_rates. */
-#define SigIeSuppRates (0x0060)
+#define SigIeSuppRates (0x0061)
uint32_t dot11f_unpack_ie_tim(tpAniSirGlobal pCtx,
@@ -4907,7 +4932,7 @@
return status;
} /* End dot11f_unpack_ie_tim. */
-#define SigIeTIM (0x0061)
+#define SigIeTIM (0x0062)
uint32_t dot11f_unpack_ie_tpc_report(tpAniSirGlobal pCtx,
@@ -4929,7 +4954,7 @@
return status;
} /* End dot11f_unpack_ie_tpc_report. */
-#define SigIeTPCReport (0x0062)
+#define SigIeTPCReport (0x0063)
uint32_t dot11f_unpack_ie_tpc_request(tpAniSirGlobal pCtx,
@@ -4947,7 +4972,7 @@
return status;
} /* End dot11f_unpack_ie_tpc_request. */
-#define SigIeTPCRequest (0x0063)
+#define SigIeTPCRequest (0x0064)
uint32_t dot11f_unpack_ie_time_advertisement(tpAniSirGlobal pCtx,
@@ -4972,7 +4997,7 @@
return status;
} /* End dot11f_unpack_ie_time_advertisement. */
-#define SigIeTimeAdvertisement (0x0064)
+#define SigIeTimeAdvertisement (0x0065)
uint32_t dot11f_unpack_ie_timeout_interval(tpAniSirGlobal pCtx,
@@ -4994,7 +5019,7 @@
return status;
} /* End dot11f_unpack_ie_timeout_interval. */
-#define SigIeTimeoutInterval (0x0065)
+#define SigIeTimeoutInterval (0x0066)
uint32_t dot11f_unpack_ie_vht_ext_bss_load(tpAniSirGlobal pCtx,
@@ -5025,7 +5050,7 @@
return status;
} /* End dot11f_unpack_ie_vht_ext_bss_load. */
-#define SigIeVHTExtBssLoad (0x0066)
+#define SigIeVHTExtBssLoad (0x0067)
uint32_t dot11f_unpack_ie_vendor1_ie(tpAniSirGlobal pCtx,
@@ -5043,7 +5068,7 @@
return status;
} /* End dot11f_unpack_ie_vendor1_ie. */
-#define SigIeVendor1IE (0x0067)
+#define SigIeVendor1IE (0x0068)
uint32_t dot11f_unpack_ie_vendor3_ie(tpAniSirGlobal pCtx,
@@ -5061,7 +5086,7 @@
return status;
} /* End dot11f_unpack_ie_vendor3_ie. */
-#define SigIeVendor3IE (0x0068)
+#define SigIeVendor3IE (0x0069)
uint32_t dot11f_unpack_ie_wapi(tpAniSirGlobal pCtx,
@@ -5131,7 +5156,7 @@
return status;
} /* End dot11f_unpack_ie_wapi. */
-#define SigIeWAPI (0x0069)
+#define SigIeWAPI (0x006a)
uint32_t dot11f_unpack_ie_wapi_opaque(tpAniSirGlobal pCtx,
@@ -5156,7 +5181,7 @@
return status;
} /* End dot11f_unpack_ie_wapi_opaque. */
-#define SigIeWAPIOpaque (0x006a)
+#define SigIeWAPIOpaque (0x006b)
uint32_t dot11f_unpack_ie_wfatpc(tpAniSirGlobal pCtx,
@@ -5178,7 +5203,7 @@
return status;
} /* End dot11f_unpack_ie_wfatpc. */
-#define SigIeWFATPC (0x006b)
+#define SigIeWFATPC (0x006c)
uint32_t dot11f_unpack_ie_wfdie_opaque(tpAniSirGlobal pCtx,
@@ -5203,7 +5228,7 @@
return status;
} /* End dot11f_unpack_ie_wfdie_opaque. */
-#define SigIeWFDIEOpaque (0x006c)
+#define SigIeWFDIEOpaque (0x006d)
uint32_t dot11f_unpack_ie_wmm_caps(tpAniSirGlobal pCtx,
@@ -5235,7 +5260,7 @@
return status;
} /* End dot11f_unpack_ie_wmm_caps. */
-#define SigIeWMMCaps (0x006d)
+#define SigIeWMMCaps (0x006e)
uint32_t dot11f_unpack_ie_wmm_info_ap(tpAniSirGlobal pCtx,
@@ -5261,7 +5286,7 @@
return status;
} /* End dot11f_unpack_ie_wmm_info_ap. */
-#define SigIeWMMInfoAp (0x006e)
+#define SigIeWMMInfoAp (0x006f)
uint32_t dot11f_unpack_ie_wmm_info_station(tpAniSirGlobal pCtx,
@@ -5291,7 +5316,7 @@
return status;
} /* End dot11f_unpack_ie_wmm_info_station. */
-#define SigIeWMMInfoStation (0x006f)
+#define SigIeWMMInfoStation (0x0070)
uint32_t dot11f_unpack_ie_wmm_params(tpAniSirGlobal pCtx,
@@ -5388,7 +5413,7 @@
return status;
} /* End dot11f_unpack_ie_wmm_params. */
-#define SigIeWMMParams (0x0070)
+#define SigIeWMMParams (0x0071)
uint32_t dot11f_unpack_ie_wpa(tpAniSirGlobal pCtx,
@@ -5462,7 +5487,7 @@
return status;
} /* End dot11f_unpack_ie_wpa. */
-#define SigIeWPA (0x0071)
+#define SigIeWPA (0x0072)
uint32_t dot11f_unpack_ie_wpa_opaque(tpAniSirGlobal pCtx,
@@ -5487,7 +5512,7 @@
return status;
} /* End dot11f_unpack_ie_wpa_opaque. */
-#define SigIeWPAOpaque (0x0072)
+#define SigIeWPAOpaque (0x0073)
static const tTLVDefn TLVS_WSC[] = {
@@ -5578,7 +5603,7 @@
return status;
} /* End dot11f_unpack_ie_wsc. */
-#define SigIeWSC (0x0073)
+#define SigIeWSC (0x0074)
static const tTLVDefn TLVS_WscAssocReq[] = {
@@ -5610,7 +5635,7 @@
return status;
} /* End dot11f_unpack_ie_wsc_assoc_req. */
-#define SigIeWscAssocReq (0x0074)
+#define SigIeWscAssocReq (0x0075)
static const tTLVDefn TLVS_WscAssocRes[] = {
@@ -5642,7 +5667,7 @@
return status;
} /* End dot11f_unpack_ie_wsc_assoc_res. */
-#define SigIeWscAssocRes (0x0075)
+#define SigIeWscAssocRes (0x0076)
static const tTLVDefn TLVS_WscBeacon[] = {
@@ -5695,7 +5720,7 @@
return status;
} /* End dot11f_unpack_ie_wsc_beacon. */
-#define SigIeWscBeacon (0x0076)
+#define SigIeWscBeacon (0x0077)
static const tTLVDefn TLVS_WscBeaconProbeRes[] = {
@@ -5773,7 +5798,7 @@
return status;
} /* End dot11f_unpack_ie_wsc_beacon_probe_res. */
-#define SigIeWscBeaconProbeRes (0x0077)
+#define SigIeWscBeaconProbeRes (0x0078)
uint32_t dot11f_unpack_ie_wsc_ie_opaque(tpAniSirGlobal pCtx,
@@ -5798,7 +5823,7 @@
return status;
} /* End dot11f_unpack_ie_wsc_ie_opaque. */
-#define SigIeWscIEOpaque (0x0078)
+#define SigIeWscIEOpaque (0x0079)
static const tTLVDefn TLVS_WscProbeReq[] = {
@@ -5871,7 +5896,7 @@
return status;
} /* End dot11f_unpack_ie_wsc_probe_req. */
-#define SigIeWscProbeReq (0x0079)
+#define SigIeWscProbeReq (0x007a)
static const tTLVDefn TLVS_WscProbeRes[] = {
@@ -5949,7 +5974,7 @@
return status;
} /* End dot11f_unpack_ie_wsc_probe_res. */
-#define SigIeWscProbeRes (0x007a)
+#define SigIeWscProbeRes (0x007b)
static const tTLVDefn TLVS_WscReassocRes[] = {
@@ -5981,7 +6006,7 @@
return status;
} /* End dot11f_unpack_ie_wsc_reassoc_res. */
-#define SigIeWscReassocRes (0x007b)
+#define SigIeWscReassocRes (0x007c)
uint32_t dot11f_unpack_ie_ext_chan_switch_ann(tpAniSirGlobal pCtx,
@@ -6009,7 +6034,7 @@
return status;
} /* End dot11f_unpack_ie_ext_chan_switch_ann. */
-#define SigIeext_chan_switch_ann (0x007c)
+#define SigIeext_chan_switch_ann (0x007d)
uint32_t dot11f_unpack_ie_fils_assoc_delay_info(tpAniSirGlobal pCtx,
@@ -6028,7 +6053,7 @@
return status;
} /* End dot11f_unpack_ie_fils_assoc_delay_info. */
-#define SigIefils_assoc_delay_info (0x007d)
+#define SigIefils_assoc_delay_info (0x007e)
uint32_t dot11f_unpack_ie_fils_hlp_container(tpAniSirGlobal pCtx,
@@ -6054,7 +6079,7 @@
return status;
} /* End dot11f_unpack_ie_fils_hlp_container. */
-#define SigIefils_hlp_container (0x007e)
+#define SigIefils_hlp_container (0x007f)
uint32_t dot11f_unpack_ie_fils_indication(tpAniSirGlobal pCtx,
@@ -6087,7 +6112,7 @@
return status;
} /* End dot11f_unpack_ie_fils_indication. */
-#define SigIefils_indication (0x007f)
+#define SigIefils_indication (0x0080)
uint32_t dot11f_unpack_ie_fils_kde(tpAniSirGlobal pCtx,
@@ -6110,7 +6135,7 @@
return status;
} /* End dot11f_unpack_ie_fils_kde. */
-#define SigIefils_kde (0x0080)
+#define SigIefils_kde (0x0081)
uint32_t dot11f_unpack_ie_fils_key_confirmation(tpAniSirGlobal pCtx,
@@ -6130,7 +6155,7 @@
return status;
} /* End dot11f_unpack_ie_fils_key_confirmation. */
-#define SigIefils_key_confirmation (0x0081)
+#define SigIefils_key_confirmation (0x0082)
uint32_t dot11f_unpack_ie_fils_nonce(tpAniSirGlobal pCtx,
@@ -6149,7 +6174,7 @@
return status;
} /* End dot11f_unpack_ie_fils_nonce. */
-#define SigIefils_nonce (0x0082)
+#define SigIefils_nonce (0x0083)
uint32_t dot11f_unpack_ie_fils_public_key(tpAniSirGlobal pCtx,
@@ -6172,7 +6197,7 @@
return status;
} /* End dot11f_unpack_ie_fils_public_key. */
-#define SigIefils_public_key (0x0083)
+#define SigIefils_public_key (0x0084)
uint32_t dot11f_unpack_ie_fils_session(tpAniSirGlobal pCtx,
@@ -6191,7 +6216,7 @@
return status;
} /* End dot11f_unpack_ie_fils_session. */
-#define SigIefils_session (0x0084)
+#define SigIefils_session (0x0085)
uint32_t dot11f_unpack_ie_fils_wrapped_data(tpAniSirGlobal pCtx,
@@ -6211,7 +6236,7 @@
return status;
} /* End dot11f_unpack_ie_fils_wrapped_data. */
-#define SigIefils_wrapped_data (0x0085)
+#define SigIefils_wrapped_data (0x0086)
uint32_t dot11f_unpack_ie_fragment_ie(tpAniSirGlobal pCtx,
@@ -6231,7 +6256,7 @@
return status;
} /* End dot11f_unpack_ie_fragment_ie. */
-#define SigIefragment_ie (0x0086)
+#define SigIefragment_ie (0x0087)
uint32_t dot11f_unpack_ie_hs20vendor_ie(tpAniSirGlobal pCtx,
@@ -6273,7 +6298,7 @@
return status;
} /* End dot11f_unpack_ie_hs20vendor_ie. */
-#define SigIehs20vendor_ie (0x0087)
+#define SigIehs20vendor_ie (0x0088)
uint32_t dot11f_unpack_ie_ht2040_bss_coexistence(tpAniSirGlobal pCtx,
@@ -6299,7 +6324,7 @@
return status;
} /* End dot11f_unpack_ie_ht2040_bss_coexistence. */
-#define SigIeht2040_bss_coexistence (0x0088)
+#define SigIeht2040_bss_coexistence (0x0089)
uint32_t dot11f_unpack_ie_ht2040_bss_intolerant_report(tpAniSirGlobal pCtx,
@@ -6327,7 +6352,7 @@
return status;
} /* End dot11f_unpack_ie_ht2040_bss_intolerant_report. */
-#define SigIeht2040_bss_intolerant_report (0x0089)
+#define SigIeht2040_bss_intolerant_report (0x008a)
uint32_t dot11f_unpack_ie_osen_ie(tpAniSirGlobal pCtx,
@@ -6347,7 +6372,7 @@
return status;
} /* End dot11f_unpack_ie_osen_ie. */
-#define SigIeosen_ie (0x008a)
+#define SigIeosen_ie (0x008b)
uint32_t dot11f_unpack_ie_sec_chan_offset_ele(tpAniSirGlobal pCtx,
@@ -6366,7 +6391,7 @@
return status;
} /* End dot11f_unpack_ie_sec_chan_offset_ele. */
-#define SigIesec_chan_offset_ele (0x008b)
+#define SigIesec_chan_offset_ele (0x008c)
static const tFFDefn FFS_vendor_vht_ie[] = {
@@ -6413,7 +6438,7 @@
return status;
} /* End dot11f_unpack_ie_vendor_vht_ie. */
-#define SigIevendor_vht_ie (0x008c)
+#define SigIevendor_vht_ie (0x008d)
static const tFFDefn FFS_AddTSRequest[] = {
@@ -7098,6 +7123,10 @@
{ offsetof(tDot11fBeacon, QCN_IE), offsetof(tDot11fIEQCN_IE, present), 0,
"QCN_IE", 0, 10, 10, SigIeQCN_IE, {140, 253, 240, 1, 0},
4, DOT11F_EID_QCN_IE, 0, 0, },
+ { offsetof(tDot11fBeacon, ESP_information),
+ offsetof(tDot11fIEESP_information, present), 0, "ESP_information",
+ 0, 2, 98, SigIeESP_information, {0, 0, 0, 0, 0},
+ 0, DOT11F_EID_ESP_INFORMATION, 11, 0, },
{0, 0, 0, NULL, 0, 0, 0, 0, {0, 0, 0, 0, 0}, 0, 0xff, 0, },};
uint32_t dot11f_unpack_beacon(tpAniSirGlobal pCtx,
@@ -7306,6 +7335,10 @@
{ offsetof(tDot11fBeacon2, QCN_IE), offsetof(tDot11fIEQCN_IE, present), 0,
"QCN_IE", 0, 10, 10, SigIeQCN_IE, {140, 253, 240, 1, 0},
4, DOT11F_EID_QCN_IE, 0, 0, },
+ { offsetof(tDot11fBeacon2, ESP_information),
+ offsetof(tDot11fIEESP_information, present), 0, "ESP_information",
+ 0, 2, 98, SigIeESP_information, {0, 0, 0, 0, 0},
+ 0, DOT11F_EID_ESP_INFORMATION, 11, 0, },
{0, 0, 0, NULL, 0, 0, 0, 0, {0, 0, 0, 0, 0}, 0, 0xff, 0, },};
uint32_t dot11f_unpack_beacon2(tpAniSirGlobal pCtx,
@@ -7514,6 +7547,10 @@
{ offsetof(tDot11fBeaconIEs, QCN_IE), offsetof(tDot11fIEQCN_IE, present),
0, "QCN_IE", 0, 10, 10, SigIeQCN_IE, {140, 253, 240, 1, 0},
4, DOT11F_EID_QCN_IE, 0, 0, },
+ { offsetof(tDot11fBeaconIEs, ESP_information),
+ offsetof(tDot11fIEESP_information, present), 0, "ESP_information",
+ 0, 2, 98, SigIeESP_information, {0, 0, 0, 0, 0},
+ 0, DOT11F_EID_ESP_INFORMATION, 11, 0, },
{0, 0, 0, NULL, 0, 0, 0, 0, {0, 0, 0, 0, 0}, 0, 0xff, 0, },};
uint32_t dot11f_unpack_beacon_i_es(tpAniSirGlobal pCtx,
@@ -8138,6 +8175,10 @@
{ offsetof(tDot11fProbeResponse, QCN_IE), offsetof(tDot11fIEQCN_IE,
present), 0, "QCN_IE", 0, 10, 10, SigIeQCN_IE, {140, 253, 240, 1, 0},
4, DOT11F_EID_QCN_IE, 0, 0, },
+ { offsetof(tDot11fProbeResponse, ESP_information),
+ offsetof(tDot11fIEESP_information, present), 0, "ESP_information",
+ 0, 2, 98, SigIeESP_information, {0, 0, 0, 0, 0},
+ 0, DOT11F_EID_ESP_INFORMATION, 11, 0, },
{0, 0, 0, NULL, 0, 0, 0, 0, {0, 0, 0, 0, 0}, 0, 0xff, 0, },};
uint32_t dot11f_unpack_probe_response(tpAniSirGlobal pCtx,
@@ -10236,6 +10277,16 @@
countOffset),
append_ie);
break;
+ case SigIeESP_information:
+ status |=
+ dot11f_unpack_ie_ESP_information(
+ pCtx, pBufRemaining, len,
+ (tDot11fIEESP_information *)
+ (pFrm + pIe->offset +
+ sizeof(tDot11fIEESP_information) *
+ countOffset),
+ append_ie);
+ break;
case SigIeExtCap:
status |=
dot11f_unpack_ie_ext_cap(
@@ -13222,6 +13273,15 @@
(pFrm + pIe->offset + offset * i))->
present;
break;
+ case SigIeESP_information:
+ offset = sizeof(tDot11fIEESP_information);
+ byteCount = ((tDot11fIEESP_information *)
+ (pFrm + pIe->offset + offset * i))->
+ num_variable_data;
+ pIePresent = ((tDot11fIEESP_information *)
+ (pFrm + pIe->offset + offset * i))->
+ present;
+ break;
case SigIeExtCap:
offset = sizeof(tDot11fIEExtCap);
byteCount = ((tDot11fIEExtCap *)
@@ -17895,6 +17955,37 @@
return DOT11F_PARSE_SUCCESS;
} /* End dot11f_pack_ie_ese_version. */
+uint32_t dot11f_pack_ie_ESP_information(tpAniSirGlobal pCtx,
+ tDot11fIEESP_information *pSrc,
+ uint8_t *pBuf,
+ uint32_t nBuf,
+ uint32_t *pnConsumed)
+{
+ uint8_t *pIeLen = 0;
+ uint32_t nConsumedOnEntry = *pnConsumed;
+ uint32_t nNeeded = 0U;
+ nNeeded += pSrc->num_variable_data;
+ while (pSrc->present) {
+ if (nNeeded > nBuf)
+ return DOT11F_BUFFER_OVERFLOW;
+ *pBuf = 255;
+ ++pBuf; ++(*pnConsumed);
+ pIeLen = pBuf;
+ ++pBuf; ++(*pnConsumed);
+ *pBuf = 11;
+ ++pBuf; ++(*pnConsumed);
+ DOT11F_MEMCPY(pCtx, pBuf, &(pSrc->variable_data), pSrc->num_variable_data);
+ *pnConsumed += pSrc->num_variable_data;
+ /* fieldsEndFlag = 1 */
+ break;
+ }
+ (void)pCtx;
+ if (pIeLen) {
+ *pIeLen = *pnConsumed - nConsumedOnEntry - 2;
+ }
+ return DOT11F_PARSE_SUCCESS;
+} /* End dot11f_pack_ie_ESP_information. */
+
uint32_t dot11f_pack_ie_ext_cap(tpAniSirGlobal pCtx,
tDot11fIEExtCap *pSrc,
uint8_t *pBuf,
@@ -23209,6 +23300,14 @@
sizeof(tDot11fIEESEVersion) * i),
pBufRemaining, nBufRemaining, &len);
break;
+ case SigIeESP_information:
+ status |=
+ dot11f_pack_ie_ESP_information(
+ pCtx, (tDot11fIEESP_information *)
+ (pSrc + pIe->offset +
+ sizeof(tDot11fIEESP_information) * i),
+ pBufRemaining, nBufRemaining, &len);
+ break;
case SigIeExtCap:
status |=
dot11f_pack_ie_ext_cap(
diff --git a/drivers/staging/qcacld-3.0/core/mac/src/sys/legacy/src/utils/src/parser_api.c b/drivers/staging/qcacld-3.0/core/mac/src/sys/legacy/src/utils/src/parser_api.c
index 76c3a34..0a52cbe 100644
--- a/drivers/staging/qcacld-3.0/core/mac/src/sys/legacy/src/utils/src/parser_api.c
+++ b/drivers/staging/qcacld-3.0/core/mac/src/sys/legacy/src/utils/src/parser_api.c
@@ -2138,7 +2138,7 @@
}
/* & "transliterate" from a 'tDot11fProbeRequestto' a 'tSirProbeReq'... */
if (!pr.SSID.present) {
- pe_warn("Mandatory IE SSID not present!");
+ pe_debug_rate_limited(30, "Mandatory IE SSID not present!");
} else {
pProbeReq->ssidPresent = 1;
convert_ssid(pMac, &pProbeReq->ssId, &pr.SSID);
@@ -2282,6 +2282,71 @@
fils_info->key_auth, fils_info->key_auth_len);
}
}
+/**
+ * update_esp_data: update ESP params from beacon/probe response
+ * @esp_information: pointer to sir_esp_information
+ * @esp_indication: pointer to tDot11fIEESP_information
+ *
+ * The Estimated Service Parameters element is
+ * used by an AP to provide information to another STA which
+ * can then use the information as input to an algorithm to
+ * generate an estimate of throughput between the two STAs.
+ * The ESP Information List field contains from 1 to 4 ESP
+ * Information fields (each field 24 bits), each corresponding
+ * to an access category for which estimated service parameters
+ * information is provided.
+ *
+ * Return: None
+ */
+
+static void update_esp_data(struct sir_esp_information *esp_information,
+ tDot11fIEESP_information *esp_indication)
+{
+
+ uint8_t *data;
+ int i = 0;
+ int total_elements;
+ struct sir_esp_info *esp_info;
+
+ data = esp_indication->variable_data;
+ total_elements = esp_indication->num_variable_data;
+ esp_information->is_present = esp_indication->present;
+ do_div(total_elements, ESP_INFORMATION_LIST_LENGTH);
+
+ if (total_elements > 4) {
+ pe_err("No of Air time fractions are greater than supported");
+ return;
+ }
+
+ for (i = 0; i < total_elements; i++) {
+ esp_info = (struct sir_esp_info *)data;
+ if (esp_info->access_category == ESP_AC_BK) {
+ qdf_mem_copy(&esp_information->esp_info_AC_BK,
+ data, 3);
+ data = data + ESP_INFORMATION_LIST_LENGTH;
+ continue;
+ }
+ if (esp_info->access_category == ESP_AC_BE) {
+ qdf_mem_copy(&esp_information->esp_info_AC_BE,
+ data, 3);
+ data = data + ESP_INFORMATION_LIST_LENGTH;
+ continue;
+ }
+ if (esp_info->access_category == ESP_AC_VI) {
+ qdf_mem_copy(&esp_information->esp_info_AC_VI,
+ data, 3);
+ data = data + ESP_INFORMATION_LIST_LENGTH;
+ continue;
+ }
+ if (esp_info->access_category == ESP_AC_VO) {
+ qdf_mem_copy(&esp_information->esp_info_AC_VO,
+ data, 3);
+ data = data + ESP_INFORMATION_LIST_LENGTH;
+ break;
+ }
+ }
+ return;
+}
/**
* update_fils_data: update fils params from beacon/probe response
@@ -2349,6 +2414,11 @@
tDot11fProbeResponse *pr)
{
}
+
+static void update_esp_data(struct sir_esp_information *esp_information,
+ tDot11fIEESP_information *esp_indication)
+{
+}
#endif
void sir_copy_caps_info(tpAniSirGlobal mac_ctx, tDot11fFfCapabilities caps,
@@ -2372,6 +2442,24 @@
pProbeResp->capabilityInfo.immediateBA = caps.immediateBA;
}
+/**
+ * sir_convert_esp_data_to_probersp_struct: update ESP params from probe resp
+ * @probe_resp: pointer to tpSirProbeRespBeacon
+ * @pr: pointer to tDot11fProbeResponse
+ *
+ * Return: None
+ */
+static void
+sir_convert_esp_data_to_probersp_struct(tpSirProbeRespBeacon probe_resp,
+ tDot11fProbeResponse *pr)
+{
+ if (!pr->ESP_information.present)
+ return;
+
+ update_esp_data(&probe_resp->esp_information,
+ &pr->ESP_information);
+}
+
tSirRetStatus sir_convert_probe_frame2_struct(tpAniSirGlobal pMac,
uint8_t *pFrame,
uint32_t nFrame,
@@ -2411,7 +2499,7 @@
sir_copy_caps_info(pMac, pr->Capabilities, pProbeResp);
if (!pr->SSID.present) {
- pe_warn("Mandatory IE SSID not present!");
+ pe_debug_rate_limited(30, "Mandatory IE SSID not present!");
} else {
pProbeResp->ssidPresent = 1;
convert_ssid(pMac, &pProbeResp->ssId, &pr->SSID);
@@ -2638,12 +2726,15 @@
}
}
+ sir_convert_esp_data_to_probersp_struct(pProbeResp, pr);
sir_convert_fils_data_to_probersp_struct(pProbeResp, pr);
qdf_mem_free(pr);
return eSIR_SUCCESS;
} /* End sir_convert_probe_frame2_struct. */
+
+
tSirRetStatus
sir_convert_assoc_req_frame2_struct(tpAniSirGlobal pMac,
uint8_t *pFrame,
@@ -3369,7 +3460,7 @@
}
/* & "transliterate" from a 'tDot11fBeaconIEs' to a 'eseBcnReportMandatoryIe'... */
if (!pBies->SSID.present) {
- pe_warn("Mandatory IE SSID not present!");
+ pe_debug_rate_limited(30, "Mandatory IE SSID not present!");
} else {
eseBcnReportMandatoryIe.ssidPresent = 1;
convert_ssid(pMac, &eseBcnReportMandatoryIe.ssId, &pBies->SSID);
@@ -3674,7 +3765,7 @@
}
/* & "transliterate" from a 'tDot11fBeaconIEs' to a 'tSirProbeRespBeacon'... */
if (!pBies->SSID.present) {
- pe_warn("Mandatory IE SSID not present!");
+ pe_debug_rate_limited(30, "Mandatory IE SSID not present!");
} else {
pBeaconStruct->ssidPresent = 1;
convert_ssid(pMac, &pBeaconStruct->ssId, &pBies->SSID);
@@ -3952,6 +4043,24 @@
}
#endif
+/**
+ * sir_convert_esp_data_to_beacon_struct: update ESP params from beacon
+ * @beacon_struct: pointer to tpSirProbeRespBeacon
+ * @beacon: pointer to tDot11fBeacon
+ *
+ * Return: None
+ */
+static void
+sir_convert_esp_data_to_beacon_struct(tpSirProbeRespBeacon beacon_struct,
+ tDot11fBeacon *beacon)
+{
+ if (!beacon->ESP_information.present)
+ return;
+
+ update_esp_data(&beacon_struct->esp_information,
+ &beacon->ESP_information);
+}
+
tSirRetStatus
sir_convert_beacon_frame2_struct(tpAniSirGlobal pMac,
uint8_t *pFrame,
@@ -4028,7 +4137,7 @@
pBeacon->Capabilities.immediateBA;
if (!pBeacon->SSID.present) {
- pe_warn("Mandatory IE SSID not present!");
+ pe_debug_rate_limited(30, "Mandatory IE SSID not present!");
} else {
pBeaconStruct->ssidPresent = 1;
convert_ssid(pMac, &pBeaconStruct->ssId, &pBeacon->SSID);
@@ -4307,6 +4416,7 @@
}
}
+ sir_convert_esp_data_to_beacon_struct(pBeaconStruct, pBeacon);
sir_convert_fils_data_to_beacon_struct(pBeaconStruct, pBeacon);
qdf_mem_free(pBeacon);
return eSIR_SUCCESS;
diff --git a/drivers/staging/qcacld-3.0/core/pld/inc/pld_common.h b/drivers/staging/qcacld-3.0/core/pld/inc/pld_common.h
index e9e3824..97925cd 100644
--- a/drivers/staging/qcacld-3.0/core/pld/inc/pld_common.h
+++ b/drivers/staging/qcacld-3.0/core/pld/inc/pld_common.h
@@ -139,6 +139,7 @@
enum pld_uevent {
PLD_RECOVERY,
PLD_FW_DOWN,
+ PLD_FW_READY,
};
/**
diff --git a/drivers/staging/qcacld-3.0/core/pld/src/pld_snoc.c b/drivers/staging/qcacld-3.0/core/pld/src/pld_snoc.c
index 3b29a01..2ae6ce0 100644
--- a/drivers/staging/qcacld-3.0/core/pld/src/pld_snoc.c
+++ b/drivers/staging/qcacld-3.0/core/pld/src/pld_snoc.c
@@ -263,6 +263,10 @@
data.uevent = PLD_FW_DOWN;
data.fw_down.crashed = uevent_data->crashed;
break;
+ case ICNSS_UEVENT_FW_READY:
+ data.uevent = PLD_FW_READY;
+ break;
+
default:
return 0;
}
diff --git a/drivers/staging/qcacld-3.0/core/sme/inc/csr_api.h b/drivers/staging/qcacld-3.0/core/sme/inc/csr_api.h
index 5fabb15..e4c2dd0 100644
--- a/drivers/staging/qcacld-3.0/core/sme/inc/csr_api.h
+++ b/drivers/staging/qcacld-3.0/core/sme/inc/csr_api.h
@@ -1079,6 +1079,7 @@
uint32_t nNeighborScanTimerPeriod;
uint8_t nNeighborLookupRssiThreshold;
+ int8_t rssi_thresh_offset_5g;
uint16_t nNeighborScanMinChanTime;
uint16_t nNeighborScanMaxChanTime;
sCsrChannel neighborScanChanList;
@@ -1315,6 +1316,7 @@
uint32_t roam_dense_rssi_thresh_offset;
uint32_t roam_dense_min_aps;
int8_t roam_bg_scan_bad_rssi_thresh;
+ uint8_t roam_bad_rssi_thresh_offset_2g;
uint32_t roam_bg_scan_client_bitmap;
uint32_t obss_width_interval;
uint32_t obss_active_dwelltime;
@@ -1351,6 +1353,7 @@
uint32_t disallow_duration;
uint32_t rssi_channel_penalization;
uint32_t num_disallowed_aps;
+ struct sir_score_config bss_score_params;
} tCsrConfigParam;
/* Tush */
diff --git a/drivers/staging/qcacld-3.0/core/sme/inc/csr_internal.h b/drivers/staging/qcacld-3.0/core/sme/inc/csr_internal.h
index 4a49d70..da05c55 100644
--- a/drivers/staging/qcacld-3.0/core/sme/inc/csr_internal.h
+++ b/drivers/staging/qcacld-3.0/core/sme/inc/csr_internal.h
@@ -453,6 +453,7 @@
typedef struct tagCsrNeighborRoamConfig {
uint32_t nNeighborScanTimerPeriod;
uint8_t nNeighborLookupRssiThreshold;
+ int8_t rssi_thresh_offset_5g;
uint16_t nNeighborScanMinChanTime;
uint16_t nNeighborScanMaxChanTime;
sCsrChannel neighborScanChanList;
@@ -656,6 +657,7 @@
uint32_t disallow_duration;
uint32_t rssi_channel_penalization;
uint32_t num_disallowed_aps;
+ struct sir_score_config bss_score_params;
} tCsrConfig;
typedef struct tagCsrChannelPowerInfo {
diff --git a/drivers/staging/qcacld-3.0/core/sme/inc/csr_neighbor_roam.h b/drivers/staging/qcacld-3.0/core/sme/inc/csr_neighbor_roam.h
index dff1bd9..bddc3c6 100644
--- a/drivers/staging/qcacld-3.0/core/sme/inc/csr_neighbor_roam.h
+++ b/drivers/staging/qcacld-3.0/core/sme/inc/csr_neighbor_roam.h
@@ -56,6 +56,7 @@
uint32_t neighborScanPeriod;
tCsrChannelInfo channelInfo;
uint8_t neighborLookupThreshold;
+ int8_t rssi_thresh_offset_5g;
uint8_t neighborReassocThreshold;
uint32_t minChannelScanTime;
uint32_t maxChannelScanTime;
diff --git a/drivers/staging/qcacld-3.0/core/sme/src/common/sme_power_save.c b/drivers/staging/qcacld-3.0/core/sme/src/common/sme_power_save.c
index 40bd467..95c0a64 100644
--- a/drivers/staging/qcacld-3.0/core/sme/src/common/sme_power_save.c
+++ b/drivers/staging/qcacld-3.0/core/sme/src/common/sme_power_save.c
@@ -531,6 +531,7 @@
QDF_STATUS sme_ps_timer_flush_sync(tHalHandle hal, uint8_t session_id)
{
+ QDF_STATUS status;
tpAniSirGlobal mac_ctx = PMAC_STRUCT(hal);
struct ps_params *ps_parm;
enum ps_state ps_state;
@@ -538,28 +539,27 @@
struct sEnablePsParams *req;
t_wma_handle *wma;
+ status = sme_enable_sta_ps_check(mac_ctx, session_id);
+ if (QDF_IS_STATUS_ERROR(status)) {
+ sme_debug("Power save not allowed for vdev id %d", session_id);
+ return QDF_STATUS_SUCCESS;
+ }
+
ps_parm = &mac_ctx->sme.ps_global_info.ps_params[session_id];
tstate = qdf_mc_timer_get_current_state(&ps_parm->auto_ps_enable_timer);
if (tstate != QDF_TIMER_STATE_RUNNING)
return QDF_STATUS_SUCCESS;
- if (QDF_STATUS_SUCCESS != sme_enable_sta_ps_check(mac_ctx,
- session_id)) {
- sme_debug("Power save not allowed for vdev id %d", session_id);
- qdf_mc_timer_stop(&ps_parm->auto_ps_enable_timer);
- return QDF_STATUS_SUCCESS;
- }
-
sme_debug("flushing powersave enable for vdev %u", session_id);
+ qdf_mc_timer_stop(&ps_parm->auto_ps_enable_timer);
+
wma = cds_get_context(QDF_MODULE_ID_WMA);
if (!wma) {
sme_err("wma is null");
return QDF_STATUS_E_INVAL;
}
- qdf_mc_timer_stop(&ps_parm->auto_ps_enable_timer);
-
req = qdf_mem_malloc(sizeof(*req));
if (!req) {
sme_err("out of memory");
diff --git a/drivers/staging/qcacld-3.0/core/sme/src/csr/csr_api_roam.c b/drivers/staging/qcacld-3.0/core/sme/src/csr/csr_api_roam.c
index 50e8521..b41ef68 100644
--- a/drivers/staging/qcacld-3.0/core/sme/src/csr/csr_api_roam.c
+++ b/drivers/staging/qcacld-3.0/core/sme/src/csr/csr_api_roam.c
@@ -1571,6 +1571,7 @@
pMac->roam.configParam.neighborRoamConfig.nMaxNeighborRetries = 3;
pMac->roam.configParam.neighborRoamConfig.nNeighborLookupRssiThreshold =
120;
+ pMac->roam.configParam.neighborRoamConfig.rssi_thresh_offset_5g = 0;
pMac->roam.configParam.neighborRoamConfig.nOpportunisticThresholdDiff =
30;
pMac->roam.configParam.neighborRoamConfig.nRoamRescanRssiDiff = 5;
@@ -1604,6 +1605,27 @@
pMac->roam.configParam.nInitialDwellTime = 0;
pMac->roam.configParam.initial_scan_no_dfs_chnl = 0;
+
+ qdf_mem_zero(&pMac->roam.configParam.bss_score_params,
+ sizeof(struct sir_score_config));
+ pMac->roam.configParam.bss_score_params.weight_cfg.rssi_weightage =
+ RSSI_WEIGHTAGE;
+ pMac->roam.configParam.bss_score_params.weight_cfg.ht_caps_weightage =
+ HT_CAPABILITY_WEIGHTAGE;
+ pMac->roam.configParam.bss_score_params.weight_cfg.vht_caps_weightage =
+ VHT_CAP_WEIGHTAGE;
+ pMac->roam.configParam.bss_score_params.
+ weight_cfg.chan_width_weightage = CHAN_WIDTH_WEIGHTAGE;
+ pMac->roam.configParam.bss_score_params.
+ weight_cfg.chan_band_weightage = CHAN_BAND_WEIGHTAGE;
+ pMac->roam.configParam.bss_score_params.weight_cfg.nss_weightage =
+ NSS_WEIGHTAGE;
+ pMac->roam.configParam.bss_score_params.weight_cfg.
+ beamforming_cap_weightage = BEAMFORMING_CAP_WEIGHTAGE;
+ pMac->roam.configParam.bss_score_params.weight_cfg.pcl_weightage =
+ PCL_WEIGHT;
+ pMac->roam.configParam.bss_score_params.weight_cfg.
+ channel_congestion_weightage = CHANNEL_CONGESTION_WEIGHTAGE;
}
eCsrBand csr_get_current_band(tHalHandle hHal)
@@ -2488,6 +2510,8 @@
sme_debug("nNeighborLookupRssiThreshold: %d",
pMac->roam.configParam.neighborRoamConfig.
nNeighborLookupRssiThreshold);
+ sme_debug("rssi_thresh_offset_5g: %d",
+ pMac->roam.configParam.neighborRoamConfig.rssi_thresh_offset_5g);
sme_debug("nOpportunisticThresholdDiff: %d",
pMac->roam.configParam.neighborRoamConfig.
nOpportunisticThresholdDiff);
@@ -2624,11 +2648,8 @@
pParam->roam_bg_scan_bad_rssi_thresh;
pMac->roam.configParam.roam_params.bg_scan_client_bitmap =
pParam->roam_bg_scan_client_bitmap;
-
- pMac->roam.configParam.roam_params.bg_scan_bad_rssi_thresh =
- pParam->roam_bg_scan_bad_rssi_thresh;
- pMac->roam.configParam.roam_params.bg_scan_client_bitmap =
- pParam->roam_bg_scan_client_bitmap;
+ pMac->roam.configParam.roam_params.roam_bad_rssi_thresh_offset_2g =
+ pParam->roam_bad_rssi_thresh_offset_2g;
pMac->roam.configParam.scan_adaptive_dwell_mode =
pParam->scan_adaptive_dwell_mode;
@@ -2739,6 +2760,10 @@
pMac->roam.configParam.num_disallowed_aps =
pParam->num_disallowed_aps;
+ qdf_mem_copy(&pMac->roam.configParam.bss_score_params,
+ &pParam->bss_score_params,
+ sizeof(struct sir_score_config));
+
}
return status;
}
@@ -2879,6 +2904,8 @@
cfg_params->roam_params.bg_scan_bad_rssi_thresh;
pParam->roam_bg_scan_client_bitmap =
cfg_params->roam_params.bg_scan_client_bitmap;
+ pParam->roam_bad_rssi_thresh_offset_2g =
+ cfg_params->roam_params.roam_bad_rssi_thresh_offset_2g;
pParam->scan_adaptive_dwell_mode =
cfg_params->scan_adaptive_dwell_mode;
@@ -2994,6 +3021,9 @@
pMac->roam.configParam.rssi_channel_penalization;
pParam->num_disallowed_aps =
pMac->roam.configParam.num_disallowed_aps;
+ qdf_mem_copy(&pParam->bss_score_params,
+ &pMac->roam.configParam.bss_score_params,
+ sizeof(struct sir_score_config));
return QDF_STATUS_SUCCESS;
}
@@ -18732,6 +18762,84 @@
}
/**
+ * csr_update_score_params() - API to update Score params in RSO
+ * @mac_ctx: Mac context
+ * @req_buffer: RSO request buffer
+ *
+ * Return: None
+ */
+static void csr_update_score_params(tpAniSirGlobal mac_ctx,
+ tSirRoamOffloadScanReq *req_buffer)
+{
+ struct scoring_param *req_score_params;
+ struct rssi_scoring *req_rssi_score;
+ struct sir_score_config *bss_score_params;
+ struct sir_weight_config *weight_config;
+ struct sir_rssi_cfg_score *rssi_score;
+
+ req_score_params = &req_buffer->score_params;
+ req_rssi_score = &req_score_params->rssi_scoring;
+
+ bss_score_params = &mac_ctx->roam.configParam.bss_score_params;
+ weight_config = &bss_score_params->weight_cfg;
+ rssi_score = &bss_score_params->rssi_score;
+
+ req_score_params->rssi_weightage = weight_config->rssi_weightage;
+ req_score_params->ht_weightage = weight_config->ht_caps_weightage;
+ req_score_params->vht_weightage = weight_config->vht_caps_weightage;
+ req_score_params->he_weightage = weight_config->he_caps_weightage;
+ req_score_params->bw_weightage = weight_config->chan_width_weightage;
+ req_score_params->band_weightage = weight_config->chan_band_weightage;
+ req_score_params->nss_weightage = weight_config->nss_weightage;
+ req_score_params->esp_qbss_weightage =
+ weight_config->channel_congestion_weightage;
+ req_score_params->beamforming_weightage =
+ weight_config->beamforming_cap_weightage;
+ req_score_params->pcl_weightage =
+ weight_config->pcl_weightage;
+ req_score_params->oce_wan_weightage = weight_config->oce_wan_weightage;
+
+ req_score_params->bw_index_score =
+ bss_score_params->bandwidth_weight_per_index;
+ req_score_params->band_index_score =
+ bss_score_params->band_weight_per_index;
+ req_score_params->nss_index_score =
+ bss_score_params->nss_weight_per_index;
+
+ req_rssi_score->best_rssi_threshold = rssi_score->best_rssi_threshold;
+ req_rssi_score->good_rssi_threshold = rssi_score->good_rssi_threshold;
+ req_rssi_score->bad_rssi_threshold = rssi_score->bad_rssi_threshold;
+ req_rssi_score->good_rssi_pcnt = rssi_score->good_rssi_pcnt;
+ req_rssi_score->bad_rssi_pcnt = rssi_score->bad_rssi_pcnt;
+ req_rssi_score->good_bucket_size = rssi_score->good_rssi_bucket_size;
+ req_rssi_score->bad_bucket_size = rssi_score->bad_rssi_bucket_size;
+ req_rssi_score->rssi_pref_5g_rssi_thresh =
+ rssi_score->rssi_pref_5g_rssi_thresh;
+
+ req_score_params->esp_qbss_scoring.num_slot =
+ bss_score_params->esp_qbss_scoring.num_slot;
+ req_score_params->esp_qbss_scoring.score_pcnt3_to_0 =
+ bss_score_params->esp_qbss_scoring.score_pcnt3_to_0;
+ req_score_params->esp_qbss_scoring.score_pcnt7_to_4 =
+ bss_score_params->esp_qbss_scoring.score_pcnt7_to_4;
+ req_score_params->esp_qbss_scoring.score_pcnt11_to_8 =
+ bss_score_params->esp_qbss_scoring.score_pcnt11_to_8;
+ req_score_params->esp_qbss_scoring.score_pcnt15_to_12 =
+ bss_score_params->esp_qbss_scoring.score_pcnt15_to_12;
+
+ req_score_params->oce_wan_scoring.num_slot =
+ bss_score_params->oce_wan_scoring.num_slot;
+ req_score_params->oce_wan_scoring.score_pcnt3_to_0 =
+ bss_score_params->oce_wan_scoring.score_pcnt3_to_0;
+ req_score_params->oce_wan_scoring.score_pcnt7_to_4 =
+ bss_score_params->oce_wan_scoring.score_pcnt7_to_4;
+ req_score_params->oce_wan_scoring.score_pcnt11_to_8 =
+ bss_score_params->oce_wan_scoring.score_pcnt11_to_8;
+ req_score_params->oce_wan_scoring.score_pcnt15_to_12 =
+ bss_score_params->oce_wan_scoring.score_pcnt15_to_12;
+
+}
+/**
* csr_roam_offload_scan() - populates roam offload scan request and sends to
* WMA
*
@@ -18847,6 +18955,9 @@
req_buf->LookupThreshold =
(int8_t)roam_info->cfgParams.neighborLookupThreshold *
(-1);
+ req_buf->rssi_thresh_offset_5g =
+ roam_info->cfgParams.rssi_thresh_offset_5g;
+ sme_debug("5g offset threshold: %d", req_buf->rssi_thresh_offset_5g);
qdf_mem_copy(roam_params_dst, roam_params_src,
sizeof(*roam_params_dst));
/*
@@ -18883,9 +18994,10 @@
roam_params_dst->traffic_threshold,
roam_params_dst->initial_dense_status,
mac_ctx->scan.roam_candidate_count[session_id]);
- sme_debug("BG Scan Bad RSSI:%d, bitmap:0x%x",
+ sme_debug("BG Scan Bad RSSI:%d, bitmap:0x%x Offset for 2G to 5G Roam %d",
roam_params_dst->bg_scan_bad_rssi_thresh,
- roam_params_dst->bg_scan_client_bitmap);
+ roam_params_dst->bg_scan_client_bitmap,
+ roam_params_dst->roam_bad_rssi_thresh_offset_2g);
for (i = 0; i < roam_params_dst->num_bssid_avoid_list; i++) {
QDF_TRACE(QDF_MODULE_ID_SME, QDF_TRACE_LEVEL_DEBUG,
@@ -18937,6 +19049,7 @@
session->pAddIEAssoc,
session->nAddIEAssocLength);
csr_update_driver_assoc_ies(mac_ctx, session, req_buf);
+ csr_update_score_params(mac_ctx, req_buf);
}
QDF_TRACE(QDF_MODULE_ID_SME, QDF_TRACE_LEVEL_DEBUG,
diff --git a/drivers/staging/qcacld-3.0/core/sme/src/csr/csr_api_scan.c b/drivers/staging/qcacld-3.0/core/sme/src/csr/csr_api_scan.c
index ebf139c..6b4466c 100644
--- a/drivers/staging/qcacld-3.0/core/sme/src/csr/csr_api_scan.c
+++ b/drivers/staging/qcacld-3.0/core/sme/src/csr/csr_api_scan.c
@@ -86,6 +86,8 @@
#define CSR_SCAN_IS_OVER_BSS_LIMIT(pMac) \
((pMac)->scan.nBssLimit <= (csr_ll_count(&(pMac)->scan.scanResultList)))
+#define MIN_11D_AP_COUNT 3
+
static void csr_set_default_scan_timing(tpAniSirGlobal pMac,
tSirScanType scanType,
tCsrScanRequest *pScanRequest);
@@ -1520,6 +1522,7 @@
* This function helps in determining the preference value
* of a particular BSS in the scan result which is further
* used in the sorting logic of the final candidate AP's.
+ * If score is same for both the APs, AP with good rssi is selected.
*
* Return: true, if bss1 is better than bss2
* false, if bss2 is better than bss1.
@@ -1528,9 +1531,18 @@
struct tag_csrscan_result *bss1, struct tag_csrscan_result *bss2)
{
bool ret;
+ int rssi1, rssi2;
+
+ rssi1 = bss1->Result.BssDescriptor.rssi;
+ rssi2 = bss2->Result.BssDescriptor.rssi;
+
if (CSR_IS_BETTER_PREFER_VALUE(bss1->bss_score,
bss2->bss_score))
ret = true;
+ else if (CSR_IS_EQUAL_PREFER_VALUE(bss1->bss_score,
+ bss2->bss_score) &&
+ CSR_IS_BETTER_RSSI(rssi1, rssi2))
+ ret = true;
else
ret = false;
return ret;
@@ -1662,217 +1674,560 @@
}
/**
- * csr_calc_other_rssi_count_weight() - Calculate channel weight based on other
- * APs RSSI and count for candiate selection.
- * @rssi: Best rssi on that channel
- * @count: No. of APs on that channel
+ * csr_get_rssi_pcnt_for_slot () - calculate rssi % score based on the slot
+ * index between the high rssi and low rssi threshold
+ * @high_rssi_threshold: High rssi of the window
+ * @low_rssi_threshold: low rssi of the window
+ * @high_rssi_pcnt: % score for the high rssi
+ * @low_rssi_pcnt: %score for the low rssi
+ * @bucket_size: bucket size of the window
+ * @bss_rssi: Input rssi for which value need to be calculated
*
- * Return : uint32_t
+ * Return : rssi pct to use for the given rssi
*/
-static uint32_t csr_calc_other_rssi_count_weight(int32_t rssi, int32_t count)
+static inline int8_t csr_get_rssi_pcnt_for_slot(int32_t high_rssi_threshold,
+ int32_t low_rssi_threshold, uint32_t high_rssi_pcnt,
+ uint32_t low_rssi_pcnt, uint32_t bucket_size, int8_t bss_rssi)
{
- int32_t rssi_weight = 0;
- int32_t count_weight = 0;
- int32_t rssi_count_weight = 0;
+ int8_t slot_index, slot_size, rssi_diff, num_slot, rssi_pcnt;
- rssi_weight = BEST_CANDIDATE_RSSI_WEIGHT * (rssi - MIN_RSSI);
+ num_slot = ((high_rssi_threshold -
+ low_rssi_threshold) / bucket_size) + 1;
+ slot_size = ((high_rssi_pcnt - low_rssi_pcnt) +
+ (num_slot / 2)) / (num_slot);
+ rssi_diff = high_rssi_threshold - bss_rssi;
+ slot_index = (rssi_diff / bucket_size) + 1;
+ rssi_pcnt = high_rssi_pcnt - (slot_size * slot_index);
+ if (rssi_pcnt < low_rssi_pcnt)
+ rssi_pcnt = low_rssi_pcnt;
- do_div(rssi_weight, (MAX_RSSI - MIN_RSSI));
+ sme_debug("Window %d -> %d pcnt range %d -> %d bucket_size %d bss_rssi %d num_slot %d slot_size %d rssi_diff %d slot_index %d rssi_pcnt %d",
+ high_rssi_threshold, low_rssi_threshold, high_rssi_pcnt,
+ low_rssi_pcnt, bucket_size, bss_rssi, num_slot, slot_size,
+ rssi_diff, slot_index, rssi_pcnt);
- if (rssi_weight > BEST_CANDIDATE_RSSI_WEIGHT)
- rssi_weight = BEST_CANDIDATE_RSSI_WEIGHT;
- else if (rssi_weight < 0)
- rssi_weight = 0;
-
- count_weight = BEST_CANDIDATE_AP_COUNT_WEIGHT *
- (count + BEST_CANDIDATE_MIN_COUNT);
-
- do_div(count_weight,
- (BEST_CANDIDATE_MAX_COUNT + BEST_CANDIDATE_MIN_COUNT));
-
- if (count_weight > BEST_CANDIDATE_AP_COUNT_WEIGHT)
- count_weight = BEST_CANDIDATE_AP_COUNT_WEIGHT;
-
- rssi_count_weight = ROAM_MAX_CHANNEL_WEIGHT -
- (rssi_weight + count_weight);
-
- QDF_TRACE(QDF_MODULE_ID_SME, QDF_TRACE_LEVEL_DEBUG,
- FL("rssi_weight=%d, count_weight=%d, rssi_count_weight=%d rssi=%d count=%d"),
- rssi_weight, count_weight, rssi_count_weight, rssi, count);
-
- return rssi_count_weight;
+ return rssi_pcnt;
}
/**
- * _csr_calculate_bss_score () - Calculate BSS score based on AP capabilty
- * and channel condition for best candidate
- * selection
- * @bss_info: bss information
- * @best_rssi : Best rssi on BSS channel
- * @ap_cnt: No of AP count on BSS channel
- * @pcl_chan_weight: pcl weight of BSS channel
+ * csr_rssi_is_same_bucket () - check if both rssi fall in same bucket
+ * @rssi_top_thresh: high rssi threshold of the window
+ * @low_rssi_threshold: low rssi of the window
+ * @rssi_ref1: rssi ref one
+ * @rssi_ref2: rssi ref two
+ * @bucket_size: bucket size of the window
*
- * Return : int32_t
+ * Return : true if both fall in same window
*/
-static int32_t _csr_calculate_bss_score(tSirBssDescription *bss_info,
- int32_t best_rssi, int32_t ap_cnt, int pcl_chan_weight)
+static inline bool csr_rssi_is_same_bucket(int8_t rssi_top_thresh,
+ int8_t rssi_ref1, int8_t rssi_ref2, int8_t bucket_size)
{
- int32_t score = 0;
- int32_t ap_load = 0;
- int32_t normalised_width = BEST_CANDIDATE_20MHZ;
- int32_t normalised_rssi = 0;
- int32_t channel_weight;
- int32_t pcl_score = 0;
- int32_t modified_rssi = 0;
- int32_t temp_pcl_chan_weight = 0;
+ int8_t rssi_diff1 = 0;
+ int8_t rssi_diff2 = 0;
+
+ rssi_diff1 = rssi_top_thresh - rssi_ref1;
+ rssi_diff2 = rssi_top_thresh - rssi_ref2;
+
+ return (rssi_diff1 / bucket_size) == (rssi_diff2 / bucket_size);
+}
+
+/**
+ * csr_calculate_rssi_score () - Calculate RSSI score based on AP RSSI
+ * @score_param: rssi score params
+ * @bss_info: bss information
+ * @rssi_weightage: rssi_weightage out of total weightage
+ *
+ * Return : rssi score
+ */
+static int32_t csr_calculate_rssi_score(struct sir_rssi_cfg_score *score_param,
+ tSirBssDescription *bss_info, uint8_t rssi_weightage)
+{
+ int8_t rssi_pcnt;
+ int32_t total_rssi_score;
+ int32_t best_rssi_threshold;
+ int32_t good_rssi_threshold;
+ int32_t bad_rssi_threshold;
+ uint32_t good_rssi_pcnt;
+ uint32_t bad_rssi_pcnt;
+ uint32_t good_bucket_size;
+ uint32_t bad_bucket_size;
+
+ best_rssi_threshold = score_param->best_rssi_threshold*(-1);
+ good_rssi_threshold = score_param->good_rssi_threshold*(-1);
+ bad_rssi_threshold = score_param->bad_rssi_threshold*(-1);
+ good_rssi_pcnt = score_param->good_rssi_pcnt;
+ bad_rssi_pcnt = score_param->bad_rssi_pcnt;
+ good_bucket_size = score_param->good_rssi_bucket_size;
+ bad_bucket_size = score_param->bad_rssi_bucket_size;
+ sme_debug("best_rssi_threshold %d good_rssi_threshold %d bad_rssi_threshold %d good_rssi_pcnt %d bad_rssi_pcnt %d good_bucket_size %d bad_bucket_size %d",
+ best_rssi_threshold, good_rssi_threshold,
+ bad_rssi_threshold, good_rssi_pcnt,
+ bad_rssi_pcnt, good_bucket_size,
+ bad_bucket_size);
+
+ total_rssi_score = (BEST_CANDIDATE_MAX_WEIGHT * rssi_weightage);
/*
- * Total weight of a BSSID is calculated on basis of 100 in which
- * contribution of every factor is considered like this.
- * RSSI: RSSI_WEIGHTAGE : 25
- * HT_CAPABILITY_WEIGHTAGE: 7
- * VHT_CAP_WEIGHTAGE: 5
- * BEAMFORMING_CAP_WEIGHTAGE: 2
- * CHAN_WIDTH_WEIGHTAGE:10
- * CHAN_BAND_WEIGHTAGE: 5
- * CCA_WEIGHTAGE: 8
- * OTHER_AP_WEIGHT: 28
- * PCL: 10
- *
- * Rssi_weightage is again divided in another factors like if Rssi is
- * very good, very less or medium.
- * According to this FR, best rssi is being considered as -40.
- * EXCELLENT_RSSI = -40
- * GOOD_RSSI = -55
- * POOR_RSSI = -65
- * BAD_RSSI = -80
- *
- * And weightage to all the RSSI type is given like this.
- * ROAM_EXCELLENT_RSSI_WEIGHT = 100
- * ROAM_GOOD_RSSI_WEIGHT = 80
- * ROAM_BAD_RSSI_WEIGHT = 60
- *
+ * If RSSI is better than the best rssi threshold then it return full
+ * score.
*/
+ if (bss_info->rssi > best_rssi_threshold)
+ return total_rssi_score;
+ /*
+ * If RSSI is less or equal to bad rssi threshold then it return
+ * least score.
+ */
+ if (bss_info->rssi <= bad_rssi_threshold)
+ return (total_rssi_score * bad_rssi_pcnt) / 100;
- if (bss_info->rssi) {
- /*
- * if RSSI of AP is less then -80, driver should ignore that
- * candidate.
- */
- if (bss_info->rssi < BAD_RSSI) {
- QDF_TRACE(QDF_MODULE_ID_SME, QDF_TRACE_LEVEL_DEBUG,
- FL("Drop this BSS "MAC_ADDRESS_STR " due to low rssi %d"),
- MAC_ADDR_ARRAY(bss_info->bssId), bss_info->rssi);
- score = 0;
- return score;
- }
- /* If BSS is in PCL list, give a boost of -20dbm */
- if (pcl_chan_weight)
- modified_rssi = bss_info->rssi + PCL_ADVANTAGE;
- else
- modified_rssi = bss_info->rssi;
- /*
- * Calculate % of rssi we are getting
- * max = 100
- * min = 0
- * less than -40 = 100%
- * -40 - -55 = 80%
- * -55 - -65 = 60%
- * below that = 100 - value
- **/
- if (modified_rssi >= BEST_CANDIDATE_EXCELLENT_RSSI)
- normalised_rssi = BEST_CANDIDATE_EXCELLENT_RSSI_WEIGHT;
- else if (modified_rssi >= BEST_CANDIDATE_GOOD_RSSI)
- normalised_rssi = BEST_CANDIDATE_GOOD_RSSI_WEIGHT;
- else if (modified_rssi >= BEST_CANDIDATE_POOR_RSSI)
- normalised_rssi = BEST_CANDIDATE_BAD_RSSI_WEIGHT;
- else
- normalised_rssi = modified_rssi - MIN_RSSI;
+ /* RSSI lies between best to good rssi threshold */
+ if (bss_info->rssi > good_rssi_threshold)
+ rssi_pcnt = csr_get_rssi_pcnt_for_slot(best_rssi_threshold,
+ good_rssi_threshold, 100, good_rssi_pcnt,
+ good_bucket_size, bss_info->rssi);
+ else
+ rssi_pcnt = csr_get_rssi_pcnt_for_slot(good_rssi_threshold,
+ bad_rssi_threshold, good_rssi_pcnt,
+ bad_rssi_pcnt, bad_bucket_size,
+ bss_info->rssi);
- /* Calculate score part for rssi */
- score += (normalised_rssi * RSSI_WEIGHTAGE);
- }
- /* If BSS is in PCL list extra pcl Weight is added n % */
+ return (total_rssi_score * rssi_pcnt) / 100;
+
+}
+
+/**
+ * csr_roam_calculate_prorated_pcnt_by_rssi () - Calculate prorated RSSI score
+ * based on AP RSSI. This will be used to determine HT VHT score
+ * @score_param: rssi score params
+ * @bss_info: bss information
+ * @rssi_weightage: rssi_weightage out of total weightage
+ *
+ * If rssi is greater than good threshold return 100, if less than bad return 0,
+ * if between good and bad, return prorated rssi score for the index.
+ *
+ * Return : rssi prorated score
+ */
+static int8_t csr_roam_calculate_prorated_pcnt_by_rssi(
+ struct sir_rssi_cfg_score *score_param,
+ tSirBssDescription *bss_info, uint8_t rssi_weightage)
+{
+ int32_t good_rssi_threshold;
+ int32_t bad_rssi_threshold;
+ int8_t rssi_pref_5g_rssi_thresh;
+ bool same_bucket;
+
+ good_rssi_threshold = score_param->good_rssi_threshold * (-1);
+ bad_rssi_threshold = score_param->bad_rssi_threshold * (-1);
+ rssi_pref_5g_rssi_thresh = score_param->rssi_pref_5g_rssi_thresh * (-1);
+
+ /* If RSSI is greater than good rssi return full weight */
+ if (bss_info->rssi > good_rssi_threshold)
+ return BEST_CANDIDATE_MAX_WEIGHT;
+
+ same_bucket = csr_rssi_is_same_bucket(good_rssi_threshold,
+ bss_info->rssi, rssi_pref_5g_rssi_thresh,
+ score_param->bad_rssi_bucket_size);
+ if (same_bucket || (bss_info->rssi < rssi_pref_5g_rssi_thresh))
+ return 0;
+ /* If RSSI is less or equal to bad rssi threshold then it return 0 */
+ if (bss_info->rssi <= bad_rssi_threshold)
+ return 0;
+
+ /* If RSSI is between good and bad threshold */
+ return csr_get_rssi_pcnt_for_slot(good_rssi_threshold,
+ bad_rssi_threshold,
+ score_param->good_rssi_pcnt,
+ score_param->bad_rssi_pcnt,
+ score_param->bad_rssi_bucket_size,
+ bss_info->rssi);
+}
+
+/**
+ * csr_calculate_pcl_score () - Calculate PCL score based on PCL weightage
+ * @mac_ctx: Pointer to mac context
+ * @bss_info: bss information
+ * @pcl_chan_weight: pcl weight of BSS channel
+ * @pcl_weightage: PCL weightage out of total weightage
+ *
+ * Return : pcl score
+ */
+static int32_t csr_calculate_pcl_score(tpAniSirGlobal mac_ctx,
+ tSirBssDescription *bss_info, int pcl_chan_weight,
+ uint8_t pcl_weightage)
+{
+ int32_t pcl_score = 0;
+ int32_t score = 0;
+ int32_t temp_pcl_chan_weight = 0;
+
if (pcl_chan_weight) {
temp_pcl_chan_weight =
(MAX_WEIGHT_OF_PCL_CHANNELS - pcl_chan_weight);
do_div(temp_pcl_chan_weight,
PCL_GROUPS_WEIGHT_DIFFERENCE);
- pcl_score = PCL_WEIGHT - temp_pcl_chan_weight;
+ pcl_score = pcl_weightage -
+ temp_pcl_chan_weight;
if (pcl_score < 0)
pcl_score = 0;
score += pcl_score * BEST_CANDIDATE_MAX_WEIGHT;
}
- /* If AP supports HT caps, extra 10% score will be added */
- if (bss_info->ht_caps_present)
- score += BEST_CANDIDATE_MAX_WEIGHT * HT_CAPABILITY_WEIGHTAGE;
+ return score;
- /* If AP supports VHT caps, Extra 6% score will be added to score */
- if (bss_info->vht_caps_present)
- score += BEST_CANDIDATE_MAX_WEIGHT * VHT_CAP_WEIGHTAGE;
+}
- /* If AP supports beam forming, extra 2% score will be added to score.*/
- if (bss_info->beacomforming_capable)
- score += BEST_CANDIDATE_MAX_WEIGHT * BEAMFORMING_CAP_WEIGHTAGE;
+/**
+ * csr_calculate_bandwidth_score () - Calculate BW score
+ * @mac_ctx: Pointer to mac context
+ * @bss_info: bss information
+ * @chan_width_weightage: channel width weightage out of total weightage
+ *
+ * Return : bw score
+ */
+static int32_t csr_calculate_bandwidth_score(tpAniSirGlobal mac_ctx,
+ tSirBssDescription *bss_info,
+ uint8_t chan_width_weightage)
+{
+ uint32_t score;
+ int32_t bw_weight_per_idx;
+ uint8_t cbmode;
- /*
- * Channel width is again calculated on basis of 100.
- * Where if AP is
- * 80MHZ = 100
- * 40MHZ = 70
- * 20MHZ = 30 weightage is given out of 100.
- * Channel width weightage is given as CHAN_WIDTH_WEIGHTAGE (10%).
- */
- if (bss_info->chan_width == eHT_CHANNEL_WIDTH_80MHZ)
- normalised_width = BEST_CANDIDATE_80MHZ;
- else if (bss_info->chan_width == eHT_CHANNEL_WIDTH_40MHZ)
- normalised_width = BEST_CANDIDATE_40MHZ;
- else
- normalised_width = BEST_CANDIDATE_20MHZ;
- score += normalised_width * CHAN_WIDTH_WEIGHTAGE;
-
- /* If AP is on 5Ghz channel , extra score of 5% is added to BSS score.*/
- if (get_rf_band(bss_info->channelId) == SIR_BAND_5_GHZ &&
- bss_info->rssi > RSSI_THRESHOLD_5GHZ)
- score += BEST_CANDIDATE_MAX_WEIGHT * CHAN_BAND_WEIGHTAGE;
-
- /*
- * If QBSS load is present, extra CCA weightage is given on based of AP
- * load as 10%.
- */
- if (bss_info->QBSSLoad_present) {
- /* calculate value in % */
- ap_load = (bss_info->qbss_chan_load *
- BEST_CANDIDATE_MAX_WEIGHT);
- do_div(ap_load, MAX_AP_LOAD);
+ bw_weight_per_idx = mac_ctx->roam.configParam.
+ bss_score_params.bandwidth_weight_per_index;
+ if (CDS_IS_CHANNEL_24GHZ(bss_info->channelId)) {
+ cbmode =
+ mac_ctx->roam.configParam.channelBondingMode24GHz;
+ } else {
+ cbmode =
+ mac_ctx->roam.configParam.channelBondingMode5GHz;
}
+
+ if (cbmode) {
+ if (bss_info->chan_width == eHT_CHANNEL_WIDTH_160MHZ)
+ score = WLAN_GET_SCORE_PERCENTAGE(bw_weight_per_idx,
+ WLAN_SCORE_160MHZ_BW_INDEX);
+ else if (bss_info->chan_width == eHT_CHANNEL_WIDTH_80MHZ)
+ score = WLAN_GET_SCORE_PERCENTAGE(bw_weight_per_idx,
+ WLAN_SCORE_80MHZ_BW_INDEX);
+ else if (bss_info->chan_width == eHT_CHANNEL_WIDTH_40MHZ)
+ score = WLAN_GET_SCORE_PERCENTAGE(bw_weight_per_idx,
+ WLAN_SCORE_40MHZ_BW_INDEX);
+ else
+ score = WLAN_GET_SCORE_PERCENTAGE(bw_weight_per_idx,
+ WLAN_20MHZ_BW_INDEX);
+ } else {
+ score = WLAN_GET_SCORE_PERCENTAGE(bw_weight_per_idx,
+ WLAN_20MHZ_BW_INDEX);
+ }
+
+ return score * chan_width_weightage;
+}
+
+/**
+ * csr_get_congestion_score_for_index () - get congestion score for the given
+ * index
+ * @index: index for which we need the score
+ * @bss_score_params: bss score params
+ *
+ * Return : congestion score for the index
+ */
+static int32_t csr_get_congestion_score_for_index(uint8_t index,
+ struct sir_score_config *bss_score_params)
+{
+ if (index <= WLAN_ESP_QBSS_INDEX_3)
+ return bss_score_params->weight_cfg.
+ channel_congestion_weightage *
+ WLAN_GET_SCORE_PERCENTAGE(
+ bss_score_params->esp_qbss_scoring.score_pcnt3_to_0,
+ index);
+ else if (index <= WLAN_ESP_QBSS_INDEX_7)
+ return bss_score_params->weight_cfg.
+ channel_congestion_weightage *
+ WLAN_GET_SCORE_PERCENTAGE(
+ bss_score_params->esp_qbss_scoring.score_pcnt7_to_4,
+ index - WLAN_ESP_QBSS_OFFSET_INDEX_7_4);
+ else if (index <= WLAN_ESP_QBSS_INDEX_11)
+ return bss_score_params->weight_cfg.
+ channel_congestion_weightage *
+ WLAN_GET_SCORE_PERCENTAGE(
+ bss_score_params->esp_qbss_scoring.score_pcnt11_to_8,
+ index - WLAN_ESP_QBSS_OFFSET_INDEX_11_8);
+ else
+ return bss_score_params->weight_cfg.
+ channel_congestion_weightage *
+ WLAN_GET_SCORE_PERCENTAGE(
+ bss_score_params->esp_qbss_scoring.score_pcnt15_to_12,
+ index - WLAN_ESP_QBSS_OFFSET_INDEX_15_12);
+}
+/**
+ * csr_calculate_congestion_score () - Calculate congestion score
+ * @mac_ctx: Pointer to mac context
+ * @bss_info: bss information
+ * @bss_score_params: bss score params
+ *
+ * Return : congestion score
+ */
+static int32_t csr_calculate_congestion_score(tpAniSirGlobal mac_ctx,
+ tSirBssDescription *bss_info,
+ struct sir_score_config *bss_score_params)
+{
+ uint32_t ap_load = 0;
+ uint32_t est_air_time_percentage = 0;
+ uint32_t congestion = 0;
+ uint32_t window_size;
+ uint8_t index;
+ int32_t good_rssi_threshold;
+
+ if (!bss_score_params->esp_qbss_scoring.num_slot)
+ return 0;
+
+ if (bss_score_params->esp_qbss_scoring.num_slot >
+ WLAN_ESP_QBSS_MAX_INDEX)
+ bss_score_params->esp_qbss_scoring.num_slot =
+ WLAN_ESP_QBSS_MAX_INDEX;
+
+ good_rssi_threshold =
+ bss_score_params->rssi_score.good_rssi_threshold * (-1);
+
+ /* For bad zone rssi get score from last index */
+ if (bss_info->rssi < good_rssi_threshold)
+ return csr_get_congestion_score_for_index(
+ bss_score_params->esp_qbss_scoring.num_slot,
+ bss_score_params);
+
+ if (bss_info->air_time_fraction) {
+ /* Convert 0-255 range to percentage */
+ est_air_time_percentage =
+ bss_info->air_time_fraction *
+ ROAM_MAX_CHANNEL_WEIGHT;
+ est_air_time_percentage =
+ qdf_do_div(est_air_time_percentage,
+ MAX_ESTIMATED_AIR_TIME_FRACTION);
+ /*
+ * Calculate channel congestion from estimated air time
+ * fraction.
+ */
+ congestion = MAX_CHANNEL_UTILIZATION -
+ est_air_time_percentage;
+ } else if (bss_info->QBSSLoad_present) {
+ ap_load = (bss_info->qbss_chan_load *
+ BEST_CANDIDATE_MAX_WEIGHT);
+ /*
+ * Calculate ap_load in % from qbss channel load from
+ * 0-255 range
+ */
+ congestion = qdf_do_div(ap_load, MAX_AP_LOAD);
+ } else {
+ return bss_score_params->weight_cfg.
+ channel_congestion_weightage *
+ WLAN_GET_SCORE_PERCENTAGE(
+ bss_score_params->esp_qbss_scoring.score_pcnt3_to_0,
+ WLAN_ESP_QBSS_INDEX_0);
+ }
+
+ window_size = BEST_CANDIDATE_MAX_WEIGHT /
+ bss_score_params->esp_qbss_scoring.num_slot;
+
+ /* Desired values are from 1 to 15, as 0 is for not present. so do +1 */
+ index = qdf_do_div(congestion, window_size) + 1;
+
+ if (index > bss_score_params->esp_qbss_scoring.num_slot)
+ index = bss_score_params->esp_qbss_scoring.num_slot;
+
+ return csr_get_congestion_score_for_index(index, bss_score_params);
+}
+
+/**
+ * csr_calculate_nss_score () - Calculate nss score
+ * @sta_nss: sta nss supported
+ * @ap_nss: AP nss supported
+ * @nss_weight_per_index: nss weight per index
+ * @nss_weightage: weightage for nss
+ *
+ * Return : nss score
+ */
+static int32_t csr_calculate_nss_score(uint8_t sta_nss, uint8_t ap_nss,
+ uint32_t nss_weight_per_index, uint8_t nss_weightage)
+{
+ uint8_t nss;
+
+ if (wma_is_current_hwmode_dbs())
+ sta_nss--;
+
+ nss = ap_nss;
+ if (sta_nss < nss)
+ nss = sta_nss;
+
+ if (nss == 4)
+ return nss_weightage *
+ WLAN_GET_SCORE_PERCENTAGE(nss_weight_per_index,
+ WLAN_NSS_4x4_INDEX);
+ else if (nss == 3)
+ return nss_weightage *
+ WLAN_GET_SCORE_PERCENTAGE(nss_weight_per_index,
+ WLAN_NSS_3x3_INDEX);
+ else if (nss == 2)
+ return nss_weightage *
+ WLAN_GET_SCORE_PERCENTAGE(nss_weight_per_index,
+ WLAN_NSS_2x2_INDEX);
+ else
+ return nss_weightage *
+ WLAN_GET_SCORE_PERCENTAGE(nss_weight_per_index,
+ WLAN_NSS_1x1_INDEX);
+}
+
+/**
+ * _csr_calculate_bss_score () - Calculate BSS score based on AP capability
+ * and channel condition for best candidate
+ * selection
+ * @mac_ctx: Pointer to mac context
+ * @bss_info: bss information
+ * @pcl_chan_weight: pcl weight of BSS channel
+ * @sta_nss: NSS supported by station
+ *
+ * Return : int32_t
+ */
+static int32_t _csr_calculate_bss_score(tpAniSirGlobal mac_ctx,
+ tSirBssDescription *bss_info,
+ int pcl_chan_weight, uint8_t sta_nss)
+{
+ int32_t score = 0;
+ int32_t rssi_score;
+ int32_t ht_score = 0;
+ int32_t vht_score = 0;
+ int32_t beamformee_score = 0;
+ int32_t nss_score = 0;
+ int32_t congestion_score = 0;
+ int32_t pcl_score = 0;
+ int32_t bandwidth_score = 0;
+ int32_t band_score = 0;
+ struct sir_weight_config *weight_config;
+ uint32_t beamformee_cap;
+ uint32_t dot11mode;
+ struct sir_score_config *bss_score_params;
+ uint8_t prorated_pcnt;
+ bool same_bucket = false;
+ int8_t good_rssi_threshold;
+ int8_t rssi_pref_5g_rssi_thresh;
+
/*
- * If doesn't announce its ap load, driver will take it as 50% of
- * CCA_WEIGHTAGE
+ * Total weight of a BSSID is calculated on basis of 100 in which
+ * contribution of every factor is considered like this(default).
+ * RSSI: RSSI_WEIGHTAGE : 25
+ * HT_CAPABILITY_WEIGHTAGE: 7
+ * VHT_CAP_WEIGHTAGE: 5
+ * BEAMFORMING_CAP_WEIGHTAGE: 2
+ * CHAN_WIDTH_WEIGHTAGE:10
+ * CHAN_BAND_WEIGHTAGE: 5
+ * NSS: 5
+ * PCL: 10
+ * CHANNEL_CONGESTION: 5
+ * Reserved : 31
*/
- if (ap_load)
- score += (MAX_CHANNEL_UTILIZATION - ap_load) * CCA_WEIGHTAGE;
- else
- score += DEFAULT_CHANNEL_UTILIZATION * CCA_WEIGHTAGE;
+ bss_score_params = &mac_ctx->roam.configParam.bss_score_params;
+ weight_config = &bss_score_params->weight_cfg;
- if (ap_cnt == 0)
- channel_weight = ROAM_MAX_CHANNEL_WEIGHT;
- else
- channel_weight = csr_calc_other_rssi_count_weight(
- best_rssi, ap_cnt);
+ rssi_score = csr_calculate_rssi_score(&bss_score_params->rssi_score,
+ bss_info, weight_config->rssi_weightage);
+ score += rssi_score;
- score += channel_weight * OTHER_AP_WEIGHT;
- QDF_TRACE(QDF_MODULE_ID_SME, QDF_TRACE_LEVEL_DEBUG,
- FL("BSSID:"MAC_ADDRESS_STR" rssi=%d normalized_rssi=%d htcaps=%d vht=%d bw=%d channel=%d beamforming=%d ap_load=%d channel_weight=%d pcl_score %d Final Score %d "),
+ pcl_score = csr_calculate_pcl_score(mac_ctx, bss_info,
+ pcl_chan_weight, weight_config->pcl_weightage);
+ score += pcl_score;
+
+ dot11mode = csr_translate_to_wni_cfg_dot11_mode(mac_ctx,
+ mac_ctx->roam.configParam.uCfgDot11Mode);
+ prorated_pcnt = csr_roam_calculate_prorated_pcnt_by_rssi(
+ &bss_score_params->rssi_score, bss_info,
+ weight_config->rssi_weightage);
+ /* If device and AP supports HT caps, extra 10% score will be added */
+ if (IS_DOT11_MODE_HT(dot11mode) && bss_info->ht_caps_present)
+ ht_score = prorated_pcnt *
+ weight_config->ht_caps_weightage;
+ score += ht_score;
+
+ /*
+ * If device and AP supports VHT caps, Extra 6% score will
+ * be added to score
+ */
+ if (IS_DOT11_MODE_VHT(dot11mode) && bss_info->vht_caps_present)
+ vht_score = prorated_pcnt *
+ weight_config->vht_caps_weightage;
+ score += vht_score;
+
+ bandwidth_score = csr_calculate_bandwidth_score(mac_ctx, bss_info,
+ weight_config->chan_width_weightage);
+ score += bandwidth_score;
+
+ good_rssi_threshold =
+ bss_score_params->rssi_score.good_rssi_threshold * (-1);
+ rssi_pref_5g_rssi_thresh =
+ bss_score_params->rssi_score.rssi_pref_5g_rssi_thresh * (-1);
+ if (bss_info->rssi < good_rssi_threshold)
+ same_bucket = csr_rssi_is_same_bucket(good_rssi_threshold,
+ bss_info->rssi, rssi_pref_5g_rssi_thresh,
+ bss_score_params->rssi_score.
+ bad_rssi_bucket_size);
+
+ /*
+ * If AP is on a 5 GHz channel, extra weightage is added to the BSS score
+ * if RSSI is greater than the 5g rssi threshold or falls in the same bucket;
+ * else give weightage to 2.4 GHz.
+ */
+ if ((bss_info->rssi > rssi_pref_5g_rssi_thresh) && !same_bucket) {
+ if (CDS_IS_CHANNEL_5GHZ(bss_info->channelId))
+ band_score = weight_config->chan_band_weightage *
+ WLAN_GET_SCORE_PERCENTAGE(
+ bss_score_params->band_weight_per_index,
+ WLAN_BAND_5G_INDEX);
+ } else if (CDS_IS_CHANNEL_24GHZ(bss_info->channelId)) {
+ band_score = weight_config->chan_band_weightage *
+ WLAN_GET_SCORE_PERCENTAGE(
+ bss_score_params->band_weight_per_index,
+ WLAN_BAND_2G_INDEX);
+ }
+
+ score += band_score;
+
+ /*
+ * If device and AP supports beam forming, extra 2% score
+ * will be added to score.
+ */
+ wlan_cfg_get_int(mac_ctx,
+ WNI_CFG_VHT_SU_BEAMFORMEE_CAP, &beamformee_cap);
+ if ((bss_info->rssi > rssi_pref_5g_rssi_thresh) && !same_bucket &&
+ beamformee_cap && bss_info->beacomforming_capable)
+ beamformee_score = BEST_CANDIDATE_MAX_WEIGHT *
+ weight_config->beamforming_cap_weightage;
+ score += beamformee_score;
+ congestion_score = csr_calculate_congestion_score(mac_ctx,
+ bss_info, bss_score_params);
+ score += congestion_score;
+ /*
+ * If station support nss as 2*2 but AP support NSS as 1*1,
+ * this AP will be given half weight compare to AP which are having
+ * NSS as 2*2.
+ */
+ nss_score = csr_calculate_nss_score(sta_nss, bss_info->nss,
+ bss_score_params->nss_weight_per_index,
+ weight_config->nss_weightage);
+ score += nss_score;
+
+ sme_debug("BSSID:"MAC_ADDRESS_STR" rssi=%d dot11mode %d htcaps=%d vht=%d AP bw=%d channel=%d self beamformee %d AP beamforming %d air time fraction %d qbss load %d ap_NSS %d sta nss %d",
MAC_ADDR_ARRAY(bss_info->bssId),
- bss_info->rssi, normalised_rssi, bss_info->ht_caps_present,
+ bss_info->rssi, dot11mode, bss_info->ht_caps_present,
bss_info->vht_caps_present, bss_info->chan_width,
- bss_info->channelId,
- bss_info->beacomforming_capable, ap_load, channel_weight,
- pcl_score,
- score);
+ bss_info->channelId, beamformee_cap,
+ bss_info->beacomforming_capable,
+ bss_info->air_time_fraction,
+ bss_info->qbss_chan_load,
+ bss_info->nss, sta_nss);
+
+ sme_debug("Scores : rssi %d pcl %d prorated_pcnt %d ht %d vht %d beamformee %d bw %d band %d congestion %d nss %d TOTAL score %d",
+ rssi_score, pcl_score, prorated_pcnt, ht_score, vht_score,
+ beamformee_score, bandwidth_score, band_score,
+ congestion_score, nss_score, score);
+
return score;
}
/**
@@ -1892,17 +2247,16 @@
{
int32_t score = 0;
int channel_id;
+ uint8_t nss = 1;
tSirBssDescription *bss_info = &(pBss->Result.BssDescriptor);
channel_id = cds_get_channel_enum(pBss->Result.BssDescriptor.channelId);
-
+ if (pMac->roam.configParam.enable2x2)
+ nss = 2;
if (channel_id < NUM_CHANNELS)
- score = _csr_calculate_bss_score(bss_info,
- pMac->candidate_channel_info[channel_id].
- max_rssi_on_channel,
- pMac->candidate_channel_info[channel_id].
- other_ap_count, pcl_chan_weight);
+ score = _csr_calculate_bss_score(pMac, bss_info,
+ pcl_chan_weight, nss);
pBss->bss_score = score;
return;
@@ -2020,85 +2374,6 @@
(*count)++;
return status;
}
-/**
- * csr_calculate_other_ap_count_n_rssi() - calculate channel load on matching
- * profile's channel
- * @mac_ctx: Global MAC Context pointer.
- * @pFilter: Filter to select candidate.
- *
- * This function processes scan list and match scan results
- * with candidate profile. If some scan result doesn't match
- * with candidate profile, this function calculates no of
- * those AP and best RSSI on that candidate's channel.
- *
- * Return : void
- */
-static void csr_calculate_other_ap_count_n_rssi(tpAniSirGlobal pMac,
- tCsrScanResultFilter *pFilter)
-{
- QDF_STATUS status = QDF_STATUS_SUCCESS;
- tListElem *entry;
- bool match = false;
- struct tag_csrscan_result *bss = NULL;
- tDot11fBeaconIEs *ies, *new_ies = NULL;
- int channel_id;
-
- csr_ll_lock(&pMac->scan.scanResultList);
- entry = csr_ll_peek_head(&pMac->scan.scanResultList, LL_ACCESS_NOLOCK);
- while (entry) {
- bss = GET_BASE_ADDR(entry, struct tag_csrscan_result, Link);
- ies = (tDot11fBeaconIEs *) (bss->Result.pvIes);
- match = false;
- new_ies = NULL;
-
- status = csr_save_ies(pMac, pFilter, bss, &new_ies,
- &match, NULL, NULL, NULL);
- if (new_ies)
- qdf_mem_free(new_ies);
- if (!QDF_IS_STATUS_SUCCESS(status)) {
- sme_info("save ies fail");
- break;
- }
- /*
- * Calculate Count of APs and Best Rssi on matching profile's
- * channel. This information will be used to calculate
- * total congestion on that channel.
- */
- if (!match) {
- channel_id = cds_get_channel_enum(
- bss->Result.BssDescriptor.channelId);
- if (channel_id >= NUM_CHANNELS) {
- sme_err("Invalid channel");
- return;
- }
- QDF_TRACE(QDF_MODULE_ID_SME, QDF_TRACE_LEVEL_DEBUG,
- FL("channel_id %d ssid %s rssi %d mac address "MAC_ADDRESS_STR),
- bss->Result.BssDescriptor.channelId,
- bss->Result.ssId.ssId,
- bss->Result.BssDescriptor.rssi,
- MAC_ADDR_ARRAY(
- bss->Result.BssDescriptor.bssId));
- pMac->candidate_channel_info[channel_id].
- other_ap_count++;
- if (pMac->candidate_channel_info[channel_id].
- max_rssi_on_channel >= 0)
- pMac->candidate_channel_info[channel_id].
- max_rssi_on_channel = -127;
- if ((bss->Result.BssDescriptor.rssi >
- pMac->candidate_channel_info[channel_id].
- max_rssi_on_channel))
- pMac->candidate_channel_info[channel_id].
- max_rssi_on_channel =
- bss->Result.BssDescriptor.rssi;
- }
- entry = csr_ll_next(&pMac->scan.scanResultList, entry,
- LL_ACCESS_NOLOCK);
-
- }
- csr_ll_unlock(&pMac->scan.scanResultList);
- return;
-}
-
static QDF_STATUS
csr_parse_scan_results(tpAniSirGlobal pMac,
@@ -2176,11 +2451,6 @@
if (phResult)
*phResult = CSR_INVALID_SCANRESULT_HANDLE;
- qdf_mem_set(&pMac->candidate_channel_info,
- sizeof(struct candidate_chan_info), 0);
- if (pFilter)
- csr_calculate_other_ap_count_n_rssi(pMac, pFilter);
-
pRetList = qdf_mem_malloc(sizeof(struct scan_result_list));
if (NULL == pRetList) {
sme_err("pRetList is NULL");
@@ -3123,12 +3393,90 @@
} /* end of loop */
}
+
+static bool is_us_country(uint8_t *country_code)
+{
+ if ((country_code[0] == 'U') &&
+ (country_code[1] == 'S'))
+ return true;
+
+ return false;
+}
+
+static bool is_world_mode(uint8_t *country_code)
+{
+ if ((country_code[0] == '0') &&
+ (country_code[1] == '0'))
+ return true;
+
+ return false;
+}
+
+static bool csr_elected_country_algo_fcc(tpAniSirGlobal mac_ctx)
+{
+ bool ctry_11d_found = false;
+ uint8_t max_votes = 0;
+ uint8_t i = 0;
+ uint8_t ctry_index;
+
+ if (!mac_ctx->scan.countryCodeCount) {
+ sme_warn("No AP with 11d Country code is present in scan list");
+ return ctry_11d_found;
+ }
+
+ max_votes = mac_ctx->scan.votes11d[0].votes;
+ if (is_us_country(mac_ctx->scan.votes11d[0].countryCode)) {
+ ctry_11d_found = true;
+ ctry_index = 0;
+ goto algo_done;
+ } else if (max_votes >= MIN_11D_AP_COUNT) {
+ ctry_11d_found = true;
+ ctry_index = 0;
+ }
+
+ for (i = 1; i < mac_ctx->scan.countryCodeCount; i++) {
+ if (is_us_country(mac_ctx->scan.votes11d[i].countryCode)) {
+ ctry_11d_found = true;
+ ctry_index = i;
+ goto algo_done;
+ }
+
+ if ((max_votes < mac_ctx->scan.votes11d[i].votes) &&
+ (mac_ctx->scan.votes11d[i].votes >= MIN_11D_AP_COUNT)) {
+ sme_debug(" Votes for Country %c%c : %d",
+ mac_ctx->scan.votes11d[i].countryCode[0],
+ mac_ctx->scan.votes11d[i].countryCode[1],
+ mac_ctx->scan.votes11d[i].votes);
+ max_votes = mac_ctx->scan.votes11d[i].votes;
+ ctry_index = i;
+ ctry_11d_found = true;
+ }
+ }
+
+algo_done:
+
+ if (ctry_11d_found) {
+ qdf_mem_copy(mac_ctx->scan.countryCodeElected,
+ mac_ctx->scan.votes11d[ctry_index].countryCode,
+ WNI_CFG_COUNTRY_CODE_LEN);
+
+ sme_debug("Selected Country is %c%c With count %d",
+ mac_ctx->scan.votes11d[ctry_index].countryCode[0],
+ mac_ctx->scan.votes11d[ctry_index].countryCode[1],
+ mac_ctx->scan.votes11d[ctry_index].votes);
+ }
+
+ return ctry_11d_found;
+}
+
+
static void csr_move_temp_scan_results_to_main_list(tpAniSirGlobal pMac,
uint8_t reason,
uint8_t sessionId)
{
tCsrRoamSession *pSession;
uint32_t i;
+ bool found_11d_ctry = false;
/* remove the BSS descriptions from temporary list */
csr_remove_from_tmp_list(pMac, reason, sessionId);
@@ -3148,7 +3496,14 @@
return;
}
}
- if (csr_elected_country_info(pMac))
+
+ if (is_us_country(pMac->scan.countryCodeCurrent) ||
+ is_world_mode(pMac->scan.countryCodeCurrent))
+ found_11d_ctry = csr_elected_country_algo_fcc(pMac);
+ else
+ found_11d_ctry = csr_elected_country_info(pMac);
+
+ if (found_11d_ctry)
csr_learn_11dcountry_information(pMac, NULL, NULL, true);
}
diff --git a/drivers/staging/qcacld-3.0/core/sme/src/csr/csr_inside_api.h b/drivers/staging/qcacld-3.0/core/sme/src/csr/csr_inside_api.h
index 3065293..d874b51 100644
--- a/drivers/staging/qcacld-3.0/core/sme/src/csr/csr_inside_api.h
+++ b/drivers/staging/qcacld-3.0/core/sme/src/csr/csr_inside_api.h
@@ -94,13 +94,13 @@
#define CSR_ROAMING_DFS_CHANNEL_ENABLED_NORMAL (1)
#define CSR_ROAMING_DFS_CHANNEL_ENABLED_ACTIVE (2)
-#define CSR_ACTIVE_SCAN_LIST_CMD_TIMEOUT (1000*30)
+#define CSR_ACTIVE_SCAN_LIST_CMD_TIMEOUT (1000 * 30)
-/* ***************************************************************************
+/*
* The MAX BSSID Count should be lower than the command timeout value and it
- * can be of a fraction of 3/4 of the total command timeout value.
- * ***************************************************************************/
-#define CSR_MAX_BSSID_COUNT ((SME_ACTIVE_LIST_CMD_TIMEOUT_VALUE/4000) * 3)
+ * can be of a fraction of 1/3 to 1/2 of the total command timeout value.
+ */
+#define CSR_MAX_BSSID_COUNT (SME_ACTIVE_LIST_CMD_TIMEOUT_VALUE / 3000)
#define CSR_CUSTOM_CONC_GO_BI 100
extern uint8_t csr_wpa_oui[][CSR_WPA_OUI_SIZE];
bool csr_is_supported_channel(tpAniSirGlobal pMac, uint8_t channelId);
@@ -109,39 +109,60 @@
#define BEST_CANDIDATE_RSSI_WEIGHT 50
#define MIN_RSSI (-100)
#define MAX_RSSI 0
-#define BEST_CANDIDATE_AP_COUNT_WEIGHT 50
-#define BEST_CANDIDATE_MAX_COUNT 30
-#define BEST_CANDIDATE_MIN_COUNT 0
#define ROAM_MAX_CHANNEL_WEIGHT 100
-#define DEFAULT_CHANNEL_UTILIZATION 50
#define MAX_CHANNEL_UTILIZATION 100
-
-#define RSSI_WEIGHTAGE 25
-#define HT_CAPABILITY_WEIGHTAGE 7
-#define VHT_CAP_WEIGHTAGE 5
-#define BEAMFORMING_CAP_WEIGHTAGE 2
-#define CHAN_WIDTH_WEIGHTAGE 10
-#define CHAN_BAND_WEIGHTAGE 5
-#define WMM_WEIGHTAGE 0
-#define CCA_WEIGHTAGE 8
-#define OTHER_AP_WEIGHT 28
-#define PCL_WEIGHT 10
-
+#define NSS_1X1_WEIGHTAGE 3
+#define MAX_ESTIMATED_AIR_TIME_FRACTION 255
#define MAX_AP_LOAD 255
-#define BEST_CANDIDATE_EXCELLENT_RSSI -40
-#define BEST_CANDIDATE_GOOD_RSSI -55
-#define BEST_CANDIDATE_POOR_RSSI -65
+
+#define LOW_CHANNEL_CONGESTION_WEIGHT 500
+#define MODERATE_CHANNEL_CONGESTION_WEIGHT 370
+#define CONSIDERABLE_CHANNEL_CONGESTION_WEIGHT 250
+#define HIGH_CHANNEL_CONGESTION_WEIGHT 120
+
+#define LOW_CHANNEL_CONGESTION 0
+#define MODERATE_CHANNEL_CONGESTION 25
+#define CONSIDERABLE_CHANNEL_CONGESTION 50
+#define HIGH_CHANNEL_CONGESTION 75
+#define EXTREME_CHANNEL_CONGESTION 100
+
+#define EXCELLENT_RSSI -55
#define BAD_RSSI -80
-#define BEST_CANDIDATE_EXCELLENT_RSSI_WEIGHT 100
-#define BEST_CANDIDATE_GOOD_RSSI_WEIGHT 80
-#define BEST_CANDIDATE_BAD_RSSI_WEIGHT 60
-#define BEST_CANDIDATE_MAX_WEIGHT 100
+#define EXCELLENT_RSSI_WEIGHT 100
+#define RSSI_BUCKET 5
+#define RSSI_WEIGHT_BUCKET 250
+
#define BEST_CANDIDATE_80MHZ 100
#define BEST_CANDIDATE_40MHZ 70
#define BEST_CANDIDATE_20MHZ 30
-#define BEST_CANDIDATE_PENALTY (3/10)
#define BEST_CANDIDATE_MAX_BSS_SCORE 10000
+#define WLAN_20MHZ_BW_INDEX 0
+#define WLAN_SCORE_40MHZ_BW_INDEX 1
+#define WLAN_SCORE_80MHZ_BW_INDEX 2
+#define WLAN_SCORE_160MHZ_BW_INDEX 3
+#define WLAN_SCORE_MAX_BW_INDEX 4
+
+#define WLAN_NSS_1x1_INDEX 0
+#define WLAN_NSS_2x2_INDEX 1
+#define WLAN_NSS_3x3_INDEX 2
+#define WLAN_NSS_4x4_INDEX 3
+#define WLAN_MAX_NSS_INDEX 4
+
+#define WLAN_BAND_2G_INDEX 0
+#define WLAN_BAND_5G_INDEX 1
+/* 2 and 3 are reserved */
+#define WLAN_MAX_BAND_INDEX 4
+
+#define WLAN_ESP_QBSS_INDEX_0 0
+#define WLAN_ESP_QBSS_INDEX_3 3
+#define WLAN_ESP_QBSS_INDEX_7 7
+#define WLAN_ESP_QBSS_OFFSET_INDEX_7_4 4
+#define WLAN_ESP_QBSS_INDEX_11 11
+#define WLAN_ESP_QBSS_OFFSET_INDEX_11_8 8
+#define WLAN_ESP_QBSS_MAX_INDEX 15
+#define WLAN_ESP_QBSS_OFFSET_INDEX_15_12 12
+
enum csr_scancomplete_nextcommand {
eCsrNextScanNothing,
diff --git a/drivers/staging/qcacld-3.0/core/sme/src/csr/csr_neighbor_roam.c b/drivers/staging/qcacld-3.0/core/sme/src/csr/csr_neighbor_roam.c
index 5bd360d..cdcdec2 100644
--- a/drivers/staging/qcacld-3.0/core/sme/src/csr/csr_neighbor_roam.c
+++ b/drivers/staging/qcacld-3.0/core/sme/src/csr/csr_neighbor_roam.c
@@ -1249,6 +1249,8 @@
pNeighborRoamInfo->cfgParams.neighborLookupThreshold =
pMac->roam.configParam.neighborRoamConfig.
nNeighborLookupRssiThreshold;
+ pNeighborRoamInfo->cfgParams.rssi_thresh_offset_5g =
+ pMac->roam.configParam.neighborRoamConfig.rssi_thresh_offset_5g;
pNeighborRoamInfo->cfgParams.delay_before_vdev_stop =
pMac->roam.configParam.neighborRoamConfig.
delay_before_vdev_stop;
diff --git a/drivers/staging/qcacld-3.0/core/wma/inc/wma_internal.h b/drivers/staging/qcacld-3.0/core/wma/inc/wma_internal.h
index 61d4528..7d3e410 100644
--- a/drivers/staging/qcacld-3.0/core/wma/inc/wma_internal.h
+++ b/drivers/staging/qcacld-3.0/core/wma/inc/wma_internal.h
@@ -162,8 +162,42 @@
wmi_unified_pdev_set_param(wmi_unified_t wmi_handle, WMI_PDEV_PARAM param_id,
uint32_t param_value);
+/**
+ * wma_send_msg_by_priority() - Send wma message to PE with priority.
+ * @wma_handle: wma handle
+ * @msg_type: message type
+ * @body_ptr: message body ptr
+ * @body_val: message body value
+ * @is_high_priority: if msg is high priority
+ *
+ * Return: none
+ */
+void wma_send_msg_by_priority(tp_wma_handle wma_handle, uint16_t msg_type,
+ void *body_ptr, uint32_t body_val, int is_high_priority);
+
+/**
+ * wma_send_msg() - Send wma message to PE.
+ * @wma_handle: wma handle
+ * @msg_type: message type
+ * @body_ptr: message body ptr
+ * @body_val: message body value
+ *
+ * Return: none
+ */
void wma_send_msg(tp_wma_handle wma_handle, uint16_t msg_type,
void *body_ptr, uint32_t body_val);
+/**
+ * wma_send_msg_high_priority() - Send wma message to PE with high priority.
+ * @wma_handle: wma handle
+ * @msg_type: message type
+ * @body_ptr: message body ptr
+ * @body_val: message body value
+ *
+ * Return: none
+ */
+void wma_send_msg_high_priority(tp_wma_handle wma_handle, uint16_t msg_type,
+ void *body_ptr, uint32_t body_val);
+
void wma_data_tx_ack_comp_hdlr(void *wma_context,
qdf_nbuf_t netbuf, int32_t status);
@@ -251,21 +285,12 @@
A_UINT32 e_csr_encryption_type_to_rsn_cipherset(eCsrEncryptionType encr);
-void wma_roam_scan_fill_ap_profile(tp_wma_handle wma_handle,
- tpAniSirGlobal pMac,
- tSirRoamOffloadScanReq *roam_req,
- wmi_ap_profile *ap_profile_p);
-
void wma_roam_scan_fill_scan_params(tp_wma_handle wma_handle,
tpAniSirGlobal pMac,
tSirRoamOffloadScanReq *roam_req,
wmi_start_scan_cmd_fixed_param *
scan_params);
-QDF_STATUS wma_roam_scan_offload_ap_profile(tp_wma_handle wma_handle,
- wmi_ap_profile *ap_profile_p,
- uint32_t vdev_id);
-
QDF_STATUS wma_roam_scan_bmiss_cnt(tp_wma_handle wma_handle,
A_INT32 first_bcnt,
A_UINT32 final_bcnt, uint32_t vdev_id);
diff --git a/drivers/staging/qcacld-3.0/core/wma/inc/wma_types.h b/drivers/staging/qcacld-3.0/core/wma/inc/wma_types.h
index baa2e3f..a177521 100644
--- a/drivers/staging/qcacld-3.0/core/wma/inc/wma_types.h
+++ b/drivers/staging/qcacld-3.0/core/wma/inc/wma_types.h
@@ -508,7 +508,7 @@
#define HAL_USE_BD_RATE2_FOR_MANAGEMENT_FRAME 0x40
#define wma_tx_frame(hHal, pFrmBuf, frmLen, frmType, txDir, tid, pCompFunc, \
- pData, txFlag, sessionid, channel_freq) \
+ pData, txFlag, sessionid, channel_freq, rid) \
(QDF_STATUS)( wma_tx_packet( \
cds_get_context(QDF_MODULE_ID_WMA), \
(pFrmBuf), \
@@ -522,11 +522,12 @@
(txFlag), \
(sessionid), \
(false), \
- (channel_freq)))
+ (channel_freq), \
+ (rid)))
#define wma_tx_frameWithTxComplete(hHal, pFrmBuf, frmLen, frmType, txDir, tid, \
pCompFunc, pData, pCBackFnTxComp, txFlag, sessionid, tdlsflag, \
- channel_freq) \
+ channel_freq, rid) \
(QDF_STATUS)( wma_tx_packet( \
cds_get_context(QDF_MODULE_ID_WMA), \
(pFrmBuf), \
@@ -540,7 +541,8 @@
(txFlag), \
(sessionid), \
(tdlsflag), \
- (channel_freq)))
+ (channel_freq), \
+ (rid)))
#define WMA_SetEnableSSR(enable_ssr) ((void)enable_ssr)
@@ -699,6 +701,22 @@
#endif /* FEATURE_WLAN_TDLS */
+enum rateid {
+ RATEID_1MBPS = 0,
+ RATEID_2MBPS,
+ RATEID_5_5MBPS,
+ RATEID_11MBPS,
+ RATEID_6MBPS,
+ RATEID_9MBPS,
+ RATEID_12MBPS,
+ RATEID_18MBPS,
+ RATEID_24MBPS,
+ RATEID_36MBPS,
+ RATEID_48MBPS = 10,
+ RATEID_54MBPS,
+ RATEID_DEFAULT
+};
+
tSirRetStatus wma_post_ctrl_msg(tpAniSirGlobal pMac, tSirMsgQ *pMsg);
tSirRetStatus u_mac_post_ctrl_msg(void *pSirGlobal, tSirMbMsg *pMb);
@@ -723,7 +741,7 @@
void *pData,
pWMAAckFnTxComp pAckTxComp,
uint8_t txFlag, uint8_t sessionId, bool tdlsflag,
- uint16_t channel_freq);
+ uint16_t channel_freq, enum rateid rid);
QDF_STATUS wma_open(void *p_cds_context,
wma_tgt_cfg_cb pTgtUpdCB,
diff --git a/drivers/staging/qcacld-3.0/core/wma/src/wma_data.c b/drivers/staging/qcacld-3.0/core/wma/src/wma_data.c
index d95c009..0856391 100644
--- a/drivers/staging/qcacld-3.0/core/wma/src/wma_data.c
+++ b/drivers/staging/qcacld-3.0/core/wma/src/wma_data.c
@@ -2520,6 +2520,58 @@
}
/**
+ * rate_pream: Mapping from data rates to preamble.
+ */
+static uint32_t rate_pream[] = {WMI_RATE_PREAMBLE_CCK, WMI_RATE_PREAMBLE_CCK,
+ WMI_RATE_PREAMBLE_CCK, WMI_RATE_PREAMBLE_CCK,
+ WMI_RATE_PREAMBLE_OFDM, WMI_RATE_PREAMBLE_OFDM,
+ WMI_RATE_PREAMBLE_OFDM, WMI_RATE_PREAMBLE_OFDM,
+ WMI_RATE_PREAMBLE_OFDM, WMI_RATE_PREAMBLE_OFDM,
+ WMI_RATE_PREAMBLE_OFDM, WMI_RATE_PREAMBLE_OFDM};
+
+/**
+ * rate_mcs: Mapping from data rates to MCS (+4 for OFDM to keep the sequence).
+ */
+static uint32_t rate_mcs[] = {WMI_MAX_CCK_TX_RATE_1M, WMI_MAX_CCK_TX_RATE_2M,
+ WMI_MAX_CCK_TX_RATE_5_5M, WMI_MAX_CCK_TX_RATE_11M,
+ WMI_MAX_OFDM_TX_RATE_6M + 4,
+ WMI_MAX_OFDM_TX_RATE_9M + 4,
+ WMI_MAX_OFDM_TX_RATE_12M + 4,
+ WMI_MAX_OFDM_TX_RATE_18M + 4,
+ WMI_MAX_OFDM_TX_RATE_24M + 4,
+ WMI_MAX_OFDM_TX_RATE_36M + 4,
+ WMI_MAX_OFDM_TX_RATE_48M + 4,
+ WMI_MAX_OFDM_TX_RATE_54M + 4};
+
+#define WMA_TX_SEND_MGMT_TYPE 0
+#define WMA_TX_SEND_DATA_TYPE 1
+
+/**
+ * wma_update_tx_send_params() - Update tx_send_params TLV info
+ * @tx_param: Pointer to tx_send_params
+ * @rid: rate ID passed by PE
+ *
+ * Return: None
+ */
+static void wma_update_tx_send_params(struct tx_send_params *tx_param,
+ enum rateid rid)
+{
+ uint8_t preamble = 0, nss = 0, rix = 0;
+
+ preamble = rate_pream[rid];
+ rix = rate_mcs[rid];
+
+ tx_param->mcs_mask = (1 << rix);
+ tx_param->nss_mask = (1 << nss);
+ tx_param->preamble_type = (1 << preamble);
+ tx_param->frame_type = WMA_TX_SEND_MGMT_TYPE;
+
+ WMA_LOGD(FL("rate_id: %d, mcs: %0x, nss: %0x, preamble: %0x"),
+ rid, tx_param->mcs_mask, tx_param->nss_mask,
+ tx_param->preamble_type);
+}
+
+/**
* wma_tx_packet() - Sends Tx Frame to TxRx
* @wma_context: wma context
* @tx_frame: frame buffer
@@ -2543,7 +2595,8 @@
eFrameType frmType, eFrameTxDir txDir, uint8_t tid,
pWMATxRxCompFunc tx_frm_download_comp_cb, void *pData,
pWMAAckFnTxComp tx_frm_ota_comp_cb, uint8_t tx_flag,
- uint8_t vdev_id, bool tdlsFlag, uint16_t channel_freq)
+ uint8_t vdev_id, bool tdlsFlag, uint16_t channel_freq,
+ enum rateid rid)
{
tp_wma_handle wma_handle = (tp_wma_handle) (wma_context);
int32_t status;
@@ -2906,6 +2959,17 @@
mgmt_param.pdata = pData;
mgmt_param.chanfreq = chanfreq;
mgmt_param.qdf_ctx = cds_get_context(QDF_MODULE_ID_QDF_DEVICE);
+ /*
+ * Update the tx_params TLV only for rates
+ * other than 1Mbps and 6 Mbps
+ */
+ if (rid < RATEID_DEFAULT &&
+ (rid != RATEID_1MBPS && rid != RATEID_6MBPS)) {
+ WMA_LOGD(FL("using rate id: %d for Tx"), rid);
+ mgmt_param.tx_params_valid = true;
+ wma_update_tx_send_params(&mgmt_param.tx_param, rid);
+ }
+
wmi_desc = wmi_desc_get(wma_handle);
if (!wmi_desc) {
/* Countinous failure can cause flooding of logs */
diff --git a/drivers/staging/qcacld-3.0/core/wma/src/wma_dev_if.c b/drivers/staging/qcacld-3.0/core/wma/src/wma_dev_if.c
index 5ea3fb3..5207948 100644
--- a/drivers/staging/qcacld-3.0/core/wma/src/wma_dev_if.c
+++ b/drivers/staging/qcacld-3.0/core/wma/src/wma_dev_if.c
@@ -786,7 +786,7 @@
WMA_LOGD("%s: Sending add bss rsp to umac(vdev %d status %d)",
__func__, resp_event->vdev_id, add_bss->status);
- wma_send_msg(wma, WMA_ADD_BSS_RSP, (void *)add_bss, 0);
+ wma_send_msg_high_priority(wma, WMA_ADD_BSS_RSP, (void *)add_bss, 0);
}
#ifdef FEATURE_AP_MCC_CH_AVOIDANCE
@@ -1087,7 +1087,8 @@
}
}
- wma_send_msg(wma, WMA_SWITCH_CHANNEL_RSP, (void *)params, 0);
+ wma_send_msg_high_priority(wma,
+ WMA_SWITCH_CHANNEL_RSP, (void *)params, 0);
} else if (req_msg->msg_type == WMA_ADD_BSS_REQ) {
tpAddBssParams bssParams = (tpAddBssParams) req_msg->user_data;
@@ -1711,8 +1712,8 @@
__func__, vdev_id);
} else {
params->status = QDF_STATUS_SUCCESS;
- wma_send_msg(wma, WMA_DELETE_BSS_RSP, (void *)params,
- 0);
+ wma_send_msg_high_priority(wma,
+ WMA_DELETE_BSS_RSP, (void *)params, 0);
}
if (iface->del_staself_req != NULL) {
@@ -2551,7 +2552,8 @@
params->staType, params->smesessionId,
params->assocId, params->bssId, params->staIdx,
params->status);
- wma_send_msg(wma, WMA_ADD_STA_RSP, (void *)params, 0);
+ wma_send_msg_high_priority(wma,
+ WMA_ADD_STA_RSP, (void *)params, 0);
} else if (req_msg->msg_type == WMA_ADD_BSS_REQ) {
tpAddBssParams params = (tpAddBssParams) req_msg->user_data;
@@ -2568,7 +2570,8 @@
params->operMode,
params->updateBss, params->nwType, params->bssId,
params->staContext.staIdx, params->status);
- wma_send_msg(wma, WMA_ADD_BSS_RSP, (void *)params, 0);
+ wma_send_msg_high_priority(wma,
+ WMA_ADD_BSS_RSP, (void *)params, 0);
} else {
WMA_LOGE(FL("Unhandled request message type: %d"),
req_msg->msg_type);
@@ -2686,7 +2689,7 @@
del_sta = req_msg->user_data;
if (del_sta->respReqd) {
WMA_LOGD(FL("Sending peer del rsp to umac"));
- wma_send_msg(wma, WMA_DELETE_STA_RSP,
+ wma_send_msg_high_priority(wma, WMA_DELETE_STA_RSP,
(void *)del_sta, QDF_STATUS_SUCCESS);
} else {
qdf_mem_free(del_sta);
@@ -2774,7 +2777,8 @@
if (wma_crash_on_fw_timeout(wma->fw_timeout_crash) == true)
QDF_BUG(0);
else
- wma_send_msg(wma, WMA_ADD_STA_RSP, (void *)params, 0);
+ wma_send_msg_high_priority(wma,
+ WMA_ADD_STA_RSP, (void *)params, 0);
} else if (tgt_req->msg_type == WMA_ADD_BSS_REQ) {
tpAddBssParams params = (tpAddBssParams) tgt_req->user_data;
@@ -2785,7 +2789,8 @@
if (wma_crash_on_fw_timeout(wma->fw_timeout_crash) == true)
QDF_BUG(0);
else
- wma_send_msg(wma, WMA_ADD_BSS_RSP, (void *)params, 0);
+ wma_send_msg_high_priority(wma,
+ WMA_ADD_BSS_RSP, (void *)params, 0);
} else if ((tgt_req->msg_type == WMA_DELETE_STA_REQ) &&
(tgt_req->type == WMA_DELETE_STA_RSP_START)) {
tpDeleteStaParams params =
@@ -2803,7 +2808,7 @@
* Send response in production builds.
*/
QDF_ASSERT(0);
- wma_send_msg(wma, WMA_DELETE_STA_RSP,
+ wma_send_msg_high_priority(wma, WMA_DELETE_STA_RSP,
(void *)params, 0);
}
} else if ((tgt_req->msg_type == WMA_DELETE_STA_REQ) &&
@@ -2843,7 +2848,8 @@
if (wma_crash_on_fw_timeout(wma->fw_timeout_crash) == true)
QDF_BUG(0);
else
- wma_send_msg(wma, WMA_DELETE_BSS_RSP, params, 0);
+ wma_send_msg_high_priority(wma,
+ WMA_DELETE_BSS_RSP, params, 0);
} else {
WMA_LOGE(FL("Unhandled timeout for msg_type:%d and type:%d"),
tgt_req->msg_type, tgt_req->type);
@@ -2987,7 +2993,7 @@
if (wma_crash_on_fw_timeout(wma->fw_timeout_crash) == true)
QDF_BUG(0);
else
- wma_send_msg(wma, WMA_SWITCH_CHANNEL_RSP,
+ wma_send_msg_high_priority(wma, WMA_SWITCH_CHANNEL_RSP,
(void *)params, 0);
if (wma->interfaces[tgt_req->vdev_id].is_channel_switch) {
wma->interfaces[tgt_req->vdev_id].is_channel_switch =
@@ -3073,7 +3079,7 @@
}
params->status = QDF_STATUS_E_TIMEOUT;
WMA_LOGA("%s: WMA_DELETE_BSS_REQ timedout", __func__);
- wma_send_msg(wma, WMA_DELETE_BSS_RSP,
+ wma_send_msg_high_priority(wma, WMA_DELETE_BSS_RSP,
(void *)params, 0);
if (iface->del_staself_req) {
WMA_LOGA("scheduling defered deletion(vdev id %x)",
@@ -3115,7 +3121,8 @@
if (wma_crash_on_fw_timeout(wma->fw_timeout_crash) == true) {
QDF_BUG(0);
} else {
- wma_send_msg(wma, WMA_ADD_BSS_RSP, (void *)params, 0);
+ wma_send_msg_high_priority(wma,
+ WMA_ADD_BSS_RSP, (void *)params, 0);
QDF_ASSERT(0);
}
goto free_tgt_req;
@@ -3444,7 +3451,7 @@
wma_remove_peer(wma, add_bss->bssId, vdev_id, peer, false);
send_fail_resp:
add_bss->status = QDF_STATUS_E_FAILURE;
- wma_send_msg(wma, WMA_ADD_BSS_RSP, (void *)add_bss, 0);
+ wma_send_msg_high_priority(wma, WMA_ADD_BSS_RSP, (void *)add_bss, 0);
}
#ifdef QCA_IBSS_SUPPORT
@@ -3597,7 +3604,7 @@
wma_remove_peer(wma, add_bss->bssId, vdev_id, peer, false);
send_fail_resp:
add_bss->status = QDF_STATUS_E_FAILURE;
- wma_send_msg(wma, WMA_ADD_BSS_RSP, (void *)add_bss, 0);
+ wma_send_msg_high_priority(wma, WMA_ADD_BSS_RSP, (void *)add_bss, 0);
}
#endif /* QCA_IBSS_SUPPORT */
@@ -3875,7 +3882,7 @@
__func__, add_bss->operMode,
add_bss->updateBss, add_bss->nwType, add_bss->bssId,
add_bss->staContext.staIdx, add_bss->status);
- wma_send_msg(wma, WMA_ADD_BSS_RSP, (void *)add_bss, 0);
+ wma_send_msg_high_priority(wma, WMA_ADD_BSS_RSP, (void *)add_bss, 0);
return;
peer_cleanup:
@@ -3884,7 +3891,8 @@
send_fail_resp:
add_bss->status = QDF_STATUS_E_FAILURE;
if (!wma_is_roam_synch_in_progress(wma, vdev_id))
- wma_send_msg(wma, WMA_ADD_BSS_RSP, (void *)add_bss, 0);
+ wma_send_msg_high_priority(wma,
+ WMA_ADD_BSS_RSP, (void *)add_bss, 0);
}
/**
@@ -4160,7 +4168,7 @@
add_sta->staType, add_sta->smesessionId,
add_sta->assocId, add_sta->bssId, add_sta->staIdx,
add_sta->status);
- wma_send_msg(wma, WMA_ADD_STA_RSP, (void *)add_sta, 0);
+ wma_send_msg_high_priority(wma, WMA_ADD_STA_RSP, (void *)add_sta, 0);
}
#ifdef FEATURE_WLAN_TDLS
@@ -4320,7 +4328,7 @@
add_sta->staType, add_sta->smesessionId,
add_sta->assocId, add_sta->bssId, add_sta->staIdx,
add_sta->status);
- wma_send_msg(wma, WMA_ADD_STA_RSP, (void *)add_sta, 0);
+ wma_send_msg_high_priority(wma, WMA_ADD_STA_RSP, (void *)add_sta, 0);
}
#endif
@@ -4554,7 +4562,8 @@
params->status);
/* Don't send a response during roam sync operation */
if (!wma_is_roam_synch_in_progress(wma, params->smesessionId))
- wma_send_msg(wma, WMA_ADD_STA_RSP, (void *)params, 0);
+ wma_send_msg_high_priority(wma,
+ WMA_ADD_STA_RSP, (void *)params, 0);
}
/**
@@ -4624,7 +4633,8 @@
if (del_sta->respReqd) {
WMA_LOGD("%s: Sending del rsp to umac (status: %d)",
__func__, del_sta->status);
- wma_send_msg(wma, WMA_DELETE_STA_RSP, (void *)del_sta, 0);
+ wma_send_msg_high_priority(wma,
+ WMA_DELETE_STA_RSP, (void *)del_sta, 0);
}
}
@@ -4713,7 +4723,8 @@
if (del_sta->respReqd) {
WMA_LOGD("%s: Sending del rsp to umac (status: %d)",
__func__, del_sta->status);
- wma_send_msg(wma, WMA_DELETE_STA_RSP, (void *)del_sta, 0);
+ wma_send_msg_high_priority(wma,
+ WMA_DELETE_STA_RSP, (void *)del_sta, 0);
}
}
#endif
@@ -4745,7 +4756,8 @@
if (params->respReqd) {
WMA_LOGD("%s: vdev_id %d status %d", __func__,
params->smesessionId, status);
- wma_send_msg(wma, WMA_DELETE_STA_RSP, (void *)params, 0);
+ wma_send_msg_high_priority(wma,
+ WMA_DELETE_STA_RSP, (void *)params, 0);
}
}
@@ -4970,9 +4982,48 @@
iface->peer_count);
fail_del_bss_ho_fail:
params->status = status;
- wma_send_msg(wma, WMA_DELETE_BSS_HO_FAIL_RSP, (void *)params, 0);
+ wma_send_msg_high_priority(wma,
+ WMA_DELETE_BSS_HO_FAIL_RSP, (void *)params, 0);
}
+#ifdef WLAN_FEATURE_HOST_ROAM
+/**
+ * wma_wait_tx_complete() - Wait till tx packets are drained
+ * @wma: wma handle
+ * @session_id: vdev id
+ *
+ * Return: none
+ */
+static void wma_wait_tx_complete(tp_wma_handle wma,
+ uint32_t session_id)
+{
+ ol_txrx_pdev_handle pdev;
+ uint8_t max_wait_iterations = 0;
+
+ if (!wma->interfaces[session_id].is_vdev_valid) {
+ WMA_LOGE("%s: Vdev is not valid: %d",
+ __func__, session_id);
+ return;
+ }
+
+ pdev = cds_get_context(QDF_MODULE_ID_TXRX);
+ max_wait_iterations =
+ wma->interfaces[session_id].delay_before_vdev_stop /
+ WMA_TX_Q_RECHECK_TIMER_WAIT;
+
+ while (ol_txrx_get_tx_pending(pdev) && max_wait_iterations) {
+ WMA_LOGW(FL("Waiting for outstanding packet to drain."));
+ qdf_wait_single_event(&wma->tx_queue_empty_event,
+ WMA_TX_Q_RECHECK_TIMER_WAIT);
+ max_wait_iterations--;
+ }
+}
+#else
+static void wma_wait_tx_complete(tp_wma_handle wma)
+{
+
+}
+#endif
/**
* wma_delete_bss() - process delete bss request from upper layer
* @wma: wma handle
@@ -4987,7 +5038,6 @@
struct wma_target_req *msg;
QDF_STATUS status = QDF_STATUS_SUCCESS;
uint8_t peer_id;
- uint8_t max_wait_iterations = 0;
ol_txrx_vdev_handle txrx_vdev = NULL;
bool roam_synch_in_progress = false;
struct wma_txrx_node *iface;
@@ -5084,17 +5134,7 @@
WMA_LOGW(FL("Outstanding msdu packets: %d"),
ol_txrx_get_tx_pending(pdev));
- max_wait_iterations =
- wma->interfaces[params->smesessionId].delay_before_vdev_stop /
- WMA_TX_Q_RECHECK_TIMER_WAIT;
-
- while (ol_txrx_get_tx_pending(pdev) && max_wait_iterations) {
- WMA_LOGW(FL("Waiting for outstanding packet to drain."));
- qdf_wait_single_event(&wma->tx_queue_empty_event,
- WMA_TX_Q_RECHECK_TIMER_MAX_WAIT);
- max_wait_iterations--;
- }
-
+ wma_wait_tx_complete(wma, params->smesessionId);
if (ol_txrx_get_tx_pending(pdev)) {
WMA_LOGW(FL("Outstanding msdu packets before VDEV_STOP : %d"),
ol_txrx_get_tx_pending(pdev));
@@ -5124,7 +5164,7 @@
out:
params->status = status;
- wma_send_msg(wma, WMA_DELETE_BSS_RSP, (void *)params, 0);
+ wma_send_msg_high_priority(wma, WMA_DELETE_BSS_RSP, (void *)params, 0);
}
/**
diff --git a/drivers/staging/qcacld-3.0/core/wma/src/wma_features.c b/drivers/staging/qcacld-3.0/core/wma/src/wma_features.c
index 783901b..3bf1577 100644
--- a/drivers/staging/qcacld-3.0/core/wma/src/wma_features.c
+++ b/drivers/staging/qcacld-3.0/core/wma/src/wma_features.c
@@ -1642,6 +1642,12 @@
buf_ptr = (uint8_t *) nan_rsp_event_hdr;
alloc_len = sizeof(tSirNanEvent);
alloc_len += nan_rsp_event_hdr->data_len;
+ if (nan_rsp_event_hdr->data_len > ((WMI_SVC_MSG_MAX_SIZE -
+ sizeof(*nan_rsp_event_hdr)) / sizeof(uint8_t))) {
+ WMA_LOGE("excess data length:%d", nan_rsp_event_hdr->data_len);
+ QDF_ASSERT(0);
+ return -EINVAL;
+ }
nan_rsp_event = (tSirNanEvent *) qdf_mem_malloc(alloc_len);
if (NULL == nan_rsp_event) {
WMA_LOGE("%s: Memory allocation failure", __func__);
@@ -6104,7 +6110,7 @@
wmi_unified_aggr_qos_cmd(wma->wmi_handle,
(struct aggr_add_ts_param *)pAggrQosRspMsg);
/* send reponse to upper layers from here only. */
- wma_send_msg(wma, WMA_AGGR_QOS_RSP, pAggrQosRspMsg, 0);
+ wma_send_msg_high_priority(wma, WMA_AGGR_QOS_RSP, pAggrQosRspMsg, 0);
}
#ifdef FEATURE_WLAN_ESE
@@ -6169,7 +6175,7 @@
#endif /* WLAN_FEATURE_ROAM_OFFLOAD */
}
- wma_send_msg(wma, WMA_ADD_TS_RSP, msg, 0);
+ wma_send_msg_high_priority(wma, WMA_ADD_TS_RSP, msg, 0);
}
/**
@@ -6379,6 +6385,24 @@
}
/**
+ * wma_multiple_add_clear_mcbc_filter() - send multiple mcast filter
+ * command to fw
+ * @wma_handle: wma handle
+ * @vdev_id: vdev id
+ * @mcast_filter_params: mcast fliter params
+ *
+ * Return: 0 for success or error code
+ */
+static QDF_STATUS wma_multiple_add_clear_mcbc_filter(tp_wma_handle wma_handle,
+ uint8_t vdev_id,
+ struct mcast_filter_params *filter_param)
+{
+ return wmi_unified_multiple_add_clear_mcbc_filter_cmd(
+ wma_handle->wmi_handle,
+ vdev_id, filter_param);
+}
+
+/**
* wma_config_enhance_multicast_offload() - config enhance multicast offload
* @wma_handle: wma handle
* @vdev_id: vdev id
@@ -6435,7 +6459,6 @@
tSirRcvFltMcAddrList *mcbc_param)
{
uint8_t vdev_id = 0;
- int i;
if (mcbc_param->ulMulticastAddrCnt <= 0) {
WMA_LOGW("Number of multicast addresses is 0");
@@ -6469,15 +6492,37 @@
__func__);
}
- /* set mcbc_param->action to clear MCList and reset
- * to configure the MCList in FW
- */
+ if (WMI_SERVICE_IS_ENABLED(wma_handle->wmi_service_bitmap,
+ WMI_SERVICE_MULTIPLE_MCAST_FILTER_SET)) {
+ struct mcast_filter_params filter_params;
- for (i = 0; i < mcbc_param->ulMulticastAddrCnt; i++) {
- wma_add_clear_mcbc_filter(wma_handle, vdev_id,
- mcbc_param->multicastAddr[i],
- (mcbc_param->action == 0));
+ WMA_LOGD("%s: FW supports multiple mcast filter", __func__);
+
+ filter_params.multicast_addr_cnt =
+ mcbc_param->ulMulticastAddrCnt;
+ qdf_mem_copy(filter_params.multicast_addr,
+ mcbc_param->multicastAddr,
+ mcbc_param->ulMulticastAddrCnt * ATH_MAC_LEN);
+ filter_params.action = mcbc_param->action;
+
+ wma_multiple_add_clear_mcbc_filter(wma_handle, vdev_id,
+ &filter_params);
+ } else {
+ int i;
+
+ WMA_LOGD("%s: FW does not support multiple mcast filter",
+ __func__);
+ /*
+ * set mcbc_param->action to clear MCList and reset
+ * to configure the MCList in FW
+ */
+
+ for (i = 0; i < mcbc_param->ulMulticastAddrCnt; i++)
+ wma_add_clear_mcbc_filter(wma_handle, vdev_id,
+ mcbc_param->multicastAddr[i],
+ (mcbc_param->action == 0));
}
+
return QDF_STATUS_SUCCESS;
}
diff --git a/drivers/staging/qcacld-3.0/core/wma/src/wma_main.c b/drivers/staging/qcacld-3.0/core/wma/src/wma_main.c
index b3569f2..fb5a2af 100644
--- a/drivers/staging/qcacld-3.0/core/wma/src/wma_main.c
+++ b/drivers/staging/qcacld-3.0/core/wma/src/wma_main.c
@@ -2819,33 +2819,41 @@
return qdf_status;
}
-/**
- * wma_send_msg() - Send wma message to PE.
- * @wma_handle: wma handle
- * @msg_type: message type
- * @body_ptr: message body ptr
- * @body_val: message body value
- *
- * Return: none
- */
-void wma_send_msg(tp_wma_handle wma_handle, uint16_t msg_type,
- void *body_ptr, uint32_t body_val)
+void wma_send_msg_by_priority(tp_wma_handle wma_handle, uint16_t msg_type,
+ void *body_ptr, uint32_t body_val, int is_high_priority)
{
- tSirMsgQ msg = { 0 };
- uint32_t status = QDF_STATUS_SUCCESS;
- tpAniSirGlobal pMac = cds_get_context(QDF_MODULE_ID_PE);
+ cds_msg_t msg = {0};
+ QDF_STATUS status;
msg.type = msg_type;
msg.bodyval = body_val;
msg.bodyptr = body_ptr;
- status = lim_post_msg_api(pMac, &msg);
- if (QDF_STATUS_SUCCESS != status) {
- if (NULL != body_ptr)
- qdf_mem_free(body_ptr);
- QDF_ASSERT(0);
+
+ status = cds_mq_post_message_by_priority(QDF_MODULE_ID_PE, &msg,
+ is_high_priority);
+ if (!QDF_IS_STATUS_SUCCESS(status)) {
+ WMA_LOGE("Failed to post msg to PE");
+ qdf_mem_free(body_ptr);
}
}
+
+void wma_send_msg(tp_wma_handle wma_handle, uint16_t msg_type,
+ void *body_ptr, uint32_t body_val)
+{
+ wma_send_msg_by_priority(wma_handle, msg_type,
+ body_ptr, body_val, LOW_PRIORITY);
+}
+
+void wma_send_msg_high_priority(tp_wma_handle wma_handle, uint16_t msg_type,
+ void *body_ptr, uint32_t body_val)
+{
+ wma_send_msg_by_priority(wma_handle, msg_type,
+ body_ptr, body_val, HIGH_PRIORITY);
+}
+
+
+
/**
* wma_set_base_macaddr_indicate() - set base mac address in fw
* @wma_handle: wma handle
diff --git a/drivers/staging/qcacld-3.0/core/wma/src/wma_mgmt.c b/drivers/staging/qcacld-3.0/core/wma/src/wma_mgmt.c
index 3aca9bd..356e604 100644
--- a/drivers/staging/qcacld-3.0/core/wma/src/wma_mgmt.c
+++ b/drivers/staging/qcacld-3.0/core/wma/src/wma_mgmt.c
@@ -1825,7 +1825,8 @@
key_info->status = QDF_STATUS_SUCCESS;
out:
- wma_send_msg(wma_handle, WMA_SET_BSSKEY_RSP, (void *)key_info, 0);
+ wma_send_msg_high_priority(wma_handle,
+ WMA_SET_BSSKEY_RSP, (void *)key_info, 0);
}
#ifdef QCA_IBSS_SUPPORT
@@ -2143,8 +2144,8 @@
key_info->status = QDF_STATUS_SUCCESS;
out:
if (key_info->sendRsp)
- wma_send_msg(wma_handle, WMA_SET_STAKEY_RSP, (void *)key_info,
- 0);
+ wma_send_msg_high_priority(wma_handle, WMA_SET_STAKEY_RSP,
+ (void *)key_info, 0);
}
/**
diff --git a/drivers/staging/qcacld-3.0/core/wma/src/wma_nan_datapath.c b/drivers/staging/qcacld-3.0/core/wma/src/wma_nan_datapath.c
index 5ff9bd7..eebeb25 100644
--- a/drivers/staging/qcacld-3.0/core/wma/src/wma_nan_datapath.c
+++ b/drivers/staging/qcacld-3.0/core/wma/src/wma_nan_datapath.c
@@ -1105,7 +1105,7 @@
send_fail_resp:
add_bss->status = QDF_STATUS_E_FAILURE;
- wma_send_msg(wma, WMA_ADD_BSS_RSP, (void *)add_bss, 0);
+ wma_send_msg_high_priority(wma, WMA_ADD_BSS_RSP, (void *)add_bss, 0);
}
/**
@@ -1275,7 +1275,7 @@
send_rsp:
WMA_LOGD(FL("Sending add sta rsp to umac (mac:%pM, status:%d)"),
add_sta->staMac, add_sta->status);
- wma_send_msg(wma, WMA_ADD_STA_RSP, (void *)add_sta, 0);
+ wma_send_msg_high_priority(wma, WMA_ADD_STA_RSP, (void *)add_sta, 0);
}
/**
@@ -1317,7 +1317,7 @@
if (del_sta->respReqd) {
WMA_LOGD(FL("Sending del rsp to umac (status: %d)"),
del_sta->status);
- wma_send_msg(wma, WMA_DELETE_STA_RSP, del_sta, 0);
+ wma_send_msg_high_priority(wma, WMA_DELETE_STA_RSP, del_sta, 0);
}
}
diff --git a/drivers/staging/qcacld-3.0/core/wma/src/wma_ocb.c b/drivers/staging/qcacld-3.0/core/wma/src/wma_ocb.c
index 3dd8eb7..16e9e3e 100644
--- a/drivers/staging/qcacld-3.0/core/wma/src/wma_ocb.c
+++ b/drivers/staging/qcacld-3.0/core/wma/src/wma_ocb.c
@@ -513,6 +513,13 @@
fix_param = param_tlvs->fixed_param;
/* Allocate and populate the response */
+ if (fix_param->num_channels > ((WMI_SVC_MSG_MAX_SIZE -
+ sizeof(*fix_param)) / sizeof(wmi_dcc_ndl_stats_per_channel))) {
+ WMA_LOGE("%s: too many channels:%d", __func__,
+ fix_param->num_channels);
+ QDF_ASSERT(0);
+ return -EINVAL;
+ }
response = qdf_mem_malloc(sizeof(*response) + fix_param->num_channels *
sizeof(wmi_dcc_ndl_stats_per_channel));
if (response == NULL)
@@ -653,6 +660,13 @@
param_tlvs = (WMI_DCC_STATS_EVENTID_param_tlvs *)event_buf;
fix_param = param_tlvs->fixed_param;
/* Allocate and populate the response */
+ if (fix_param->num_channels > ((WMI_SVC_MSG_MAX_SIZE -
+ sizeof(*fix_param)) / sizeof(wmi_dcc_ndl_stats_per_channel))) {
+ WMA_LOGE("%s: too many channels:%d", __func__,
+ fix_param->num_channels);
+ QDF_ASSERT(0);
+ return -EINVAL;
+ }
response = qdf_mem_malloc(sizeof(*response) +
fix_param->num_channels * sizeof(wmi_dcc_ndl_stats_per_channel));
if (response == NULL)
diff --git a/drivers/staging/qcacld-3.0/core/wma/src/wma_scan_roam.c b/drivers/staging/qcacld-3.0/core/wma/src/wma_scan_roam.c
index 3109b98..0316c94 100644
--- a/drivers/staging/qcacld-3.0/core/wma/src/wma_scan_roam.c
+++ b/drivers/staging/qcacld-3.0/core/wma/src/wma_scan_roam.c
@@ -97,6 +97,12 @@
};
#define WMA_EXTSCAN_CYCLE_WAKE_LOCK_DURATION (5 * 1000) /* in msec */
+
+/*
+ * Maximum number of entires that could be present in the
+ * WMI_EXTSCAN_HOTLIST_MATCH_EVENT buffer from the firmware
+ */
+#define WMA_EXTSCAN_MAX_HOTLIST_ENTRIES 10
#endif
/**
@@ -707,9 +713,12 @@
chan_list->chanParam[i].dfsSet,
chan_list->chanParam[i].pwr);
- if (chan_list->chanParam[i].dfsSet)
+ if (chan_list->chanParam[i].dfsSet) {
WMI_SET_CHANNEL_FLAG(tchan_info,
WMI_CHAN_FLAG_PASSIVE);
+ WMI_SET_CHANNEL_FLAG(tchan_info,
+ WMI_CHAN_FLAG_DFS);
+ }
if (tchan_info->mhz < WMA_2_4_GHZ_MAX_FREQ) {
WMI_SET_CHANNEL_MODE(tchan_info, MODE_11G);
@@ -885,6 +894,10 @@
params.bg_scan_bad_rssi_thresh = roam_params->bg_scan_bad_rssi_thresh -
WMA_NOISE_FLOOR_DBM_DEFAULT;
params.bg_scan_client_bitmap = roam_params->bg_scan_client_bitmap;
+ params.roam_bad_rssi_thresh_offset_2g =
+ roam_params->roam_bad_rssi_thresh_offset_2g;
+ if (params.roam_bad_rssi_thresh_offset_2g)
+ params.flags |= WMI_ROAM_BG_SCAN_FLAGS_2G_TO_5G_ONLY;
/*
* The current Noise floor in firmware is -96dBm. Penalty/Boost
@@ -931,11 +944,14 @@
params.roam_earlystop_thres_min = 0;
params.roam_earlystop_thres_max = 0;
}
+ params.rssi_thresh_offset_5g =
+ roam_req->rssi_thresh_offset_5g;
WMA_LOGD("early_stop_thresholds en=%d, min=%d, max=%d",
roam_req->early_stop_scan_enable,
params.roam_earlystop_thres_min,
params.roam_earlystop_thres_max);
+ WMA_LOGD("rssi_thresh_offset_5g = %d", params.rssi_thresh_offset_5g);
status = wmi_unified_roam_scan_offload_rssi_thresh_cmd(
wma_handle->wmi_handle, ¶ms);
@@ -955,9 +971,10 @@
roam_params->dense_min_aps_cnt,
roam_params->traffic_threshold,
roam_params->initial_dense_status);
- WMA_LOGD(FL("BG Scan Bad RSSI:%d, bitmap:0x%x"),
+ WMA_LOGD(FL("BG Scan Bad RSSI:%d, bitmap:0x%x Offset for 2G to 5G Roam:%d"),
roam_params->bg_scan_bad_rssi_thresh,
- roam_params->bg_scan_client_bitmap);
+ roam_params->bg_scan_client_bitmap,
+ roam_params->roam_bad_rssi_thresh_offset_2g);
return status;
}
@@ -1182,60 +1199,56 @@
#endif
/**
* wma_roam_scan_fill_ap_profile() - fill ap_profile
- * @wma_handle: wma handle
- * @pMac: Mac ptr
* @roam_req: roam offload scan request
- * @ap_profile_p: ap profile
+ * @profile: ap profile
*
* Fill ap_profile structure from configured parameters
*
* Return: none
*/
-void wma_roam_scan_fill_ap_profile(tp_wma_handle wma_handle,
- tpAniSirGlobal pMac,
- tSirRoamOffloadScanReq *roam_req,
- wmi_ap_profile *ap_profile_p)
+static void wma_roam_scan_fill_ap_profile(tSirRoamOffloadScanReq *roam_req,
+ struct ap_profile *profile)
{
uint32_t rsn_authmode;
- qdf_mem_zero(ap_profile_p, sizeof(wmi_ap_profile));
+ qdf_mem_zero(profile, sizeof(*profile));
if (roam_req == NULL) {
- ap_profile_p->ssid.ssid_len = 0;
- ap_profile_p->ssid.ssid[0] = 0;
- ap_profile_p->rsn_authmode = WMI_AUTH_NONE;
- ap_profile_p->rsn_ucastcipherset = WMI_CIPHER_NONE;
- ap_profile_p->rsn_mcastcipherset = WMI_CIPHER_NONE;
- ap_profile_p->rsn_mcastmgmtcipherset = WMI_CIPHER_NONE;
- ap_profile_p->rssi_threshold = WMA_ROAM_RSSI_DIFF_DEFAULT;
+ profile->ssid.length = 0;
+ profile->ssid.mac_ssid[0] = 0;
+ profile->rsn_authmode = WMI_AUTH_NONE;
+ profile->rsn_ucastcipherset = WMI_CIPHER_NONE;
+ profile->rsn_mcastcipherset = WMI_CIPHER_NONE;
+ profile->rsn_mcastmgmtcipherset = WMI_CIPHER_NONE;
+ profile->rssi_threshold = WMA_ROAM_RSSI_DIFF_DEFAULT;
} else {
- ap_profile_p->ssid.ssid_len =
+ profile->ssid.length =
roam_req->ConnectedNetwork.ssId.length;
- qdf_mem_copy(ap_profile_p->ssid.ssid,
+ qdf_mem_copy(profile->ssid.mac_ssid,
roam_req->ConnectedNetwork.ssId.ssId,
- ap_profile_p->ssid.ssid_len);
- ap_profile_p->rsn_authmode =
+ profile->ssid.length);
+ profile->rsn_authmode =
e_csr_auth_type_to_rsn_authmode(
roam_req->ConnectedNetwork.authentication,
roam_req->ConnectedNetwork.encryption);
- rsn_authmode = ap_profile_p->rsn_authmode;
+ rsn_authmode = profile->rsn_authmode;
if ((rsn_authmode == WMI_AUTH_CCKM_WPA) ||
(rsn_authmode == WMI_AUTH_CCKM_RSNA))
- ap_profile_p->rsn_authmode =
+ profile->rsn_authmode =
wma_roam_scan_get_cckm_mode(
roam_req, rsn_authmode);
- ap_profile_p->rsn_ucastcipherset =
+ profile->rsn_ucastcipherset =
e_csr_encryption_type_to_rsn_cipherset(
roam_req->ConnectedNetwork.encryption);
- ap_profile_p->rsn_mcastcipherset =
+ profile->rsn_mcastcipherset =
e_csr_encryption_type_to_rsn_cipherset(
roam_req->ConnectedNetwork.mcencryption);
- ap_profile_p->rsn_mcastmgmtcipherset =
- ap_profile_p->rsn_mcastcipherset;
- ap_profile_p->rssi_threshold = roam_req->RoamRssiDiff;
+ profile->rsn_mcastmgmtcipherset =
+ profile->rsn_mcastcipherset;
+ profile->rssi_threshold = roam_req->RoamRssiDiff;
#ifdef WLAN_FEATURE_11W
if (roam_req->ConnectedNetwork.mfp_enabled)
- ap_profile_p->flags |= WMI_AP_PROFILE_FLAG_PMF;
+ profile->flags |= WMI_AP_PROFILE_FLAG_PMF;
#endif
}
}
@@ -1553,19 +1566,23 @@
/**
* wma_roam_scan_offload_ap_profile() - set roam ap profile in fw
* @wma_handle: wma handle
- * @ap_profile_p: ap profile
- * @vdev_id: vdev id
+ * @mac_ctx: Mac ptr
+ * @roam_req: Request which contains the ap profile
*
* Send WMI_ROAM_AP_PROFILE to firmware
*
* Return: QDF status
*/
-QDF_STATUS wma_roam_scan_offload_ap_profile(tp_wma_handle wma_handle,
- wmi_ap_profile *ap_profile_p,
- uint32_t vdev_id)
+static QDF_STATUS wma_roam_scan_offload_ap_profile(tp_wma_handle wma_handle,
+ tSirRoamOffloadScanReq *roam_req)
{
+ struct ap_profile_params ap_profile;
+
+ ap_profile.vdev_id = roam_req->sessionId;
+ wma_roam_scan_fill_ap_profile(roam_req, &ap_profile.profile);
+ ap_profile.param = roam_req->score_params;
return wmi_unified_send_roam_scan_offload_ap_cmd(wma_handle->wmi_handle,
- ap_profile_p, vdev_id);
+ &ap_profile);
}
/**
@@ -1585,7 +1602,7 @@
{
int i;
QDF_STATUS status = QDF_STATUS_SUCCESS;
- uint32_t len = 0, num_bssid_black_list = 0, num_ssid_white_list = 0,
+ uint32_t num_bssid_black_list = 0, num_ssid_white_list = 0,
num_bssid_preferred_list = 0;
uint32_t op_bitmap = 0;
struct roam_ext_params *roam_params;
@@ -1606,30 +1623,20 @@
op_bitmap |= 0x1;
num_bssid_black_list =
roam_params->num_bssid_avoid_list;
- len = num_bssid_black_list * sizeof(wmi_mac_addr);
- len += WMI_TLV_HDR_SIZE;
break;
case REASON_ROAM_SET_SSID_ALLOWED:
op_bitmap |= 0x2;
num_ssid_white_list =
roam_params->num_ssid_allowed_list;
- len = num_ssid_white_list * sizeof(wmi_ssid);
- len += WMI_TLV_HDR_SIZE;
break;
case REASON_ROAM_SET_FAVORED_BSSID:
op_bitmap |= 0x4;
num_bssid_preferred_list =
roam_params->num_bssid_favored;
- len = num_bssid_preferred_list * sizeof(wmi_mac_addr);
- len += WMI_TLV_HDR_SIZE;
- len += num_bssid_preferred_list * sizeof(A_UINT32);
break;
case REASON_CTX_INIT:
if (roam_req->Command == ROAM_SCAN_OFFLOAD_START) {
params->lca_disallow_config_present = true;
- len += sizeof(
- wmi_roam_lca_disallow_config_tlv_param)
- + (4 * WMI_TLV_HDR_SIZE);
op_bitmap |= ROAM_FILTER_OP_BITMAP_LCA_DISALLOW;
} else {
WMA_LOGD("%s : Roam Filter need not be sent", __func__);
@@ -1649,11 +1656,7 @@
*/
op_bitmap = 0x2 | 0x4;
num_ssid_white_list = roam_params->num_ssid_allowed_list;
- len = num_ssid_white_list * sizeof(wmi_ssid);
num_bssid_preferred_list = roam_params->num_bssid_favored;
- len += num_bssid_preferred_list * sizeof(wmi_mac_addr);
- len += num_bssid_preferred_list * sizeof(A_UINT32);
- len += (2 * WMI_TLV_HDR_SIZE);
}
/* fill in fixed values */
@@ -1662,7 +1665,6 @@
params->num_bssid_black_list = num_bssid_black_list;
params->num_ssid_white_list = num_ssid_white_list;
params->num_bssid_preferred_list = num_bssid_preferred_list;
- params->len = len;
qdf_mem_copy(params->bssid_avoid_list, roam_params->bssid_avoid_list,
MAX_BSSID_AVOID_LIST * sizeof(struct qdf_mac_addr));
@@ -1770,7 +1772,6 @@
{
QDF_STATUS qdf_status = QDF_STATUS_SUCCESS;
wmi_start_scan_cmd_fixed_param scan_params;
- wmi_ap_profile ap_profile;
tpAniSirGlobal pMac = cds_get_context(QDF_MODULE_ID_PE);
uint32_t mode = 0;
struct wma_txrx_node *intr = NULL;
@@ -1855,11 +1856,8 @@
if (qdf_status != QDF_STATUS_SUCCESS)
break;
- wma_roam_scan_fill_ap_profile(wma_handle, pMac, roam_req,
- &ap_profile);
-
qdf_status = wma_roam_scan_offload_ap_profile(wma_handle,
- &ap_profile, roam_req->sessionId);
+ roam_req);
if (qdf_status != QDF_STATUS_SUCCESS)
break;
@@ -2085,10 +2083,8 @@
if (qdf_status != QDF_STATUS_SUCCESS)
break;
- wma_roam_scan_fill_ap_profile(wma_handle, pMac, roam_req,
- &ap_profile);
qdf_status = wma_roam_scan_offload_ap_profile(wma_handle,
- &ap_profile, roam_req->sessionId);
+ roam_req);
if (qdf_status != QDF_STATUS_SUCCESS)
break;
@@ -2448,6 +2444,23 @@
}
WMA_LOGI("LFR3: Received WMA_ROAM_OFFLOAD_SYNCH_IND");
+ /*
+ * All below length fields are unsigned and hence positive numbers.
+ * Maximum number during the addition would be (3 * MAX_LIMIT(UINT32) +
+ * few fixed fields).
+ */
+ if (sizeof(*synch_event) + synch_event->bcn_probe_rsp_len +
+ synch_event->reassoc_rsp_len +
+ synch_event->reassoc_req_len +
+ sizeof(wmi_channel) + sizeof(wmi_key_material) +
+ sizeof(uint32_t) > WMI_SVC_MSG_MAX_SIZE) {
+ WMA_LOGE("excess synch payload: LEN bcn:%d, req:%d, rsp:%d",
+ synch_event->bcn_probe_rsp_len,
+ synch_event->reassoc_req_len,
+ synch_event->reassoc_rsp_len);
+ goto cleanup_label;
+ }
+
cds_host_diag_log_work(&wma->roam_ho_wl,
WMA_ROAM_HO_WAKE_LOCK_DURATION,
WIFI_POWER_EVENT_WAKELOCK_WOW);
@@ -3161,7 +3174,8 @@
params->status = status;
WMA_LOGD("%s: sending WMA_SWITCH_CHANNEL_RSP, status = 0x%x",
__func__, status);
- wma_send_msg(wma, WMA_SWITCH_CHANNEL_RSP, (void *)params, 0);
+ wma_send_msg_high_priority(wma, WMA_SWITCH_CHANNEL_RSP,
+ (void *)params, 0);
}
#ifdef FEATURE_WLAN_SCAN_PNO
@@ -4190,7 +4204,8 @@
struct extscan_hotlist_match *dest_hotlist;
tSirWifiScanResult *dest_ap;
wmi_extscan_wlan_descriptor *src_hotlist;
- int numap, j, ap_found = 0;
+ uint32_t numap;
+ int j, ap_found = 0;
tpAniSirGlobal pMac = cds_get_context(QDF_MODULE_ID_PE);
if (!pMac) {
@@ -4215,6 +4230,11 @@
WMA_LOGE("%s: Hotlist AP's list invalid", __func__);
return -EINVAL;
}
+ if (numap > WMA_EXTSCAN_MAX_HOTLIST_ENTRIES) {
+ WMA_LOGE("%s: Total Entries %u greater than max",
+ __func__, numap);
+ numap = WMA_EXTSCAN_MAX_HOTLIST_ENTRIES;
+ }
dest_hotlist = qdf_mem_malloc(sizeof(*dest_hotlist) +
sizeof(*dest_ap) * numap);
if (!dest_hotlist) {
@@ -4461,6 +4481,8 @@
wmi_extscan_rssi_info *src_rssi;
int numap, i, moredata, scan_ids_cnt, buf_len;
tpAniSirGlobal pMac = cds_get_context(QDF_MODULE_ID_PE);
+ uint32_t total_len;
+ bool excess_data = false;
if (!pMac) {
WMA_LOGE("%s: Invalid pMac", __func__);
@@ -4506,6 +4528,44 @@
WMA_LOGD("%s: scan_ids_cnt %d", __func__, scan_ids_cnt);
dest_cachelist->num_scan_ids = scan_ids_cnt;
+ if (event->num_entries_in_page >
+ (WMI_SVC_MSG_MAX_SIZE - sizeof(*event))/sizeof(*src_hotlist)) {
+ WMA_LOGE("%s:excess num_entries_in_page %d in WMI event",
+ __func__, event->num_entries_in_page);
+ qdf_mem_free(dest_cachelist);
+ QDF_ASSERT(0);
+ return -EINVAL;
+ } else {
+ total_len = sizeof(*event) +
+ (event->num_entries_in_page * sizeof(*src_hotlist));
+ }
+ for (i = 0; i < event->num_entries_in_page; i++) {
+ if (src_hotlist[i].ie_length > WMI_SVC_MSG_MAX_SIZE -
+ total_len) {
+ excess_data = true;
+ break;
+ } else {
+ total_len += src_hotlist[i].ie_length;
+ WMA_LOGD("total len IE: %d", total_len);
+ }
+
+ if (src_hotlist[i].number_rssi_samples >
+ (WMI_SVC_MSG_MAX_SIZE - total_len)/sizeof(*src_rssi)) {
+ excess_data = true;
+ break;
+ } else {
+ total_len += (src_hotlist[i].number_rssi_samples *
+ sizeof(*src_rssi));
+ WMA_LOGD("total len RSSI samples: %d", total_len);
+ }
+ }
+ if (excess_data) {
+ WMA_LOGE("%s:excess data in WMI event",
+ __func__);
+ qdf_mem_free(dest_cachelist);
+ QDF_ASSERT(0);
+ return -EINVAL;
+ }
buf_len = sizeof(*dest_result) * scan_ids_cnt;
dest_cachelist->result = qdf_mem_malloc(buf_len);
if (!dest_cachelist->result) {
@@ -4569,6 +4629,8 @@
int moredata;
int rssi_num = 0;
tpAniSirGlobal pMac = cds_get_context(QDF_MODULE_ID_PE);
+ uint32_t buf_len;
+ bool excess_data = false;
if (!pMac) {
WMA_LOGE("%s: Invalid pMac", __func__);
@@ -4602,6 +4664,31 @@
} else {
moredata = 0;
}
+
+ do {
+ if (event->num_entries_in_page >
+ (WMI_SVC_MSG_MAX_SIZE - sizeof(*event))/
+ sizeof(*src_chglist)) {
+ excess_data = true;
+ break;
+ } else {
+ buf_len =
+ sizeof(*event) + (event->num_entries_in_page *
+ sizeof(*src_chglist));
+ }
+ if (rssi_num >
+ (WMI_SVC_MSG_MAX_SIZE - buf_len)/sizeof(int32_t)) {
+ excess_data = true;
+ break;
+ }
+ } while (0);
+
+ if (excess_data) {
+ WMA_LOGE("buffer len exceeds WMI payload,numap:%d, rssi_num:%d",
+ numap, rssi_num);
+ QDF_ASSERT(0);
+ return -EINVAL;
+ }
dest_chglist = qdf_mem_malloc(sizeof(*dest_chglist) +
sizeof(*dest_ap) * numap +
sizeof(int32_t) * rssi_num);
@@ -4676,6 +4763,18 @@
event = param_buf->fixed_param;
buf_ptr = (uint8_t *)param_buf->fixed_param;
+ /*
+ * All the below lengths are UINT32 and summing up and checking
+ * against a constant should not be an issue.
+ */
+ if ((sizeof(*event) + event->ie_length + event->anqp_length) >
+ WMI_SVC_MSG_MAX_SIZE) {
+ WMA_LOGE("IE Length: %d or ANQP Length: %d is huge",
+ event->ie_length, event->anqp_length);
+ QDF_ASSERT(0);
+ return -EINVAL;
+ }
+
dest_match = qdf_mem_malloc(sizeof(*dest_match) +
event->ie_length + event->anqp_length);
if (!dest_match) {
diff --git a/drivers/staging/qcacld-3.0/core/wma/src/wma_utils.c b/drivers/staging/qcacld-3.0/core/wma/src/wma_utils.c
index a4622fb..9c5827e 100644
--- a/drivers/staging/qcacld-3.0/core/wma/src/wma_utils.c
+++ b/drivers/staging/qcacld-3.0/core/wma/src/wma_utils.c
@@ -455,6 +455,12 @@
alloc_len = sizeof(tSirStatsExtEvent);
alloc_len += stats_ext_info->data_len;
+ if (stats_ext_info->data_len > (WMI_SVC_MSG_MAX_SIZE -
+ sizeof(*stats_ext_info))) {
+ WMA_LOGE("Excess data_len:%d", stats_ext_info->data_len);
+ QDF_ASSERT(0);
+ return -EINVAL;
+ }
stats_ext_event = (tSirStatsExtEvent *) qdf_mem_malloc(alloc_len);
if (NULL == stats_ext_event) {
WMA_LOGE("%s: Memory allocation failure", __func__);
@@ -1169,6 +1175,8 @@
uint32_t next_res_offset, next_peer_offset, next_rate_offset;
size_t peer_info_size, peer_stats_size, rate_stats_size;
size_t link_stats_results_size;
+ bool excess_data = false;
+ uint32_t buf_len;
tpAniSirGlobal pMac = cds_get_context(QDF_MODULE_ID_PE);
@@ -1204,6 +1212,33 @@
return -EINVAL;
}
+ do {
+ if (peer_stats->num_rates >
+ WMI_SVC_MSG_MAX_SIZE/sizeof(wmi_rate_stats)) {
+ excess_data = true;
+ break;
+ } else {
+ buf_len =
+ peer_stats->num_rates * sizeof(wmi_rate_stats);
+ }
+ if (fixed_param->num_peers >
+ WMI_SVC_MSG_MAX_SIZE/sizeof(wmi_peer_link_stats)) {
+ excess_data = true;
+ break;
+ } else {
+ buf_len += fixed_param->num_peers *
+ sizeof(wmi_peer_link_stats);
+ }
+ } while (0);
+
+ if (excess_data ||
+ (sizeof(*fixed_param) > WMI_SVC_MSG_MAX_SIZE - buf_len)) {
+ WMA_LOGE("excess wmi buffer: rates:%d, peers:%d",
+ peer_stats->num_rates, fixed_param->num_peers);
+ QDF_ASSERT(0);
+ return -EINVAL;
+ }
+
/*
* num_rates - sum of the rates of all the peers
*/
@@ -1372,6 +1407,13 @@
fixed_param->power_level_offset,
fixed_param->radio_id);
+ if (fixed_param->num_tx_power_levels > ((WMI_SVC_MSG_MAX_SIZE -
+ sizeof(*fixed_param)) / sizeof(uint32_t))) {
+ WMA_LOGE("%s: excess tx_power buffers:%d", __func__,
+ fixed_param->num_tx_power_levels);
+ QDF_ASSERT(0);
+ return -EINVAL;
+ }
rs_results = (tSirWifiRadioStat *) &link_stats_results->results[0] +
fixed_param->radio_id;
tx_power_level_values = (uint8_t *) param_tlvs->tx_time_per_power_level;
@@ -2665,7 +2707,7 @@
wmi_vdev_rate_ht_info *ht_info;
struct wma_txrx_node *intr = wma->interfaces;
uint8_t link_status = LINK_STATUS_LEGACY;
- int i;
+ uint32_t i;
param_buf =
(WMI_UPDATE_VDEV_RATE_STATS_EVENTID_param_tlvs *) cmd_param_info;
@@ -2679,6 +2721,14 @@
ht_info = (wmi_vdev_rate_ht_info *) param_buf->ht_info;
WMA_LOGD("num_vdev_stats: %d", event->num_vdev_stats);
+
+ if (event->num_vdev_stats > ((WMI_SVC_MSG_MAX_SIZE -
+ sizeof(*event)) / sizeof(*ht_info))) {
+ WMA_LOGE("%s: excess vdev_stats buffers:%d", __func__,
+ event->num_vdev_stats);
+ QDF_ASSERT(0);
+ return -EINVAL;
+ }
for (i = 0; (i < event->num_vdev_stats) && ht_info; i++) {
WMA_LOGD("%s vdevId:%d tx_nss:%d rx_nss:%d tx_preamble:%d rx_preamble:%d",
__func__, ht_info->vdevid, ht_info->tx_nss,
@@ -2869,7 +2919,10 @@
wmi_rssi_stats *rssi_stats;
wmi_per_chain_rssi_stats *rssi_event;
struct wma_txrx_node *node;
- uint8_t i, *temp;
+ uint8_t *temp;
+ uint32_t i;
+ uint32_t buf_len = 0;
+ bool excess_data = false;
wmi_congestion_stats *congestion_stats;
tpAniSirGlobal mac;
@@ -2881,6 +2934,53 @@
event = param_buf->fixed_param;
temp = (uint8_t *) param_buf->data;
+ do {
+ if (event->num_pdev_stats > ((WMI_SVC_MSG_MAX_SIZE -
+ sizeof(*event)) / sizeof(*pdev_stats))) {
+ excess_data = true;
+ break;
+ } else {
+ buf_len += event->num_pdev_stats * sizeof(*pdev_stats);
+ }
+
+ if (event->num_vdev_stats > ((WMI_SVC_MSG_MAX_SIZE -
+ sizeof(*event)) / sizeof(*vdev_stats))) {
+ excess_data = true;
+ break;
+ } else {
+ buf_len += event->num_vdev_stats * sizeof(*vdev_stats);
+ }
+
+ if (event->num_peer_stats > ((WMI_SVC_MSG_MAX_SIZE -
+ sizeof(*event)) / sizeof(*peer_stats))) {
+ excess_data = true;
+ break;
+ } else {
+ buf_len += event->num_peer_stats * sizeof(*peer_stats);
+ }
+
+ rssi_event =
+ (wmi_per_chain_rssi_stats *) param_buf->chain_stats;
+ if (rssi_event && (rssi_event->num_per_chain_rssi_stats >
+ ((WMI_SVC_MSG_MAX_SIZE - sizeof(*event)) /
+ sizeof(*peer_stats)))) {
+ excess_data = true;
+ break;
+ } else {
+ buf_len += rssi_event->num_per_chain_rssi_stats *
+ sizeof(*peer_stats);
+ }
+ } while (0);
+
+ if (excess_data ||
+ (sizeof(*event) > WMI_SVC_MSG_MAX_SIZE - buf_len)) {
+ WMA_LOGE("excess wmi buffer: stats pdev %d vdev %d peer %d",
+ event->num_pdev_stats, event->num_vdev_stats,
+ event->num_peer_stats);
+ QDF_ASSERT(0);
+ return -EINVAL;
+ }
+
if (event->num_pdev_stats > 0) {
for (i = 0; i < event->num_pdev_stats; i++) {
pdev_stats = (wmi_pdev_stats *) temp;
@@ -3402,9 +3502,27 @@
switch (nw_type) {
case eSIR_11B_NW_TYPE:
+#ifdef FEATURE_WLAN_TDLS
+ if (STA_ENTRY_TDLS_PEER == sta_type) {
+ if (is_vht) {
+ if (CH_WIDTH_80MHZ == ch_width)
+ phymode = MODE_11AC_VHT80;
+ else
+ phymode = (CH_WIDTH_40MHZ == ch_width) ?
+ MODE_11AC_VHT40 :
+ MODE_11AC_VHT20;
+ } else if (is_ht) {
+ phymode = (CH_WIDTH_40MHZ == ch_width) ?
+ MODE_11NG_HT40 : MODE_11NG_HT20;
+ } else
+ phymode = MODE_11B;
+ } else
+#endif /* FEATURE_WLAN_TDLS */
+ {
phymode = MODE_11B;
if (is_ht || is_vht)
WMA_LOGE("HT/VHT is enabled with 11B NW type");
+ }
break;
case eSIR_11G_NW_TYPE:
if (!(is_ht || is_vht)) {
diff --git a/drivers/staging/rdma/ehca/ehca_mrmw.c b/drivers/staging/rdma/ehca/ehca_mrmw.c
index f914b30..4d52ca4 100644
--- a/drivers/staging/rdma/ehca/ehca_mrmw.c
+++ b/drivers/staging/rdma/ehca/ehca_mrmw.c
@@ -1921,7 +1921,7 @@
u64 *kpage)
{
int ret = 0;
- u64 pgaddr, prev_pgaddr;
+ u64 pgaddr, prev_pgaddr = 0;
u32 j = 0;
int kpages_per_hwpage = pginfo->hwpage_size / PAGE_SIZE;
int nr_kpages = kpages_per_hwpage;
@@ -2417,6 +2417,7 @@
ehca_err(&shca->ib_device, "kpage alloc failed");
return -ENOMEM;
}
+ hret = H_SUCCESS;
for (top = 0; top < EHCA_MAP_ENTRIES; top++) {
if (!ehca_bmap_valid(ehca_bmap->top[top]))
continue;
diff --git a/drivers/staging/rtl8188eu/core/rtw_ap.c b/drivers/staging/rtl8188eu/core/rtw_ap.c
index 3cdb40f..f5cedbb 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ap.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ap.c
@@ -894,7 +894,7 @@
return _FAIL;
- if (len > MAX_IE_SZ)
+ if (len < 0 || len > MAX_IE_SZ)
return _FAIL;
pbss_network->IELength = len;
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
index 82a7c27..c2d2c17 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
@@ -47,7 +47,9 @@
{USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */
{USB_DEVICE(0x2001, 0x3310)}, /* Dlink DWA-123 REV D1 */
{USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */
+ {USB_DEVICE(0x2357, 0x010c)}, /* TP-Link TL-WN722N v2 */
{USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */
+ {USB_DEVICE(USB_VENDER_ID_REALTEK, 0xffef)}, /* Rosewill RNX-N150NUB */
{} /* Terminating entry */
};
diff --git a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
index e9c4f97..7a8ceb9 100644
--- a/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
+++ b/drivers/staging/rtl8192e/rtl8192e/r8192E_dev.c
@@ -97,8 +97,9 @@
switch (variable) {
case HW_VAR_BSSID:
- rtl92e_writel(dev, BSSIDR, ((u32 *)(val))[0]);
- rtl92e_writew(dev, BSSIDR+2, ((u16 *)(val+2))[0]);
+ /* BSSIDR 2 byte alignment */
+ rtl92e_writew(dev, BSSIDR, *(u16 *)val);
+ rtl92e_writel(dev, BSSIDR + 2, *(u32 *)(val + 2));
break;
case HW_VAR_MEDIA_STATUS:
@@ -626,7 +627,7 @@
struct r8192_priv *priv = rtllib_priv(dev);
RT_TRACE(COMP_INIT, "===========>%s()\n", __func__);
- curCR = rtl92e_readl(dev, EPROM_CMD);
+ curCR = rtl92e_readw(dev, EPROM_CMD);
RT_TRACE(COMP_INIT, "read from Reg Cmd9346CR(%x):%x\n", EPROM_CMD,
curCR);
priv->epromtype = (curCR & EPROM_CMD_9356SEL) ? EEPROM_93C56 :
@@ -963,8 +964,8 @@
rtl92e_config_rate(dev, &rate_config);
priv->dot11CurrentPreambleMode = PREAMBLE_AUTO;
priv->basic_rate = rate_config &= 0x15f;
- rtl92e_writel(dev, BSSIDR, ((u32 *)net->bssid)[0]);
- rtl92e_writew(dev, BSSIDR+4, ((u16 *)net->bssid)[2]);
+ rtl92e_writew(dev, BSSIDR, *(u16 *)net->bssid);
+ rtl92e_writel(dev, BSSIDR + 2, *(u32 *)(net->bssid + 2));
if (priv->rtllib->iw_mode == IW_MODE_ADHOC) {
rtl92e_writew(dev, ATIMWND, 2);
@@ -1184,8 +1185,7 @@
struct cb_desc *cb_desc, struct sk_buff *skb)
{
struct r8192_priv *priv = rtllib_priv(dev);
- dma_addr_t mapping = pci_map_single(priv->pdev, skb->data, skb->len,
- PCI_DMA_TODEVICE);
+ dma_addr_t mapping;
struct tx_fwinfo_8190pci *pTxFwInfo = NULL;
pTxFwInfo = (struct tx_fwinfo_8190pci *)skb->data;
@@ -1196,8 +1196,6 @@
pTxFwInfo->Short = _rtl92e_query_is_short(pTxFwInfo->TxHT,
pTxFwInfo->TxRate, cb_desc);
- if (pci_dma_mapping_error(priv->pdev, mapping))
- netdev_err(dev, "%s(): DMA Mapping error\n", __func__);
if (cb_desc->bAMPDUEnable) {
pTxFwInfo->AllowAggregation = 1;
pTxFwInfo->RxMF = cb_desc->ampdu_factor;
@@ -1232,6 +1230,14 @@
}
memset((u8 *)pdesc, 0, 12);
+
+ mapping = pci_map_single(priv->pdev, skb->data, skb->len,
+ PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(priv->pdev, mapping)) {
+ netdev_err(dev, "%s(): DMA Mapping error\n", __func__);
+ return;
+ }
+
pdesc->LINIP = 0;
pdesc->CmdInit = 1;
pdesc->Offset = sizeof(struct tx_fwinfo_8190pci) + 8;
diff --git a/drivers/staging/rts5208/rtsx_scsi.c b/drivers/staging/rts5208/rtsx_scsi.c
index 60871f3..12a3893 100644
--- a/drivers/staging/rts5208/rtsx_scsi.c
+++ b/drivers/staging/rts5208/rtsx_scsi.c
@@ -414,7 +414,7 @@
sense->ascq = ascq;
if (sns_key_info0 != 0) {
sense->sns_key_info[0] = SKSV | sns_key_info0;
- sense->sns_key_info[1] = (sns_key_info1 & 0xf0) >> 8;
+ sense->sns_key_info[1] = (sns_key_info1 & 0xf0) >> 4;
sense->sns_key_info[2] = sns_key_info1 & 0x0f;
}
}
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index e405108..5a848fe 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -529,6 +529,9 @@
goto free_all;
}
+ if (vnt_key_init_table(priv))
+ goto free_all;
+
priv->int_interval = 1; /* bInterval is set to 1 */
vnt_int_start_interrupt(priv);
diff --git a/drivers/staging/vt6656/usbpipe.c b/drivers/staging/vt6656/usbpipe.c
index c975c3b..cfc3017 100644
--- a/drivers/staging/vt6656/usbpipe.c
+++ b/drivers/staging/vt6656/usbpipe.c
@@ -50,15 +50,25 @@
u16 index, u16 length, u8 *buffer)
{
int status = 0;
+ u8 *usb_buffer;
if (test_bit(DEVICE_FLAGS_DISCONNECTED, &priv->flags))
return STATUS_FAILURE;
mutex_lock(&priv->usb_lock);
+ usb_buffer = kmemdup(buffer, length, GFP_KERNEL);
+ if (!usb_buffer) {
+ mutex_unlock(&priv->usb_lock);
+ return -ENOMEM;
+ }
+
status = usb_control_msg(priv->usb,
- usb_sndctrlpipe(priv->usb, 0), request, 0x40, value,
- index, buffer, length, USB_CTL_WAIT);
+ usb_sndctrlpipe(priv->usb, 0),
+ request, 0x40, value,
+ index, usb_buffer, length, USB_CTL_WAIT);
+
+ kfree(usb_buffer);
mutex_unlock(&priv->usb_lock);
@@ -78,15 +88,28 @@
u16 index, u16 length, u8 *buffer)
{
int status;
+ u8 *usb_buffer;
if (test_bit(DEVICE_FLAGS_DISCONNECTED, &priv->flags))
return STATUS_FAILURE;
mutex_lock(&priv->usb_lock);
+ usb_buffer = kmalloc(length, GFP_KERNEL);
+ if (!usb_buffer) {
+ mutex_unlock(&priv->usb_lock);
+ return -ENOMEM;
+ }
+
status = usb_control_msg(priv->usb,
- usb_rcvctrlpipe(priv->usb, 0), request, 0xc0, value,
- index, buffer, length, USB_CTL_WAIT);
+ usb_rcvctrlpipe(priv->usb, 0),
+ request, 0xc0, value,
+ index, usb_buffer, length, USB_CTL_WAIT);
+
+ if (status == length)
+ memcpy(buffer, usb_buffer, length);
+
+ kfree(usb_buffer);
mutex_unlock(&priv->usb_lock);
diff --git a/drivers/staging/wlan-ng/p80211netdev.c b/drivers/staging/wlan-ng/p80211netdev.c
index a9c1e0b..e35fbec 100644
--- a/drivers/staging/wlan-ng/p80211netdev.c
+++ b/drivers/staging/wlan-ng/p80211netdev.c
@@ -232,7 +232,7 @@
struct p80211_hdr_a3 *hdr;
hdr = (struct p80211_hdr_a3 *) skb->data;
- if (p80211_rx_typedrop(wlandev, hdr->fc))
+ if (p80211_rx_typedrop(wlandev, le16_to_cpu(hdr->fc)))
return CONV_TO_ETHER_SKIPPED;
/* perform mcast filtering: allow my local address through but reject
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 6ed80b0..1ff1c83 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -418,6 +418,7 @@
return 0;
}
np->np_thread_state = ISCSI_NP_THREAD_RESET;
+ atomic_inc(&np->np_reset_count);
if (np->np_thread) {
spin_unlock_bh(&np->np_thread_lock);
@@ -1112,6 +1113,18 @@
*/
if (dump_payload)
goto after_immediate_data;
+ /*
+ * Check for underflow case where both EDTL and immediate data payload
+ * exceeds what is presented by CDB's TRANSFER LENGTH, and what has
+ * already been set in target_cmd_size_check() as se_cmd->data_length.
+ *
+ * For this special case, fail the command and dump the immediate data
+ * payload.
+ */
+ if (cmd->first_burst_len > cmd->se_cmd.data_length) {
+ cmd->sense_reason = TCM_INVALID_CDB_FIELD;
+ goto after_immediate_data;
+ }
immed_ret = iscsit_handle_immediate_data(cmd, hdr,
cmd->first_burst_len);
@@ -1984,6 +1997,7 @@
cmd->cmd_sn = be32_to_cpu(hdr->cmdsn);
cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn);
cmd->data_direction = DMA_NONE;
+ kfree(cmd->text_in_ptr);
cmd->text_in_ptr = NULL;
return 0;
@@ -3953,6 +3967,8 @@
{
int ret = 0;
struct iscsi_conn *conn = arg;
+ bool conn_freed = false;
+
/*
* Allow ourselves to be interrupted by SIGINT so that a
* connection recovery / failure event can be triggered externally.
@@ -3978,12 +3994,14 @@
goto transport_err;
ret = iscsit_handle_response_queue(conn);
- if (ret == 1)
+ if (ret == 1) {
goto get_immediate;
- else if (ret == -ECONNRESET)
+ } else if (ret == -ECONNRESET) {
+ conn_freed = true;
goto out;
- else if (ret < 0)
+ } else if (ret < 0) {
goto transport_err;
+ }
}
transport_err:
@@ -3993,8 +4011,13 @@
* responsible for cleaning up the early connection failure.
*/
if (conn->conn_state != TARG_CONN_STATE_IN_LOGIN)
- iscsit_take_action_for_connection_exit(conn);
+ iscsit_take_action_for_connection_exit(conn, &conn_freed);
out:
+ if (!conn_freed) {
+ while (!kthread_should_stop()) {
+ msleep(100);
+ }
+ }
return 0;
}
@@ -4093,6 +4116,7 @@
u32 checksum = 0, digest = 0;
struct iscsi_conn *conn = arg;
struct kvec iov;
+ bool conn_freed = false;
/*
* Allow ourselves to be interrupted by SIGINT so that a
* connection recovery / failure event can be triggered externally.
@@ -4104,7 +4128,7 @@
*/
rc = wait_for_completion_interruptible(&conn->rx_login_comp);
if (rc < 0 || iscsi_target_check_conn_state(conn))
- return 0;
+ goto out;
if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) {
struct completion comp;
@@ -4189,7 +4213,13 @@
transport_err:
if (!signal_pending(current))
atomic_set(&conn->transport_failed, 1);
- iscsit_take_action_for_connection_exit(conn);
+ iscsit_take_action_for_connection_exit(conn, &conn_freed);
+out:
+ if (!conn_freed) {
+ while (!kthread_should_stop()) {
+ msleep(100);
+ }
+ }
return 0;
}
@@ -4563,8 +4593,11 @@
* always sleep waiting for RX/TX thread shutdown to complete
* within iscsit_close_connection().
*/
- if (conn->conn_transport->transport_type == ISCSI_TCP)
+ if (conn->conn_transport->transport_type == ISCSI_TCP) {
sleep = cmpxchg(&conn->tx_thread_active, true, false);
+ if (!sleep)
+ return;
+ }
atomic_set(&conn->conn_logout_remove, 0);
complete(&conn->conn_logout_comp);
@@ -4580,8 +4613,11 @@
{
int sleep = 1;
- if (conn->conn_transport->transport_type == ISCSI_TCP)
+ if (conn->conn_transport->transport_type == ISCSI_TCP) {
sleep = cmpxchg(&conn->tx_thread_active, true, false);
+ if (!sleep)
+ return;
+ }
atomic_set(&conn->conn_logout_remove, 0);
complete(&conn->conn_logout_comp);
@@ -4821,6 +4857,7 @@
continue;
}
atomic_set(&sess->session_reinstatement, 1);
+ atomic_set(&sess->session_fall_back_to_erl0, 1);
spin_unlock(&sess->conn_lock);
list_move_tail(&se_sess->sess_list, &free_list);
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index b4bfd70..634ad36 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -725,11 +725,8 @@
if (iscsit_get_tpg(tpg) < 0)
return -EINVAL;
- /*
- * iscsit_tpg_set_initiator_node_queue_depth() assumes force=1
- */
- ret = iscsit_tpg_set_initiator_node_queue_depth(tpg,
- config_item_name(acl_ci), cmdsn_depth, 1);
+
+ ret = core_tpg_set_initiator_node_queue_depth(se_nacl, cmdsn_depth);
pr_debug("LIO_Target_ConfigFS: %s/%s Set CmdSN Window: %u for"
"InitiatorName: %s\n", config_item_name(wwn_ci),
@@ -871,6 +868,7 @@
DEF_TPG_ATTRIB(t10_pi);
DEF_TPG_ATTRIB(fabric_prot_type);
DEF_TPG_ATTRIB(tpg_enabled_sendtargets);
+DEF_TPG_ATTRIB(login_keys_workaround);
static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = {
&iscsi_tpg_attrib_attr_authentication,
@@ -886,6 +884,7 @@
&iscsi_tpg_attrib_attr_t10_pi,
&iscsi_tpg_attrib_attr_fabric_prot_type,
&iscsi_tpg_attrib_attr_tpg_enabled_sendtargets,
+ &iscsi_tpg_attrib_attr_login_keys_workaround,
NULL,
};
@@ -1593,42 +1592,31 @@
}
/*
- * Called with spin_lock_irq(struct se_portal_group->session_lock) held
- * or not held.
- *
- * Also, this function calls iscsit_inc_session_usage_count() on the
+ * This function calls iscsit_inc_session_usage_count() on the
* struct iscsi_session in question.
*/
static int lio_tpg_shutdown_session(struct se_session *se_sess)
{
struct iscsi_session *sess = se_sess->fabric_sess_ptr;
- struct se_portal_group *se_tpg = se_sess->se_tpg;
- bool local_lock = false;
+ struct se_portal_group *se_tpg = &sess->tpg->tpg_se_tpg;
- if (!spin_is_locked(&se_tpg->session_lock)) {
- spin_lock_irq(&se_tpg->session_lock);
- local_lock = true;
- }
-
+ spin_lock_bh(&se_tpg->session_lock);
spin_lock(&sess->conn_lock);
if (atomic_read(&sess->session_fall_back_to_erl0) ||
atomic_read(&sess->session_logout) ||
(sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)) {
spin_unlock(&sess->conn_lock);
- if (local_lock)
- spin_unlock_irq(&sess->conn_lock);
+ spin_unlock_bh(&se_tpg->session_lock);
return 0;
}
atomic_set(&sess->session_reinstatement, 1);
+ atomic_set(&sess->session_fall_back_to_erl0, 1);
spin_unlock(&sess->conn_lock);
iscsit_stop_time2retain_timer(sess);
- spin_unlock_irq(&se_tpg->session_lock);
+ spin_unlock_bh(&se_tpg->session_lock);
iscsit_stop_session(sess, 1, 1);
- if (!local_lock)
- spin_lock_irq(&se_tpg->session_lock);
-
return 1;
}
diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c
index 210f6e4..6c88fb0 100644
--- a/drivers/target/iscsi/iscsi_target_erl0.c
+++ b/drivers/target/iscsi/iscsi_target_erl0.c
@@ -930,8 +930,10 @@
}
}
-void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn)
+void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn, bool *conn_freed)
{
+ *conn_freed = false;
+
spin_lock_bh(&conn->state_lock);
if (atomic_read(&conn->connection_exit)) {
spin_unlock_bh(&conn->state_lock);
@@ -942,6 +944,7 @@
if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) {
spin_unlock_bh(&conn->state_lock);
iscsit_close_connection(conn);
+ *conn_freed = true;
return;
}
@@ -955,4 +958,5 @@
spin_unlock_bh(&conn->state_lock);
iscsit_handle_connection_cleanup(conn);
+ *conn_freed = true;
}
diff --git a/drivers/target/iscsi/iscsi_target_erl0.h b/drivers/target/iscsi/iscsi_target_erl0.h
index a9e2f94..fbc1d84 100644
--- a/drivers/target/iscsi/iscsi_target_erl0.h
+++ b/drivers/target/iscsi/iscsi_target_erl0.h
@@ -9,6 +9,6 @@
extern void iscsit_connection_reinstatement_rcfr(struct iscsi_conn *);
extern void iscsit_cause_connection_reinstatement(struct iscsi_conn *, int);
extern void iscsit_fall_back_to_erl0(struct iscsi_session *);
-extern void iscsit_take_action_for_connection_exit(struct iscsi_conn *);
+extern void iscsit_take_action_for_connection_exit(struct iscsi_conn *, bool *);
#endif /*** ISCSI_TARGET_ERL0_H ***/
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 316f661..bc2cbff 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -195,6 +195,7 @@
initiatorname_param->value) &&
(sess_p->sess_ops->SessionType == sessiontype))) {
atomic_set(&sess_p->session_reinstatement, 1);
+ atomic_set(&sess_p->session_fall_back_to_erl0, 1);
spin_unlock(&sess_p->conn_lock);
iscsit_inc_session_usage_count(sess_p);
iscsit_stop_time2retain_timer(sess_p);
@@ -1218,9 +1219,11 @@
flush_signals(current);
spin_lock_bh(&np->np_thread_lock);
- if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
+ if (atomic_dec_if_positive(&np->np_reset_count) >= 0) {
np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
+ spin_unlock_bh(&np->np_thread_lock);
complete(&np->np_restart_comp);
+ return 1;
} else if (np->np_thread_state == ISCSI_NP_THREAD_SHUTDOWN) {
spin_unlock_bh(&np->np_thread_lock);
goto exit;
@@ -1253,7 +1256,8 @@
goto exit;
} else if (rc < 0) {
spin_lock_bh(&np->np_thread_lock);
- if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
+ if (atomic_dec_if_positive(&np->np_reset_count) >= 0) {
+ np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
spin_unlock_bh(&np->np_thread_lock);
complete(&np->np_restart_comp);
iscsit_put_transport(conn->conn_transport);
@@ -1435,5 +1439,9 @@
break;
}
+ while (!kthread_should_stop()) {
+ msleep(100);
+ }
+
return 0;
}
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index 9fc9117..58c629a 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -489,14 +489,60 @@
static int iscsi_target_do_login(struct iscsi_conn *, struct iscsi_login *);
-static bool iscsi_target_sk_state_check(struct sock *sk)
+static bool __iscsi_target_sk_check_close(struct sock *sk)
{
if (sk->sk_state == TCP_CLOSE_WAIT || sk->sk_state == TCP_CLOSE) {
- pr_debug("iscsi_target_sk_state_check: TCP_CLOSE_WAIT|TCP_CLOSE,"
+ pr_debug("__iscsi_target_sk_check_close: TCP_CLOSE_WAIT|TCP_CLOSE,"
"returning FALSE\n");
- return false;
+ return true;
}
- return true;
+ return false;
+}
+
+static bool iscsi_target_sk_check_close(struct iscsi_conn *conn)
+{
+ bool state = false;
+
+ if (conn->sock) {
+ struct sock *sk = conn->sock->sk;
+
+ read_lock_bh(&sk->sk_callback_lock);
+ state = (__iscsi_target_sk_check_close(sk) ||
+ test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags));
+ read_unlock_bh(&sk->sk_callback_lock);
+ }
+ return state;
+}
+
+static bool iscsi_target_sk_check_flag(struct iscsi_conn *conn, unsigned int flag)
+{
+ bool state = false;
+
+ if (conn->sock) {
+ struct sock *sk = conn->sock->sk;
+
+ read_lock_bh(&sk->sk_callback_lock);
+ state = test_bit(flag, &conn->login_flags);
+ read_unlock_bh(&sk->sk_callback_lock);
+ }
+ return state;
+}
+
+static bool iscsi_target_sk_check_and_clear(struct iscsi_conn *conn, unsigned int flag)
+{
+ bool state = false;
+
+ if (conn->sock) {
+ struct sock *sk = conn->sock->sk;
+
+ write_lock_bh(&sk->sk_callback_lock);
+ state = (__iscsi_target_sk_check_close(sk) ||
+ test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags));
+ if (!state)
+ clear_bit(flag, &conn->login_flags);
+ write_unlock_bh(&sk->sk_callback_lock);
+ }
+ return state;
}
static void iscsi_target_login_drop(struct iscsi_conn *conn, struct iscsi_login *login)
@@ -536,6 +582,20 @@
pr_debug("entering iscsi_target_do_login_rx, conn: %p, %s:%d\n",
conn, current->comm, current->pid);
+ /*
+ * If iscsi_target_do_login_rx() has been invoked by ->sk_data_ready()
+ * before initial PDU processing in iscsi_target_start_negotiation()
+ * has completed, go ahead and retry until it's cleared.
+ *
+	 * Otherwise if the TCP connection drops while this is occurring,
+ * iscsi_target_start_negotiation() will detect the failure, call
+ * cancel_delayed_work_sync(&conn->login_work), and cleanup the
+ * remaining iscsi connection resources from iscsi_np process context.
+ */
+ if (iscsi_target_sk_check_flag(conn, LOGIN_FLAGS_INITIAL_PDU)) {
+ schedule_delayed_work(&conn->login_work, msecs_to_jiffies(10));
+ return;
+ }
spin_lock(&tpg->tpg_state_lock);
state = (tpg->tpg_state == TPG_STATE_ACTIVE);
@@ -543,26 +603,12 @@
if (!state) {
pr_debug("iscsi_target_do_login_rx: tpg_state != TPG_STATE_ACTIVE\n");
- iscsi_target_restore_sock_callbacks(conn);
- iscsi_target_login_drop(conn, login);
- iscsit_deaccess_np(np, tpg, tpg_np);
- return;
+ goto err;
}
- if (conn->sock) {
- struct sock *sk = conn->sock->sk;
-
- read_lock_bh(&sk->sk_callback_lock);
- state = iscsi_target_sk_state_check(sk);
- read_unlock_bh(&sk->sk_callback_lock);
-
- if (!state) {
- pr_debug("iscsi_target_do_login_rx, TCP state CLOSE\n");
- iscsi_target_restore_sock_callbacks(conn);
- iscsi_target_login_drop(conn, login);
- iscsit_deaccess_np(np, tpg, tpg_np);
- return;
- }
+ if (iscsi_target_sk_check_close(conn)) {
+ pr_debug("iscsi_target_do_login_rx, TCP state CLOSE\n");
+ goto err;
}
conn->login_kworker = current;
@@ -580,34 +626,29 @@
flush_signals(current);
conn->login_kworker = NULL;
- if (rc < 0) {
- iscsi_target_restore_sock_callbacks(conn);
- iscsi_target_login_drop(conn, login);
- iscsit_deaccess_np(np, tpg, tpg_np);
- return;
- }
+ if (rc < 0)
+ goto err;
pr_debug("iscsi_target_do_login_rx after rx_login_io, %p, %s:%d\n",
conn, current->comm, current->pid);
rc = iscsi_target_do_login(conn, login);
if (rc < 0) {
- iscsi_target_restore_sock_callbacks(conn);
- iscsi_target_login_drop(conn, login);
- iscsit_deaccess_np(np, tpg, tpg_np);
+ goto err;
} else if (!rc) {
- if (conn->sock) {
- struct sock *sk = conn->sock->sk;
-
- write_lock_bh(&sk->sk_callback_lock);
- clear_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags);
- write_unlock_bh(&sk->sk_callback_lock);
- }
+ if (iscsi_target_sk_check_and_clear(conn, LOGIN_FLAGS_READ_ACTIVE))
+ goto err;
} else if (rc == 1) {
iscsi_target_nego_release(conn);
iscsi_post_login_handler(np, conn, zero_tsih);
iscsit_deaccess_np(np, tpg, tpg_np);
}
+ return;
+
+err:
+ iscsi_target_restore_sock_callbacks(conn);
+ iscsi_target_login_drop(conn, login);
+ iscsit_deaccess_np(np, tpg, tpg_np);
}
static void iscsi_target_do_cleanup(struct work_struct *work)
@@ -655,31 +696,54 @@
orig_state_change(sk);
return;
}
+ state = __iscsi_target_sk_check_close(sk);
+ pr_debug("__iscsi_target_sk_close_change: state: %d\n", state);
+
if (test_bit(LOGIN_FLAGS_READ_ACTIVE, &conn->login_flags)) {
pr_debug("Got LOGIN_FLAGS_READ_ACTIVE=1 sk_state_change"
" conn: %p\n", conn);
+ if (state)
+ set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags);
write_unlock_bh(&sk->sk_callback_lock);
orig_state_change(sk);
return;
}
- if (test_and_set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)) {
+ if (test_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags)) {
pr_debug("Got LOGIN_FLAGS_CLOSED=1 sk_state_change conn: %p\n",
conn);
write_unlock_bh(&sk->sk_callback_lock);
orig_state_change(sk);
return;
}
-
- state = iscsi_target_sk_state_check(sk);
- write_unlock_bh(&sk->sk_callback_lock);
-
- pr_debug("iscsi_target_sk_state_change: state: %d\n", state);
-
- if (!state) {
+ /*
+ * If the TCP connection has dropped, go ahead and set LOGIN_FLAGS_CLOSED,
+ * but only queue conn->login_work -> iscsi_target_do_login_rx()
+ * processing if LOGIN_FLAGS_INITIAL_PDU has already been cleared.
+ *
+ * When iscsi_target_do_login_rx() runs, iscsi_target_sk_check_close()
+ * will detect the dropped TCP connection from delayed workqueue context.
+ *
+ * If LOGIN_FLAGS_INITIAL_PDU is still set, which means the initial
+ * iscsi_target_start_negotiation() is running, iscsi_target_do_login()
+ * via iscsi_target_sk_check_close() or iscsi_target_start_negotiation()
+ * via iscsi_target_sk_check_and_clear() is responsible for detecting the
+ * dropped TCP connection in iscsi_np process context, and cleaning up
+ * the remaining iscsi connection resources.
+ */
+ if (state) {
pr_debug("iscsi_target_sk_state_change got failed state\n");
- schedule_delayed_work(&conn->login_cleanup_work, 0);
+ set_bit(LOGIN_FLAGS_CLOSED, &conn->login_flags);
+ state = test_bit(LOGIN_FLAGS_INITIAL_PDU, &conn->login_flags);
+ write_unlock_bh(&sk->sk_callback_lock);
+
+ orig_state_change(sk);
+
+ if (!state)
+ schedule_delayed_work(&conn->login_work, 0);
return;
}
+ write_unlock_bh(&sk->sk_callback_lock);
+
orig_state_change(sk);
}
@@ -818,7 +882,8 @@
SENDER_TARGET,
login->rsp_buf,
&login->rsp_length,
- conn->param_list);
+ conn->param_list,
+ conn->tpg->tpg_attrib.login_keys_workaround);
if (ret < 0)
return -1;
@@ -888,7 +953,8 @@
SENDER_TARGET,
login->rsp_buf,
&login->rsp_length,
- conn->param_list);
+ conn->param_list,
+ conn->tpg->tpg_attrib.login_keys_workaround);
if (ret < 0) {
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR,
ISCSI_LOGIN_STATUS_INIT_ERR);
@@ -942,6 +1008,15 @@
if (iscsi_target_handle_csg_one(conn, login) < 0)
return -1;
if (login_rsp->flags & ISCSI_FLAG_LOGIN_TRANSIT) {
+ /*
+ * Check to make sure the TCP connection has not
+ * dropped asynchronously while session reinstatement
+			 * was occurring in this kthread context, before
+ * transitioning to full feature phase operation.
+ */
+ if (iscsi_target_sk_check_close(conn))
+ return -1;
+
login->tsih = conn->sess->tsih;
login->login_complete = 1;
iscsi_target_restore_sock_callbacks(conn);
@@ -968,21 +1043,6 @@
break;
}
- if (conn->sock) {
- struct sock *sk = conn->sock->sk;
- bool state;
-
- read_lock_bh(&sk->sk_callback_lock);
- state = iscsi_target_sk_state_check(sk);
- read_unlock_bh(&sk->sk_callback_lock);
-
- if (!state) {
- pr_debug("iscsi_target_do_login() failed state for"
- " conn: %p\n", conn);
- return -1;
- }
- }
-
return 0;
}
@@ -1246,16 +1306,28 @@
{
int ret;
- ret = iscsi_target_do_login(conn, login);
- if (!ret) {
- if (conn->sock) {
- struct sock *sk = conn->sock->sk;
+ if (conn->sock) {
+ struct sock *sk = conn->sock->sk;
- write_lock_bh(&sk->sk_callback_lock);
- set_bit(LOGIN_FLAGS_READY, &conn->login_flags);
- write_unlock_bh(&sk->sk_callback_lock);
- }
- } else if (ret < 0) {
+ write_lock_bh(&sk->sk_callback_lock);
+ set_bit(LOGIN_FLAGS_READY, &conn->login_flags);
+ set_bit(LOGIN_FLAGS_INITIAL_PDU, &conn->login_flags);
+ write_unlock_bh(&sk->sk_callback_lock);
+ }
+ /*
+ * If iscsi_target_do_login returns zero to signal more PDU
+ * exchanges are required to complete the login, go ahead and
+ * clear LOGIN_FLAGS_INITIAL_PDU but only if the TCP connection
+ * is still active.
+ *
+ * Otherwise if TCP connection dropped asynchronously, go ahead
+ * and perform connection cleanup now.
+ */
+ ret = iscsi_target_do_login(conn, login);
+ if (!ret && iscsi_target_sk_check_and_clear(conn, LOGIN_FLAGS_INITIAL_PDU))
+ ret = -1;
+
+ if (ret < 0) {
cancel_delayed_work_sync(&conn->login_work);
cancel_delayed_work_sync(&conn->login_cleanup_work);
iscsi_target_restore_sock_callbacks(conn);
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
index 2cbea2a..76bde76 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.c
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
@@ -764,7 +764,8 @@
return 0;
}
-static void iscsi_check_proposer_for_optional_reply(struct iscsi_param *param)
+static void iscsi_check_proposer_for_optional_reply(struct iscsi_param *param,
+ bool keys_workaround)
{
if (IS_TYPE_BOOL_AND(param)) {
if (!strcmp(param->value, NO))
@@ -772,35 +773,31 @@
} else if (IS_TYPE_BOOL_OR(param)) {
if (!strcmp(param->value, YES))
SET_PSTATE_REPLY_OPTIONAL(param);
- /*
- * Required for gPXE iSCSI boot client
- */
- if (!strcmp(param->name, IMMEDIATEDATA))
- SET_PSTATE_REPLY_OPTIONAL(param);
+
+ if (keys_workaround) {
+ /*
+ * Required for gPXE iSCSI boot client
+ */
+ if (!strcmp(param->name, IMMEDIATEDATA))
+ SET_PSTATE_REPLY_OPTIONAL(param);
+ }
} else if (IS_TYPE_NUMBER(param)) {
if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH))
SET_PSTATE_REPLY_OPTIONAL(param);
- /*
- * The GlobalSAN iSCSI Initiator for MacOSX does
- * not respond to MaxBurstLength, FirstBurstLength,
- * DefaultTime2Wait or DefaultTime2Retain parameter keys.
- * So, we set them to 'reply optional' here, and assume the
- * the defaults from iscsi_parameters.h if the initiator
- * is not RFC compliant and the keys are not negotiated.
- */
- if (!strcmp(param->name, MAXBURSTLENGTH))
- SET_PSTATE_REPLY_OPTIONAL(param);
- if (!strcmp(param->name, FIRSTBURSTLENGTH))
- SET_PSTATE_REPLY_OPTIONAL(param);
- if (!strcmp(param->name, DEFAULTTIME2WAIT))
- SET_PSTATE_REPLY_OPTIONAL(param);
- if (!strcmp(param->name, DEFAULTTIME2RETAIN))
- SET_PSTATE_REPLY_OPTIONAL(param);
- /*
- * Required for gPXE iSCSI boot client
- */
- if (!strcmp(param->name, MAXCONNECTIONS))
- SET_PSTATE_REPLY_OPTIONAL(param);
+
+ if (keys_workaround) {
+ /*
+ * Required for Mellanox Flexboot PXE boot ROM
+ */
+ if (!strcmp(param->name, FIRSTBURSTLENGTH))
+ SET_PSTATE_REPLY_OPTIONAL(param);
+
+ /*
+ * Required for gPXE iSCSI boot client
+ */
+ if (!strcmp(param->name, MAXCONNECTIONS))
+ SET_PSTATE_REPLY_OPTIONAL(param);
+ }
} else if (IS_PHASE_DECLARATIVE(param))
SET_PSTATE_REPLY_OPTIONAL(param);
}
@@ -1437,7 +1434,8 @@
u8 sender,
char *textbuf,
u32 *length,
- struct iscsi_param_list *param_list)
+ struct iscsi_param_list *param_list,
+ bool keys_workaround)
{
char *output_buf = NULL;
struct iscsi_extra_response *er;
@@ -1473,7 +1471,8 @@
*length += 1;
output_buf = textbuf + *length;
SET_PSTATE_PROPOSER(param);
- iscsi_check_proposer_for_optional_reply(param);
+ iscsi_check_proposer_for_optional_reply(param,
+ keys_workaround);
pr_debug("Sending key: %s=%s\n",
param->name, param->value);
}
diff --git a/drivers/target/iscsi/iscsi_target_parameters.h b/drivers/target/iscsi/iscsi_target_parameters.h
index a0751e3..17a58c2 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.h
+++ b/drivers/target/iscsi/iscsi_target_parameters.h
@@ -40,7 +40,7 @@
extern int iscsi_update_param_value(struct iscsi_param *, char *);
extern int iscsi_decode_text_input(u8, u8, char *, u32, struct iscsi_conn *);
extern int iscsi_encode_text_output(u8, u8, char *, u32 *,
- struct iscsi_param_list *);
+ struct iscsi_param_list *, bool);
extern int iscsi_check_negotiated_keys(struct iscsi_param_list *);
extern void iscsi_set_connection_parameters(struct iscsi_conn_ops *,
struct iscsi_param_list *);
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
index 68261b7..63e1dcc 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -227,6 +227,7 @@
a->t10_pi = TA_DEFAULT_T10_PI;
a->fabric_prot_type = TA_DEFAULT_FABRIC_PROT_TYPE;
a->tpg_enabled_sendtargets = TA_DEFAULT_TPG_ENABLED_SENDTARGETS;
+ a->login_keys_workaround = TA_DEFAULT_LOGIN_KEYS_WORKAROUND;
}
int iscsit_tpg_add_portal_group(struct iscsi_tiqn *tiqn, struct iscsi_portal_group *tpg)
@@ -589,16 +590,6 @@
return iscsit_tpg_release_np(tpg_np, tpg, np);
}
-int iscsit_tpg_set_initiator_node_queue_depth(
- struct iscsi_portal_group *tpg,
- unsigned char *initiatorname,
- u32 queue_depth,
- int force)
-{
- return core_tpg_set_initiator_node_queue_depth(&tpg->tpg_se_tpg,
- initiatorname, queue_depth, force);
-}
-
int iscsit_ta_authentication(struct iscsi_portal_group *tpg, u32 authentication)
{
unsigned char buf1[256], buf2[256], *none = NULL;
@@ -909,3 +900,21 @@
return 0;
}
+
+int iscsit_ta_login_keys_workaround(
+ struct iscsi_portal_group *tpg,
+ u32 flag)
+{
+ struct iscsi_tpg_attrib *a = &tpg->tpg_attrib;
+
+ if ((flag != 0) && (flag != 1)) {
+ pr_err("Illegal value %d\n", flag);
+ return -EINVAL;
+ }
+
+ a->login_keys_workaround = flag;
+ pr_debug("iSCSI_TPG[%hu] - TPG enabled bit for login keys workaround: %s ",
+ tpg->tpgt, (a->login_keys_workaround) ? "ON" : "OFF");
+
+ return 0;
+}
diff --git a/drivers/target/iscsi/iscsi_target_tpg.h b/drivers/target/iscsi/iscsi_target_tpg.h
index 9db32bd..901a712 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.h
+++ b/drivers/target/iscsi/iscsi_target_tpg.h
@@ -26,8 +26,6 @@
int);
extern int iscsit_tpg_del_network_portal(struct iscsi_portal_group *,
struct iscsi_tpg_np *);
-extern int iscsit_tpg_set_initiator_node_queue_depth(struct iscsi_portal_group *,
- unsigned char *, u32, int);
extern int iscsit_ta_authentication(struct iscsi_portal_group *, u32);
extern int iscsit_ta_login_timeout(struct iscsi_portal_group *, u32);
extern int iscsit_ta_netif_timeout(struct iscsi_portal_group *, u32);
@@ -41,5 +39,6 @@
extern int iscsit_ta_t10_pi(struct iscsi_portal_group *, u32);
extern int iscsit_ta_fabric_prot_type(struct iscsi_portal_group *, u32);
extern int iscsit_ta_tpg_enabled_sendtargets(struct iscsi_portal_group *, u32);
+extern int iscsit_ta_login_keys_workaround(struct iscsi_portal_group *, u32);
#endif /* ISCSI_TARGET_TPG_H */
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index 428b0d9..93590521 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -731,21 +731,23 @@
{
struct se_cmd *se_cmd = NULL;
int rc;
+ bool op_scsi = false;
/*
* Determine if a struct se_cmd is associated with
* this struct iscsi_cmd.
*/
switch (cmd->iscsi_opcode) {
case ISCSI_OP_SCSI_CMD:
- se_cmd = &cmd->se_cmd;
- __iscsit_free_cmd(cmd, true, shutdown);
+ op_scsi = true;
/*
* Fallthrough
*/
case ISCSI_OP_SCSI_TMFUNC:
- rc = transport_generic_free_cmd(&cmd->se_cmd, shutdown);
- if (!rc && shutdown && se_cmd && se_cmd->se_sess) {
- __iscsit_free_cmd(cmd, true, shutdown);
+ se_cmd = &cmd->se_cmd;
+ __iscsit_free_cmd(cmd, op_scsi, shutdown);
+ rc = transport_generic_free_cmd(se_cmd, shutdown);
+ if (!rc && shutdown && se_cmd->se_sess) {
+ __iscsit_free_cmd(cmd, op_scsi, shutdown);
target_put_sess_cmd(se_cmd);
}
break;
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index f916d18..b070ddf 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -92,6 +92,11 @@
pr_err("Source se_lun->lun_se_dev does not exist\n");
return -EINVAL;
}
+ if (lun->lun_shutdown) {
+ pr_err("Unable to create mappedlun symlink because"
+ " lun->lun_shutdown=true\n");
+ return -EINVAL;
+ }
se_tpg = lun->lun_tpg;
nacl_ci = &lun_acl_ci->ci_parent->ci_group->cg_item;
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 7929186..041a569 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -594,8 +594,7 @@
if (ret < 0)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- if (ret)
- target_complete_cmd(cmd, SAM_STAT_GOOD);
+ target_complete_cmd(cmd, SAM_STAT_GOOD);
return 0;
}
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index 253a91b..272e6f7 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -132,7 +132,7 @@
void release_se_kmem_caches(void);
u32 scsi_get_new_index(scsi_index_t);
void transport_subsystem_check_init(void);
-void transport_cmd_finish_abort(struct se_cmd *, int);
+int transport_cmd_finish_abort(struct se_cmd *, int);
unsigned char *transport_dump_cmd_direction(struct se_cmd *);
void transport_dump_dev_state(struct se_device *, char *, int *);
void transport_dump_dev_info(struct se_device *, struct se_lun *,
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index de18790..d72a405 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -154,7 +154,7 @@
buf = kzalloc(12, GFP_KERNEL);
if (!buf)
- return;
+ goto out_free;
memset(cdb, 0, MAX_COMMAND_SIZE);
cdb[0] = MODE_SENSE;
@@ -169,9 +169,10 @@
* If MODE_SENSE still returns zero, set the default value to 1024.
*/
sdev->sector_size = (buf[9] << 16) | (buf[10] << 8) | (buf[11]);
+out_free:
if (!sdev->sector_size)
sdev->sector_size = 1024;
-out_free:
+
kfree(buf);
}
@@ -314,9 +315,10 @@
sd->lun, sd->queue_depth);
}
- dev->dev_attrib.hw_block_size = sd->sector_size;
+ dev->dev_attrib.hw_block_size =
+ min_not_zero((int)sd->sector_size, 512);
dev->dev_attrib.hw_max_sectors =
- min_t(int, sd->host->max_sectors, queue_max_hw_sectors(q));
+ min_not_zero(sd->host->max_sectors, queue_max_hw_sectors(q));
dev->dev_attrib.hw_queue_depth = sd->queue_depth;
/*
@@ -339,8 +341,10 @@
/*
* For TYPE_TAPE, attempt to determine blocksize with MODE_SENSE.
*/
- if (sd->type == TYPE_TAPE)
+ if (sd->type == TYPE_TAPE) {
pscsi_tape_read_blocksize(dev, sd);
+ dev->dev_attrib.hw_block_size = sd->sector_size;
+ }
return 0;
}
@@ -406,7 +410,7 @@
/*
* Called with struct Scsi_Host->host_lock called.
*/
-static int pscsi_create_type_rom(struct se_device *dev, struct scsi_device *sd)
+static int pscsi_create_type_nondisk(struct se_device *dev, struct scsi_device *sd)
__releases(sh->host_lock)
{
struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
@@ -433,28 +437,6 @@
return 0;
}
-/*
- * Called with struct Scsi_Host->host_lock called.
- */
-static int pscsi_create_type_other(struct se_device *dev,
- struct scsi_device *sd)
- __releases(sh->host_lock)
-{
- struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr;
- struct Scsi_Host *sh = sd->host;
- int ret;
-
- spin_unlock_irq(sh->host_lock);
- ret = pscsi_add_device_to_list(dev, sd);
- if (ret)
- return ret;
-
- pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%llu\n",
- phv->phv_host_id, scsi_device_type(sd->type), sh->host_no,
- sd->channel, sd->id, sd->lun);
- return 0;
-}
-
static int pscsi_configure_device(struct se_device *dev)
{
struct se_hba *hba = dev->se_hba;
@@ -542,11 +524,8 @@
case TYPE_DISK:
ret = pscsi_create_type_disk(dev, sd);
break;
- case TYPE_ROM:
- ret = pscsi_create_type_rom(dev, sd);
- break;
default:
- ret = pscsi_create_type_other(dev, sd);
+ ret = pscsi_create_type_nondisk(dev, sd);
break;
}
@@ -611,8 +590,7 @@
else if (pdv->pdv_lld_host)
scsi_host_put(pdv->pdv_lld_host);
- if ((sd->type == TYPE_DISK) || (sd->type == TYPE_ROM))
- scsi_device_put(sd);
+ scsi_device_put(sd);
pdv->pdv_sd = NULL;
}
@@ -1088,7 +1066,6 @@
if (pdv->pdv_bd && pdv->pdv_bd->bd_part)
return pdv->pdv_bd->bd_part->nr_sects;
- dump_stack();
return 0;
}
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index 2e27b10..6081178 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -498,8 +498,11 @@
* been failed with a non-zero SCSI status.
*/
if (cmd->scsi_status) {
- pr_err("compare_and_write_callback: non zero scsi_status:"
+ pr_debug("compare_and_write_callback: non zero scsi_status:"
" 0x%02x\n", cmd->scsi_status);
+ *post_ret = 1;
+ if (cmd->scsi_status == SAM_STAT_CHECK_CONDITION)
+ ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
goto out;
}
@@ -1096,9 +1099,15 @@
return ret;
break;
case VERIFY:
+ case VERIFY_16:
size = 0;
- sectors = transport_get_sectors_10(cdb);
- cmd->t_task_lba = transport_lba_32(cdb);
+ if (cdb[0] == VERIFY) {
+ sectors = transport_get_sectors_10(cdb);
+ cmd->t_task_lba = transport_lba_32(cdb);
+ } else {
+ sectors = transport_get_sectors_16(cdb);
+ cmd->t_task_lba = transport_lba_64(cdb);
+ }
cmd->execute_cmd = sbc_emulate_noop;
goto check_lba;
case REZERO_UNIT:
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 46b1991..c9be953 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -75,7 +75,7 @@
kfree(tmr);
}
-static void core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas)
+static int core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas)
{
unsigned long flags;
bool remove = true, send_tas;
@@ -91,7 +91,7 @@
transport_send_task_abort(cmd);
}
- transport_cmd_finish_abort(cmd, remove);
+ return transport_cmd_finish_abort(cmd, remove);
}
static int target_check_cdb_and_preempt(struct list_head *list,
@@ -185,8 +185,8 @@
cancel_work_sync(&se_cmd->work);
transport_wait_for_tasks(se_cmd);
- transport_cmd_finish_abort(se_cmd, true);
- target_put_sess_cmd(se_cmd);
+ if (!transport_cmd_finish_abort(se_cmd, true))
+ target_put_sess_cmd(se_cmd);
printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for"
" ref_tag: %llu\n", ref_tag);
@@ -286,8 +286,8 @@
cancel_work_sync(&cmd->work);
transport_wait_for_tasks(cmd);
- transport_cmd_finish_abort(cmd, 1);
- target_put_sess_cmd(cmd);
+ if (!transport_cmd_finish_abort(cmd, 1))
+ target_put_sess_cmd(cmd);
}
}
@@ -385,8 +385,8 @@
cancel_work_sync(&cmd->work);
transport_wait_for_tasks(cmd);
- core_tmr_handle_tas_abort(cmd, tas);
- target_put_sess_cmd(cmd);
+ if (!core_tmr_handle_tas_abort(cmd, tas))
+ target_put_sess_cmd(cmd);
}
}
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index 2794c6e..f69f490 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -169,28 +169,25 @@
mutex_unlock(&tpg->tpg_lun_mutex);
}
-/* core_set_queue_depth_for_node():
- *
- *
- */
-static int core_set_queue_depth_for_node(
- struct se_portal_group *tpg,
- struct se_node_acl *acl)
+static void
+target_set_nacl_queue_depth(struct se_portal_group *tpg,
+ struct se_node_acl *acl, u32 queue_depth)
{
+ acl->queue_depth = queue_depth;
+
if (!acl->queue_depth) {
- pr_err("Queue depth for %s Initiator Node: %s is 0,"
+ pr_warn("Queue depth for %s Initiator Node: %s is 0,"
"defaulting to 1.\n", tpg->se_tpg_tfo->get_fabric_name(),
acl->initiatorname);
acl->queue_depth = 1;
}
-
- return 0;
}
static struct se_node_acl *target_alloc_node_acl(struct se_portal_group *tpg,
const unsigned char *initiatorname)
{
struct se_node_acl *acl;
+ u32 queue_depth;
acl = kzalloc(max(sizeof(*acl), tpg->se_tpg_tfo->node_acl_size),
GFP_KERNEL);
@@ -205,24 +202,20 @@
spin_lock_init(&acl->nacl_sess_lock);
mutex_init(&acl->lun_entry_mutex);
atomic_set(&acl->acl_pr_ref_count, 0);
+
if (tpg->se_tpg_tfo->tpg_get_default_depth)
- acl->queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg);
+ queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg);
else
- acl->queue_depth = 1;
+ queue_depth = 1;
+ target_set_nacl_queue_depth(tpg, acl, queue_depth);
+
snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
acl->se_tpg = tpg;
acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
tpg->se_tpg_tfo->set_default_node_attributes(acl);
- if (core_set_queue_depth_for_node(tpg, acl) < 0)
- goto out_free_acl;
-
return acl;
-
-out_free_acl:
- kfree(acl);
- return NULL;
}
static void target_add_node_acl(struct se_node_acl *acl)
@@ -369,7 +362,8 @@
if (sess->sess_tearing_down != 0)
continue;
- target_get_session(sess);
+ if (!target_get_session(sess))
+ continue;
list_move(&sess->sess_acl_list, &sess_list);
}
spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);
@@ -406,108 +400,52 @@
*
*/
int core_tpg_set_initiator_node_queue_depth(
- struct se_portal_group *tpg,
- unsigned char *initiatorname,
- u32 queue_depth,
- int force)
+ struct se_node_acl *acl,
+ u32 queue_depth)
{
- struct se_session *sess, *init_sess = NULL;
- struct se_node_acl *acl;
+ LIST_HEAD(sess_list);
+ struct se_portal_group *tpg = acl->se_tpg;
+ struct se_session *sess, *sess_tmp;
unsigned long flags;
- int dynamic_acl = 0;
-
- mutex_lock(&tpg->acl_node_mutex);
- acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
- if (!acl) {
- pr_err("Access Control List entry for %s Initiator"
- " Node %s does not exists for TPG %hu, ignoring"
- " request.\n", tpg->se_tpg_tfo->get_fabric_name(),
- initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
- mutex_unlock(&tpg->acl_node_mutex);
- return -ENODEV;
- }
- if (acl->dynamic_node_acl) {
- acl->dynamic_node_acl = 0;
- dynamic_acl = 1;
- }
- mutex_unlock(&tpg->acl_node_mutex);
-
- spin_lock_irqsave(&tpg->session_lock, flags);
- list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) {
- if (sess->se_node_acl != acl)
- continue;
-
- if (!force) {
- pr_err("Unable to change queue depth for %s"
- " Initiator Node: %s while session is"
- " operational. To forcefully change the queue"
- " depth and force session reinstatement"
- " use the \"force=1\" parameter.\n",
- tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
- spin_unlock_irqrestore(&tpg->session_lock, flags);
-
- mutex_lock(&tpg->acl_node_mutex);
- if (dynamic_acl)
- acl->dynamic_node_acl = 1;
- mutex_unlock(&tpg->acl_node_mutex);
- return -EEXIST;
- }
- /*
- * Determine if the session needs to be closed by our context.
- */
- if (!tpg->se_tpg_tfo->shutdown_session(sess))
- continue;
-
- init_sess = sess;
- break;
- }
+ int rc;
/*
* User has requested to change the queue depth for a Initiator Node.
* Change the value in the Node's struct se_node_acl, and call
- * core_set_queue_depth_for_node() to add the requested queue depth.
- *
- * Finally call tpg->se_tpg_tfo->close_session() to force session
- * reinstatement to occur if there is an active session for the
- * $FABRIC_MOD Initiator Node in question.
+ * target_set_nacl_queue_depth() to set the new queue depth.
*/
- acl->queue_depth = queue_depth;
+ target_set_nacl_queue_depth(tpg, acl, queue_depth);
- if (core_set_queue_depth_for_node(tpg, acl) < 0) {
- spin_unlock_irqrestore(&tpg->session_lock, flags);
+ spin_lock_irqsave(&acl->nacl_sess_lock, flags);
+ list_for_each_entry_safe(sess, sess_tmp, &acl->acl_sess_list,
+ sess_acl_list) {
+ if (sess->sess_tearing_down != 0)
+ continue;
+ if (!target_get_session(sess))
+ continue;
+ spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);
+
/*
- * Force session reinstatement if
- * core_set_queue_depth_for_node() failed, because we assume
- * the $FABRIC_MOD has already the set session reinstatement
- * bit from tpg->se_tpg_tfo->shutdown_session() called above.
+ * Finally call tpg->se_tpg_tfo->close_session() to force session
+ * reinstatement to occur if there is an active session for the
+ * $FABRIC_MOD Initiator Node in question.
*/
- if (init_sess)
- tpg->se_tpg_tfo->close_session(init_sess);
-
- mutex_lock(&tpg->acl_node_mutex);
- if (dynamic_acl)
- acl->dynamic_node_acl = 1;
- mutex_unlock(&tpg->acl_node_mutex);
- return -EINVAL;
+ rc = tpg->se_tpg_tfo->shutdown_session(sess);
+ target_put_session(sess);
+ if (!rc) {
+ spin_lock_irqsave(&acl->nacl_sess_lock, flags);
+ continue;
+ }
+ target_put_session(sess);
+ spin_lock_irqsave(&acl->nacl_sess_lock, flags);
}
- spin_unlock_irqrestore(&tpg->session_lock, flags);
- /*
- * If the $FABRIC_MOD session for the Initiator Node ACL exists,
- * forcefully shutdown the $FABRIC_MOD session/nexus.
- */
- if (init_sess)
- tpg->se_tpg_tfo->close_session(init_sess);
+ spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);
pr_debug("Successfully changed queue depth to: %d for Initiator"
- " Node: %s on %s Target Portal Group: %u\n", queue_depth,
- initiatorname, tpg->se_tpg_tfo->get_fabric_name(),
+ " Node: %s on %s Target Portal Group: %u\n", acl->queue_depth,
+ acl->initiatorname, tpg->se_tpg_tfo->get_fabric_name(),
tpg->se_tpg_tfo->tpg_get_tag(tpg));
- mutex_lock(&tpg->acl_node_mutex);
- if (dynamic_acl)
- acl->dynamic_node_acl = 1;
- mutex_unlock(&tpg->acl_node_mutex);
-
return 0;
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);
@@ -735,6 +673,8 @@
*/
struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);
+ lun->lun_shutdown = true;
+
core_clear_lun_from_tpg(lun, tpg);
/*
* Wait for any active I/O references to percpu se_lun->lun_ref to
@@ -756,6 +696,8 @@
}
if (!(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE))
hlist_del_rcu(&lun->link);
+
+ lun->lun_shutdown = false;
mutex_unlock(&tpg->tpg_lun_mutex);
percpu_ref_exit(&lun->lun_ref);
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index df20599..f71bede 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -383,9 +383,9 @@
se_tpg->se_tpg_tfo->close_session(se_sess);
}
-void target_get_session(struct se_session *se_sess)
+int target_get_session(struct se_session *se_sess)
{
- kref_get(&se_sess->sess_kref);
+ return kref_get_unless_zero(&se_sess->sess_kref);
}
EXPORT_SYMBOL(target_get_session);
@@ -639,9 +639,10 @@
percpu_ref_put(&lun->lun_ref);
}
-void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
+int transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
{
bool ack_kref = (cmd->se_cmd_flags & SCF_ACK_KREF);
+ int ret = 0;
if (cmd->se_cmd_flags & SCF_SE_LUN_CMD)
transport_lun_remove_cmd(cmd);
@@ -653,9 +654,11 @@
cmd->se_tfo->aborted_task(cmd);
if (transport_cmd_check_stop_to_fabric(cmd))
- return;
+ return 1;
if (remove && ack_kref)
- transport_put_cmd(cmd);
+ ret = transport_put_cmd(cmd);
+
+ return ret;
}
static void target_complete_failure_work(struct work_struct *work)
@@ -725,6 +728,15 @@
if (cmd->transport_state & CMD_T_ABORTED ||
cmd->transport_state & CMD_T_STOP) {
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ /*
+ * If COMPARE_AND_WRITE was stopped by __transport_wait_for_tasks(),
+ * release se_device->caw_sem obtained by sbc_compare_and_write()
+ * since target_complete_ok_work() or target_complete_failure_work()
+ * won't be called to invoke the normal CAW completion callbacks.
+ */
+ if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) {
+ up(&dev->caw_sem);
+ }
complete_all(&cmd->t_transport_stop_comp);
return;
} else if (!success) {
@@ -1154,15 +1166,28 @@
if (cmd->unknown_data_length) {
cmd->data_length = size;
} else if (size != cmd->data_length) {
- pr_warn("TARGET_CORE[%s]: Expected Transfer Length:"
+ pr_warn_ratelimited("TARGET_CORE[%s]: Expected Transfer Length:"
" %u does not match SCSI CDB Length: %u for SAM Opcode:"
" 0x%02x\n", cmd->se_tfo->get_fabric_name(),
cmd->data_length, size, cmd->t_task_cdb[0]);
- if (cmd->data_direction == DMA_TO_DEVICE &&
- cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
- pr_err("Rejecting underflow/overflow WRITE data\n");
- return TCM_INVALID_CDB_FIELD;
+ if (cmd->data_direction == DMA_TO_DEVICE) {
+ if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
+ pr_err_ratelimited("Rejecting underflow/overflow"
+ " for WRITE data CDB\n");
+ return TCM_INVALID_CDB_FIELD;
+ }
+ /*
+ * Some fabric drivers like iscsi-target still expect to
+ * always reject overflow writes. Reject this case until
+ * full fabric driver level support for overflow writes
+ * is introduced tree-wide.
+ */
+ if (size > cmd->data_length) {
+ pr_err_ratelimited("Rejecting overflow for"
+ " WRITE control CDB\n");
+ return TCM_INVALID_CDB_FIELD;
+ }
}
/*
* Reject READ_* or WRITE_* with overflow/underflow for
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index 65c7033..632fa8c 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -192,8 +192,10 @@
mutex_lock(&cooling_list_lock);
list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
if (cpumask_test_cpu(cpu, &cpufreq_dev->allowed_cpus)) {
+ unsigned long level = get_level(cpufreq_dev, freq);
+
mutex_unlock(&cooling_list_lock);
- return get_level(cpufreq_dev, freq);
+ return level;
}
}
mutex_unlock(&cooling_list_lock);
diff --git a/drivers/thermal/msm_thermal.c b/drivers/thermal/msm_thermal.c
index 40dca31..012dcea 100644
--- a/drivers/thermal/msm_thermal.c
+++ b/drivers/thermal/msm_thermal.c
@@ -200,6 +200,7 @@
static bool cluster_info_nodes_called;
static bool in_suspend, retry_in_progress;
static bool lmh_dcvs_available;
+static bool lmh_dcvs_is_supported;
static int *tsens_id_map;
static int *zone_id_tsens_map;
static DEFINE_MUTEX(vdd_rstr_mutex);
@@ -995,7 +996,7 @@
switch (event) {
case CPUFREQ_ADJUST:
- max_freq_req = (lmh_dcvs_available) ? UINT_MAX :
+ max_freq_req = (lmh_dcvs_is_supported) ? UINT_MAX :
cpus[policy->cpu].parent_ptr->limited_max_freq;
min_freq_req = cpus[policy->cpu].parent_ptr->limited_min_freq;
pr_debug("mitigating CPU%d to freq max: %u min: %u\n",
@@ -5386,7 +5387,7 @@
if (ret)
pr_err("cannot register cpufreq notifier. err:%d\n", ret);
- if (!lmh_dcvs_available) {
+ if (!lmh_dcvs_is_supported) {
register_reboot_notifier(&msm_thermal_reboot_notifier);
pm_notifier(msm_thermal_suspend_callback, 0);
}
@@ -7421,6 +7422,7 @@
goto probe_exit;
probe_sensor_info(node, &data, pdev);
+ lmh_dcvs_is_supported = of_property_read_bool(node, "clock-names");
probe_cc(node, &data, pdev);
probe_freq_mitigation(node, &data, pdev);
probe_cx_phase_ctrl(node, &data, pdev);
diff --git a/drivers/tty/nozomi.c b/drivers/tty/nozomi.c
index 80f9de9..5cc80b8 100644
--- a/drivers/tty/nozomi.c
+++ b/drivers/tty/nozomi.c
@@ -823,7 +823,7 @@
struct tty_struct *tty = tty_port_tty_get(&port->port);
int i, ret;
- read_mem32((u32 *) &size, addr, 4);
+ size = __le32_to_cpu(readl(addr));
/* DBG1( "%d bytes port: %d", size, index); */
if (tty && test_bit(TTY_THROTTLED, &tty->flags)) {
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index 807d8014..96aa0ad 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -216,16 +216,11 @@
static void pty_flush_buffer(struct tty_struct *tty)
{
struct tty_struct *to = tty->link;
- struct tty_ldisc *ld;
if (!to)
return;
- ld = tty_ldisc_ref(to);
- tty_buffer_flush(to, ld);
- if (ld)
- tty_ldisc_deref(ld);
-
+ tty_buffer_flush(to, NULL);
if (to->packet) {
spin_lock_irq(&tty->ctrl_lock);
tty->ctrl_status |= TIOCPKT_FLUSHWRITE;
diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
index a2c0734..e8dd296 100644
--- a/drivers/tty/serial/8250/8250_omap.c
+++ b/drivers/tty/serial/8250/8250_omap.c
@@ -1235,7 +1235,8 @@
pm_runtime_put_autosuspend(&pdev->dev);
return 0;
err:
- pm_runtime_put(&pdev->dev);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
+ pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
return ret;
}
@@ -1244,6 +1245,7 @@
{
struct omap8250_priv *priv = platform_get_drvdata(pdev);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
serial8250_unregister_port(priv->line);
@@ -1343,6 +1345,10 @@
struct omap8250_priv *priv = dev_get_drvdata(dev);
struct uart_8250_port *up;
+ /* In case runtime-pm tries this before we are setup */
+ if (!priv)
+ return 0;
+
up = serial8250_get_port(priv->line);
/*
* When using 'no_console_suspend', the console UART must not be
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 5b24ffd..cf3da51 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -57,6 +57,7 @@
unsigned int nr;
void __iomem *remapped_bar[PCI_NUM_BAR_RESOURCES];
struct pci_serial_quirk *quirk;
+ const struct pciserial_board *board;
int line[0];
};
@@ -4058,6 +4059,7 @@
}
}
priv->nr = i;
+ priv->board = board;
return priv;
err_deinit:
@@ -4068,7 +4070,7 @@
}
EXPORT_SYMBOL_GPL(pciserial_init_ports);
-void pciserial_remove_ports(struct serial_private *priv)
+void pciserial_detach_ports(struct serial_private *priv)
{
struct pci_serial_quirk *quirk;
int i;
@@ -4088,7 +4090,11 @@
quirk = find_quirk(priv->dev);
if (quirk->exit)
quirk->exit(priv->dev);
+}
+void pciserial_remove_ports(struct serial_private *priv)
+{
+ pciserial_detach_ports(priv);
kfree(priv);
}
EXPORT_SYMBOL_GPL(pciserial_remove_ports);
@@ -5819,7 +5825,7 @@
return PCI_ERS_RESULT_DISCONNECT;
if (priv)
- pciserial_suspend_ports(priv);
+ pciserial_detach_ports(priv);
pci_disable_device(dev);
@@ -5844,9 +5850,16 @@
static void serial8250_io_resume(struct pci_dev *dev)
{
struct serial_private *priv = pci_get_drvdata(dev);
+ struct serial_private *new;
- if (priv)
- pciserial_resume_ports(priv);
+ if (!priv)
+ return;
+
+ new = pciserial_init_ports(dev, priv->board);
+ if (!IS_ERR(new)) {
+ pci_set_drvdata(dev, new);
+ kfree(priv);
+ }
}
static const struct pci_error_handlers serial8250_err_handler = {
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index a0f9116..53e4d50 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -810,6 +810,11 @@
*/
if (!uart_circ_empty(xmit))
tasklet_schedule(&atmel_port->tasklet);
+ else if ((port->rs485.flags & SER_RS485_ENABLED) &&
+ !(port->rs485.flags & SER_RS485_RX_DURING_TX)) {
+ /* DMA done, stop TX, start RX for RS485 */
+ atmel_start_rx(port);
+ }
spin_unlock_irqrestore(&port->lock, flags);
}
@@ -912,12 +917,6 @@
desc->callback = atmel_complete_tx_dma;
desc->callback_param = atmel_port;
atmel_port->cookie_tx = dmaengine_submit(desc);
-
- } else {
- if (port->rs485.flags & SER_RS485_ENABLED) {
- /* DMA done, stop TX, start RX for RS485 */
- atmel_start_rx(port);
- }
}
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
@@ -1987,6 +1986,11 @@
atmel_uart_writel(port, ATMEL_PDC_TCR, 0);
atmel_port->pdc_tx.ofs = 0;
}
+ /*
+ * in uart_flush_buffer(), the xmit circular buffer has just
+ * been cleared, so we have to reset tx_len accordingly.
+ */
+ atmel_port->tx_len = 0;
}
/*
@@ -2499,6 +2503,9 @@
pdc_tx = atmel_uart_readl(port, ATMEL_PDC_PTSR) & ATMEL_PDC_TXTEN;
atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS);
+ /* Make sure that tx path is actually able to send characters */
+ atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN);
+
uart_console_write(port, s, count, atmel_console_putchar);
/*
diff --git a/drivers/tty/serial/efm32-uart.c b/drivers/tty/serial/efm32-uart.c
index 195acc8..5d47691 100644
--- a/drivers/tty/serial/efm32-uart.c
+++ b/drivers/tty/serial/efm32-uart.c
@@ -27,6 +27,7 @@
#define UARTn_FRAME 0x04
#define UARTn_FRAME_DATABITS__MASK 0x000f
#define UARTn_FRAME_DATABITS(n) ((n) - 3)
+#define UARTn_FRAME_PARITY__MASK 0x0300
#define UARTn_FRAME_PARITY_NONE 0x0000
#define UARTn_FRAME_PARITY_EVEN 0x0200
#define UARTn_FRAME_PARITY_ODD 0x0300
@@ -572,12 +573,16 @@
16 * (4 + (clkdiv >> 6)));
frame = efm32_uart_read32(efm_port, UARTn_FRAME);
- if (frame & UARTn_FRAME_PARITY_ODD)
+ switch (frame & UARTn_FRAME_PARITY__MASK) {
+ case UARTn_FRAME_PARITY_ODD:
*parity = 'o';
- else if (frame & UARTn_FRAME_PARITY_EVEN)
+ break;
+ case UARTn_FRAME_PARITY_EVEN:
*parity = 'e';
- else
+ break;
+ default:
*parity = 'n';
+ }
*bits = (frame & UARTn_FRAME_DATABITS__MASK) -
UARTn_FRAME_DATABITS(4) + 4;
diff --git a/drivers/tty/serial/ifx6x60.c b/drivers/tty/serial/ifx6x60.c
index 88246f7..0f23dda 100644
--- a/drivers/tty/serial/ifx6x60.c
+++ b/drivers/tty/serial/ifx6x60.c
@@ -1378,9 +1378,9 @@
static void __exit ifx_spi_exit(void)
{
/* unregister */
+ spi_unregister_driver(&ifx_spi_driver);
tty_unregister_driver(tty_drv);
put_tty_driver(tty_drv);
- spi_unregister_driver(&ifx_spi_driver);
unregister_reboot_notifier(&ifx_modem_reboot_notifier_block);
}
diff --git a/drivers/tty/serial/msm_serial_hs.c b/drivers/tty/serial/msm_serial_hs.c
index 10e2a24..640bcdd 100644
--- a/drivers/tty/serial/msm_serial_hs.c
+++ b/drivers/tty/serial/msm_serial_hs.c
@@ -1969,7 +1969,7 @@
uport = &(msm_uport->uport);
msm_uport->notify = *notify;
- MSM_HS_INFO("rx_cb: addr=0x%pa, size=0x%x, flags=0x%x\n",
+ MSM_HS_INFO("rx_cb: addr=0x%paP, size=0x%x, flags=0x%x\n",
&addr, notify->data.transfer.iovec.size,
notify->data.transfer.iovec.flags);
@@ -2236,10 +2236,18 @@
if (!(msm_uport->wakeup.enabled)) {
spin_lock_irqsave(&uport->lock, flags);
msm_uport->wakeup.ignore = 1;
- msm_uport->wakeup.enabled = true;
+ /* Keep this disabled for 1 msec */
+ msm_uport->wakeup.enabled = false;
spin_unlock_irqrestore(&uport->lock, flags);
disable_irq(uport->irq);
enable_irq(msm_uport->wakeup.irq);
+
+ /* Add delay before enabling wakeup irq */
+ udelay(1000);
+ spin_lock_irqsave(&uport->lock, flags);
+ if (msm_uport->wakeup.ignore == 1)
+ msm_uport->wakeup.enabled = true;
+ spin_unlock_irqrestore(&uport->lock, flags);
} else {
MSM_HS_WARN("%s:Wake up IRQ already enabled", __func__);
}
@@ -2398,6 +2406,10 @@
struct uart_port *uport = &msm_uport->uport;
struct tty_struct *tty = NULL;
+ /* Do not serve ISR if this flag is false */
+ if (!msm_uport->wakeup.enabled)
+ return IRQ_HANDLED;
+
spin_lock_irqsave(&uport->lock, flags);
if (msm_uport->wakeup.ignore)
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index 24280d9..de1c143 100644
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -1712,7 +1712,8 @@
return 0;
err_add_port:
- pm_runtime_put(&pdev->dev);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
+ pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
pm_qos_remove_request(&up->pm_qos_request);
device_init_wakeup(up->dev, false);
@@ -1725,9 +1726,13 @@
{
struct uart_omap_port *up = platform_get_drvdata(dev);
+ pm_runtime_get_sync(up->dev);
+
+ uart_remove_one_port(&serial_omap_reg, &up->port);
+
+ pm_runtime_dont_use_autosuspend(up->dev);
pm_runtime_put_sync(up->dev);
pm_runtime_disable(up->dev);
- uart_remove_one_port(&serial_omap_reg, &up->port);
pm_qos_remove_request(&up->pm_qos_request);
device_init_wakeup(&dev->dev, false);
diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
index cad76b1..df64249 100644
--- a/drivers/tty/serial/samsung.c
+++ b/drivers/tty/serial/samsung.c
@@ -900,14 +900,13 @@
return -ENOMEM;
}
- dma->rx_addr = dma_map_single(dma->rx_chan->device->dev, dma->rx_buf,
+ dma->rx_addr = dma_map_single(p->port.dev, dma->rx_buf,
dma->rx_size, DMA_FROM_DEVICE);
spin_lock_irqsave(&p->port.lock, flags);
/* TX buffer */
- dma->tx_addr = dma_map_single(dma->tx_chan->device->dev,
- p->port.state->xmit.buf,
+ dma->tx_addr = dma_map_single(p->port.dev, p->port.state->xmit.buf,
UART_XMIT_SIZE, DMA_TO_DEVICE);
spin_unlock_irqrestore(&p->port.lock, flags);
@@ -921,7 +920,7 @@
if (dma->rx_chan) {
dmaengine_terminate_all(dma->rx_chan);
- dma_unmap_single(dma->rx_chan->device->dev, dma->rx_addr,
+ dma_unmap_single(p->port.dev, dma->rx_addr,
dma->rx_size, DMA_FROM_DEVICE);
kfree(dma->rx_buf);
dma_release_channel(dma->rx_chan);
@@ -930,7 +929,7 @@
if (dma->tx_chan) {
dmaengine_terminate_all(dma->tx_chan);
- dma_unmap_single(dma->tx_chan->device->dev, dma->tx_addr,
+ dma_unmap_single(p->port.dev, dma->tx_addr,
UART_XMIT_SIZE, DMA_TO_DEVICE);
dma_release_channel(dma->tx_chan);
dma->tx_chan = NULL;
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 63a06ab..235e150 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -1800,12 +1800,14 @@
dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);
- ret = sci_request_irq(s);
- if (unlikely(ret < 0))
- return ret;
-
sci_request_dma(port);
+ ret = sci_request_irq(s);
+ if (unlikely(ret < 0)) {
+ sci_free_dma(port);
+ return ret;
+ }
+
spin_lock_irqsave(&port->lock, flags);
sci_start_tx(port);
sci_start_rx(port);
@@ -1834,8 +1836,8 @@
}
#endif
- sci_free_dma(port);
sci_free_irq(s);
+ sci_free_dma(port);
}
static unsigned int sci_scbrr_calc(struct sci_port *s, unsigned int bps,
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 7cef543..1bb629a 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -2070,13 +2070,12 @@
if (tty) {
mutex_unlock(&tty_mutex);
retval = tty_lock_interruptible(tty);
+ tty_kref_put(tty); /* drop kref from tty_driver_lookup_tty() */
if (retval) {
if (retval == -EINTR)
retval = -ERESTARTSYS;
goto err_unref;
}
- /* safe to drop the kref from tty_driver_lookup_tty() */
- tty_kref_put(tty);
retval = tty_reopen(tty);
if (retval < 0) {
tty_unlock(tty);
diff --git a/drivers/tty/tty_mutex.c b/drivers/tty/tty_mutex.c
index d09293b..cff304a 100644
--- a/drivers/tty/tty_mutex.c
+++ b/drivers/tty/tty_mutex.c
@@ -24,10 +24,15 @@
int tty_lock_interruptible(struct tty_struct *tty)
{
+ int ret;
+
if (WARN(tty->magic != TTY_MAGIC, "L Bad %p\n", tty))
return -EIO;
tty_kref_get(tty);
- return mutex_lock_interruptible(&tty->legacy_mutex);
+ ret = mutex_lock_interruptible(&tty->legacy_mutex);
+ if (ret)
+ tty_kref_put(tty);
+ return ret;
}
void __lockfunc tty_unlock(struct tty_struct *tty)
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 5ab54ef..e4f69bd 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -2708,13 +2708,13 @@
* related to the kernel should not use this.
*/
data = vt_get_shift_state();
- ret = __put_user(data, p);
+ ret = put_user(data, p);
break;
case TIOCL_GETMOUSEREPORTING:
console_lock(); /* May be overkill */
data = mouse_reporting();
console_unlock();
- ret = __put_user(data, p);
+ ret = put_user(data, p);
break;
case TIOCL_SETVESABLANK:
console_lock();
@@ -2723,7 +2723,7 @@
break;
case TIOCL_GETKMSGREDIRECT:
data = vt_get_kmsg_redirect();
- ret = __put_user(data, p);
+ ret = put_user(data, p);
break;
case TIOCL_SETKMSGREDIRECT:
if (!capable(CAP_SYS_ADMIN)) {
diff --git a/drivers/usb/chipidea/ci.h b/drivers/usb/chipidea/ci.h
index 41d7cf6..858c308 100644
--- a/drivers/usb/chipidea/ci.h
+++ b/drivers/usb/chipidea/ci.h
@@ -428,9 +428,6 @@
u8 hw_port_test_get(struct ci_hdrc *ci);
-int hw_wait_reg(struct ci_hdrc *ci, enum ci_hw_regs reg, u32 mask,
- u32 value, unsigned int timeout_ms);
-
void ci_platform_configure(struct ci_hdrc *ci);
#endif /* __DRIVERS_USB_CHIPIDEA_CI_H */
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index ba4a2a1..939c6ad 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -518,38 +518,6 @@
return 0;
}
-/**
- * hw_wait_reg: wait the register value
- *
- * Sometimes, it needs to wait register value before going on.
- * Eg, when switch to device mode, the vbus value should be lower
- * than OTGSC_BSV before connects to host.
- *
- * @ci: the controller
- * @reg: register index
- * @mask: mast bit
- * @value: the bit value to wait
- * @timeout_ms: timeout in millisecond
- *
- * This function returns an error code if timeout
- */
-int hw_wait_reg(struct ci_hdrc *ci, enum ci_hw_regs reg, u32 mask,
- u32 value, unsigned int timeout_ms)
-{
- unsigned long elapse = jiffies + msecs_to_jiffies(timeout_ms);
-
- while (hw_read(ci, reg, mask) != value) {
- if (time_after(jiffies, elapse)) {
- dev_err(ci->dev, "timeout waiting for %08x in %d\n",
- mask, reg);
- return -ETIMEDOUT;
- }
- msleep(20);
- }
-
- return 0;
-}
-
static irqreturn_t ci_irq(int irq, void *data)
{
struct ci_hdrc *ci = data;
diff --git a/drivers/usb/chipidea/debug.c b/drivers/usb/chipidea/debug.c
index 58c8485..9233799 100644
--- a/drivers/usb/chipidea/debug.c
+++ b/drivers/usb/chipidea/debug.c
@@ -295,7 +295,8 @@
{
struct ci_hdrc *ci = s->private;
- seq_printf(s, "%s\n", ci_role(ci)->name);
+ if (ci->role != CI_ROLE_END)
+ seq_printf(s, "%s\n", ci_role(ci)->name);
return 0;
}
diff --git a/drivers/usb/chipidea/otg.c b/drivers/usb/chipidea/otg.c
index 03b6743..0cf149ed 100644
--- a/drivers/usb/chipidea/otg.c
+++ b/drivers/usb/chipidea/otg.c
@@ -44,12 +44,15 @@
else
val &= ~OTGSC_BSVIS;
- cable->changed = false;
-
if (cable->state)
val |= OTGSC_BSV;
else
val &= ~OTGSC_BSV;
+
+ if (cable->enabled)
+ val |= OTGSC_BSVIE;
+ else
+ val &= ~OTGSC_BSVIE;
}
cable = &ci->platdata->id_extcon;
@@ -59,15 +62,18 @@
else
val &= ~OTGSC_IDIS;
- cable->changed = false;
-
if (cable->state)
val |= OTGSC_ID;
else
val &= ~OTGSC_ID;
+
+ if (cable->enabled)
+ val |= OTGSC_IDIE;
+ else
+ val &= ~OTGSC_IDIE;
}
- return val;
+ return val & mask;
}
/**
@@ -77,6 +83,36 @@
*/
void hw_write_otgsc(struct ci_hdrc *ci, u32 mask, u32 data)
{
+ struct ci_hdrc_cable *cable;
+
+ cable = &ci->platdata->vbus_extcon;
+ if (!IS_ERR(cable->edev)) {
+ if (data & mask & OTGSC_BSVIS)
+ cable->changed = false;
+
+ /* Don't enable vbus interrupt if using external notifier */
+ if (data & mask & OTGSC_BSVIE) {
+ cable->enabled = true;
+ data &= ~OTGSC_BSVIE;
+ } else if (mask & OTGSC_BSVIE) {
+ cable->enabled = false;
+ }
+ }
+
+ cable = &ci->platdata->id_extcon;
+ if (!IS_ERR(cable->edev)) {
+ if (data & mask & OTGSC_IDIS)
+ cable->changed = false;
+
+ /* Don't enable id interrupt if using external notifier */
+ if (data & mask & OTGSC_IDIE) {
+ cable->enabled = true;
+ data &= ~OTGSC_IDIE;
+ } else if (mask & OTGSC_IDIE) {
+ cable->enabled = false;
+ }
+ }
+
hw_write(ci, OP_OTGSC, mask | OTGSC_INT_STATUS_BITS, data);
}
@@ -104,7 +140,31 @@
usb_gadget_vbus_disconnect(&ci->gadget);
}
-#define CI_VBUS_STABLE_TIMEOUT_MS 5000
+/**
+ * When we switch to device mode, the vbus value should be lower
+ * than OTGSC_BSV before connecting to host.
+ *
+ * @ci: the controller
+ *
+ * This function returns an error code if timeout
+ */
+static int hw_wait_vbus_lower_bsv(struct ci_hdrc *ci)
+{
+ unsigned long elapse = jiffies + msecs_to_jiffies(5000);
+ u32 mask = OTGSC_BSV;
+
+ while (hw_read_otgsc(ci, mask)) {
+ if (time_after(jiffies, elapse)) {
+ dev_err(ci->dev, "timeout waiting for %08x in OTGSC\n",
+ mask);
+ return -ETIMEDOUT;
+ }
+ msleep(20);
+ }
+
+ return 0;
+}
+
static void ci_handle_id_switch(struct ci_hdrc *ci)
{
enum ci_role role = ci_otg_role(ci);
@@ -116,9 +176,11 @@
ci_role_stop(ci);
if (role == CI_ROLE_GADGET)
- /* wait vbus lower than OTGSC_BSV */
- hw_wait_reg(ci, OP_OTGSC, OTGSC_BSV, 0,
- CI_VBUS_STABLE_TIMEOUT_MS);
+ /*
+ * wait vbus lower than OTGSC_BSV before connecting
+ * to host
+ */
+ hw_wait_vbus_lower_bsv(ci);
ci_role_start(ci, role);
}
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index d8a045f..aff086c 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -1982,6 +1982,7 @@
int ci_hdrc_gadget_init(struct ci_hdrc *ci)
{
struct ci_role_driver *rdrv;
+ int ret;
if (!hw_read(ci, CAP_DCCPARAMS, DCCPARAMS_DC))
return -ENXIO;
@@ -1994,7 +1995,10 @@
rdrv->stop = udc_id_switch_for_host;
rdrv->irq = udc_irq;
rdrv->name = "gadget";
- ci->roles[CI_ROLE_GADGET] = rdrv;
- return udc_start(ci);
+ ret = udc_start(ci);
+ if (!ret)
+ ci->roles[CI_ROLE_GADGET] = rdrv;
+
+ return ret;
}
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 96849e2..df96f5f 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -311,6 +311,12 @@
break;
case USB_CDC_NOTIFY_SERIAL_STATE:
+ if (le16_to_cpu(dr->wLength) != 2) {
+ dev_dbg(&acm->control->dev,
+ "%s - malformed serial state\n", __func__);
+ break;
+ }
+
newctrl = get_unaligned_le16(data);
if (!acm->clocal && (acm->ctrlin & ~newctrl & ACM_CTRL_DCD)) {
@@ -347,11 +353,10 @@
default:
dev_dbg(&acm->control->dev,
- "%s - unknown notification %d received: index %d "
- "len %d data0 %d data1 %d\n",
+ "%s - unknown notification %d received: index %d len %d\n",
__func__,
- dr->bNotificationType, dr->wIndex,
- dr->wLength, data[0], data[1]);
+ dr->bNotificationType, dr->wIndex, dr->wLength);
+
break;
}
exit:
@@ -1754,6 +1759,9 @@
{ USB_DEVICE(0x1576, 0x03b1), /* Maretron USB100 */
.driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
},
+ { USB_DEVICE(0xfff0, 0x0100), /* DATECS FP-2000 */
+ .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
+ },
{ USB_DEVICE(0x2912, 0x0001), /* ATOL FPrint */
.driver_info = CLEAR_HALT_CONDITIONS,
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
index deaddb9..24337ac 100644
--- a/drivers/usb/class/usbtmc.c
+++ b/drivers/usb/class/usbtmc.c
@@ -1105,7 +1105,7 @@
dev_dbg(&intf->dev, "%s called\n", __func__);
- data = kmalloc(sizeof(*data), GFP_KERNEL);
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -1163,6 +1163,12 @@
}
}
+ if (!data->bulk_out || !data->bulk_in) {
+ dev_err(&intf->dev, "bulk endpoints not found\n");
+ retcode = -ENODEV;
+ goto err_put;
+ }
+
retcode = get_capabilities(data);
if (retcode)
dev_err(&intf->dev, "can't read capabilities\n");
@@ -1186,6 +1192,7 @@
error_register:
sysfs_remove_group(&intf->dev.kobj, &capability_attr_grp);
sysfs_remove_group(&intf->dev.kobj, &data_attr_grp);
+err_put:
kref_put(&data->kref, usbtmc_delete);
return retcode;
}
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index ac30a05..325cbc9 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -246,6 +246,16 @@
/*
* Adjust bInterval for quirked devices.
+ */
+ /*
+ * This quirk fixes bIntervals reported in ms.
+ */
+ if (to_usb_device(ddev)->quirks &
+ USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL) {
+ n = clamp(fls(d->bInterval) + 3, i, j);
+ i = j = n;
+ }
+ /*
* This quirk fixes bIntervals reported in
* linear microframes.
*/
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 54d2d6b..873ba02 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -519,6 +519,8 @@
if (as->status < 0 && as->bulk_addr && as->status != -ECONNRESET &&
as->status != -ENOENT)
cancel_bulk_urbs(ps, as->bulk_addr);
+
+ wake_up(&ps->wait);
spin_unlock(&ps->lock);
if (signr) {
@@ -526,8 +528,6 @@
put_pid(pid);
put_cred(cred);
}
-
- wake_up(&ps->wait);
}
static void destroy_async(struct usb_dev_state *ps, struct list_head *list)
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index 26a305f..ee33c0d7 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -1328,6 +1328,24 @@
*/
if (udev->parent && !PMSG_IS_AUTO(msg))
status = 0;
+
+ /*
+ * If the device is inaccessible, don't try to resume
+ * suspended interfaces and just return the error.
+ */
+ if (status && status != -EBUSY) {
+ int err;
+ u16 devstat;
+
+ err = usb_get_status(udev, USB_RECIP_DEVICE, 0,
+ &devstat);
+ if (err) {
+ dev_err(&udev->dev,
+ "Failed to suspend device, error %d\n",
+ status);
+ goto done;
+ }
+ }
}
/* If the suspend failed, resume interfaces that did get suspended */
@@ -1772,6 +1790,9 @@
int w, i;
struct usb_interface *intf;
+ if (udev->state == USB_STATE_NOTATTACHED)
+ return -ENODEV;
+
/* Fail if autosuspend is disabled, or any interfaces are in use, or
* any interface drivers require remote wakeup but it isn't available.
*/
diff --git a/drivers/usb/core/file.c b/drivers/usb/core/file.c
index ea337a7..b3de80608 100644
--- a/drivers/usb/core/file.c
+++ b/drivers/usb/core/file.c
@@ -26,6 +26,7 @@
#define MAX_USB_MINORS 256
static const struct file_operations *usb_minors[MAX_USB_MINORS];
static DECLARE_RWSEM(minor_rwsem);
+static DEFINE_MUTEX(init_usb_class_mutex);
static int usb_open(struct inode *inode, struct file *file)
{
@@ -108,8 +109,9 @@
static void destroy_usb_class(void)
{
- if (usb_class)
- kref_put(&usb_class->kref, release_usb_class);
+ mutex_lock(&init_usb_class_mutex);
+ kref_put(&usb_class->kref, release_usb_class);
+ mutex_unlock(&init_usb_class_mutex);
}
int usb_major_init(void)
@@ -171,7 +173,10 @@
if (intf->minor >= 0)
return -EADDRINUSE;
+ mutex_lock(&init_usb_class_mutex);
retval = init_usb_class();
+ mutex_unlock(&init_usb_class_mutex);
+
if (retval)
return retval;
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 755dbca..c31c753 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -499,8 +499,10 @@
*/
tbuf_size = max_t(u16, sizeof(struct usb_hub_descriptor), wLength);
tbuf = kzalloc(tbuf_size, GFP_KERNEL);
- if (!tbuf)
- return -ENOMEM;
+ if (!tbuf) {
+ status = -ENOMEM;
+ goto err_alloc;
+ }
bufp = tbuf;
@@ -705,6 +707,7 @@
}
kfree(tbuf);
+ err_alloc:
/* any errors get returned through the urb completion */
spin_lock_irq(&hcd_root_hub_lock);
@@ -1848,7 +1851,7 @@
/* No more submits can occur */
spin_lock_irq(&hcd_urb_list_lock);
rescan:
- list_for_each_entry (urb, &ep->urb_list, urb_list) {
+ list_for_each_entry_reverse(urb, &ep->urb_list, urb_list) {
int is_in;
if (urb->unlinked)
@@ -2502,6 +2505,8 @@
}
if (usb_hcd_is_primary_hcd(hcd) && hcd->shared_hcd) {
hcd = hcd->shared_hcd;
+ clear_bit(HCD_FLAG_RH_RUNNING, &hcd->flags);
+ set_bit(HCD_FLAG_DEAD, &hcd->flags);
if (hcd->rh_registered) {
clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
@@ -2566,6 +2571,7 @@
hcd->bandwidth_mutex = kmalloc(sizeof(*hcd->bandwidth_mutex),
GFP_KERNEL);
if (!hcd->bandwidth_mutex) {
+ kfree(hcd->address0_mutex);
kfree(hcd);
dev_dbg(dev, "hcd bandwidth mutex alloc failed\n");
return NULL;
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 4dcca6a..370ad96 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -363,7 +363,8 @@
}
/* USB 2.0 spec Section 11.24.4.5 */
-static int get_hub_descriptor(struct usb_device *hdev, void *data)
+static int get_hub_descriptor(struct usb_device *hdev,
+ struct usb_hub_descriptor *desc)
{
int i, ret, size;
unsigned dtype;
@@ -379,10 +380,18 @@
for (i = 0; i < 3; i++) {
ret = usb_control_msg(hdev, usb_rcvctrlpipe(hdev, 0),
USB_REQ_GET_DESCRIPTOR, USB_DIR_IN | USB_RT_HUB,
- dtype << 8, 0, data, size,
+ dtype << 8, 0, desc, size,
USB_CTRL_GET_TIMEOUT);
- if (ret >= (USB_DT_HUB_NONVAR_SIZE + 2))
+ if (hub_is_superspeed(hdev)) {
+ if (ret == size)
+ return ret;
+ } else if (ret >= USB_DT_HUB_NONVAR_SIZE + 2) {
+ /* Make sure we have the DeviceRemovable field. */
+ size = USB_DT_HUB_NONVAR_SIZE + desc->bNbrPorts / 8 + 1;
+ if (ret < size)
+ return -EMSGSIZE;
return ret;
+ }
}
return -EINVAL;
}
@@ -1059,6 +1068,9 @@
portstatus = portchange = 0;
status = hub_port_status(hub, port1, &portstatus, &portchange);
+ if (status)
+ goto abort;
+
if (udev || (portstatus & USB_PORT_STAT_CONNECTION))
dev_dbg(&port_dev->dev, "status %04x change %04x\n",
portstatus, portchange);
@@ -1191,7 +1203,7 @@
/* Scan all ports that need attention */
kick_hub_wq(hub);
-
+ abort:
if (type == HUB_INIT2 || type == HUB_INIT3) {
/* Allow autosuspend if it was suppressed */
disconnected:
@@ -1303,7 +1315,7 @@
}
mutex_init(&hub->status_mutex);
- hub->descriptor = kmalloc(sizeof(*hub->descriptor), GFP_KERNEL);
+ hub->descriptor = kzalloc(sizeof(*hub->descriptor), GFP_KERNEL);
if (!hub->descriptor) {
ret = -ENOMEM;
goto fail;
@@ -1311,13 +1323,19 @@
/* Request the entire hub descriptor.
* hub->descriptor can handle USB_MAXCHILDREN ports,
- * but the hub can/will return fewer bytes here.
+ * but a (non-SS) hub can/will return fewer bytes here.
*/
ret = get_hub_descriptor(hdev, hub->descriptor);
if (ret < 0) {
message = "can't read hub descriptor";
goto fail;
- } else if (hub->descriptor->bNbrPorts > USB_MAXCHILDREN) {
+ }
+
+ maxchild = USB_MAXCHILDREN;
+ if (hub_is_superspeed(hdev))
+ maxchild = min_t(unsigned, maxchild, USB_SS_MAXPORTS);
+
+ if (hub->descriptor->bNbrPorts > maxchild) {
message = "hub has too many ports!";
ret = -ENODEV;
goto fail;
@@ -2079,6 +2097,12 @@
dev_info(&udev->dev, "USB disconnect, device number %d\n",
udev->devnum);
+ /*
+ * Ensure that the pm runtime code knows that the USB device
+ * is in the process of being disconnected.
+ */
+ pm_runtime_barrier(&udev->dev);
+
usb_lock_device(udev);
hub_disconnect_children(udev);
@@ -2613,8 +2637,15 @@
if (ret < 0)
return ret;
- /* The port state is unknown until the reset completes. */
- if (!(portstatus & USB_PORT_STAT_RESET))
+ /*
+ * The port state is unknown until the reset completes.
+ *
+ * On top of that, some chips may require additional time
+ * to re-establish a connection after the reset is complete,
+ * so also wait for the connection to be re-established.
+ */
+ if (!(portstatus & USB_PORT_STAT_RESET) &&
+ (portstatus & USB_PORT_STAT_CONNECTION))
break;
/* switch to the long delay after two short delay failures */
@@ -4212,7 +4243,7 @@
struct usb_hub *hub = usb_hub_to_struct_hub(udev->parent);
int connect_type = USB_PORT_CONNECT_TYPE_UNKNOWN;
- if (!udev->usb2_hw_lpm_capable)
+ if (!udev->usb2_hw_lpm_capable || !udev->bos)
return;
if (hub)
@@ -4643,7 +4674,8 @@
static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
u16 portchange)
{
- int status, i;
+ int status = -ENODEV;
+ int i;
unsigned unit_load;
struct usb_device *hdev = hub->hdev;
struct usb_hcd *hcd = bus_to_hcd(hdev->bus);
@@ -4847,9 +4879,10 @@
done:
hub_port_disable(hub, port1, 1);
- if (hcd->driver->relinquish_port && !hub->hdev->parent)
- hcd->driver->relinquish_port(hcd, port1);
-
+ if (hcd->driver->relinquish_port && !hub->hdev->parent) {
+ if (status != -ENOTCONN && status != -ENODEV)
+ hcd->driver->relinquish_port(hcd, port1);
+ }
}
/* Handle physical or logical connection change events.
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 2c3f913..80b348e 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -57,8 +57,9 @@
/* Microsoft LifeCam-VX700 v2.0 */
{ USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME },
- /* Logitech HD Pro Webcams C920 and C930e */
+ /* Logitech HD Pro Webcams C920, C920-C and C930e */
{ USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT },
+ { USB_DEVICE(0x046d, 0x0841), .driver_info = USB_QUIRK_DELAY_INIT },
{ USB_DEVICE(0x046d, 0x0843), .driver_info = USB_QUIRK_DELAY_INIT },
/* Logitech ConferenceCam CC3000e */
@@ -150,6 +151,9 @@
/* appletouch */
{ USB_DEVICE(0x05ac, 0x021a), .driver_info = USB_QUIRK_RESET_RESUME },
+ /* Genesys Logic hub, internally used by Moshi USB to Ethernet Adapter */
+ { USB_DEVICE(0x05e3, 0x0616), .driver_info = USB_QUIRK_NO_LPM },
+
/* Avision AV600U */
{ USB_DEVICE(0x0638, 0x0a13), .driver_info =
USB_QUIRK_STRING_FETCH_255 },
@@ -170,6 +174,14 @@
/* M-Systems Flash Disk Pioneers */
{ USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME },
+ /* Baum Vario Ultra */
+ { USB_DEVICE(0x0904, 0x6101), .driver_info =
+ USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL },
+ { USB_DEVICE(0x0904, 0x6102), .driver_info =
+ USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL },
+ { USB_DEVICE(0x0904, 0x6103), .driver_info =
+ USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL },
+
/* Keytouch QWERTY Panel keyboard */
{ USB_DEVICE(0x0926, 0x3333), .driver_info =
USB_QUIRK_CONFIG_INTF_STRINGS },
@@ -206,6 +218,9 @@
{ USB_DEVICE(0x1a0a, 0x0200), .driver_info =
USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL },
+ /* Corsair Strafe RGB */
+ { USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT },
+
/* Acer C120 LED Projector */
{ USB_DEVICE(0x1de1, 0xc102), .driver_info = USB_QUIRK_NO_LPM },
@@ -215,6 +230,10 @@
/* Blackmagic Design UltraStudio SDI */
{ USB_DEVICE(0x1edb, 0xbd4f), .driver_info = USB_QUIRK_NO_LPM },
+ /* Hauppauge HVR-950q */
+ { USB_DEVICE(0x2040, 0x7200), .driver_info =
+ USB_QUIRK_CONFIG_INTF_STRINGS },
+
/* INTEL VALUE SSD */
{ USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
@@ -240,6 +259,7 @@
{ USB_DEVICE(0x093a, 0x2500), .driver_info = USB_QUIRK_RESET_RESUME },
{ USB_DEVICE(0x093a, 0x2510), .driver_info = USB_QUIRK_RESET_RESUME },
{ USB_DEVICE(0x093a, 0x2521), .driver_info = USB_QUIRK_RESET_RESUME },
+ { USB_DEVICE(0x03f0, 0x2b4a), .driver_info = USB_QUIRK_RESET_RESUME },
/* Logitech Optical Mouse M90/M100 */
{ USB_DEVICE(0x046d, 0xc05a), .driver_info = USB_QUIRK_RESET_RESUME },
diff --git a/drivers/usb/core/usb-acpi.c b/drivers/usb/core/usb-acpi.c
index 2776cfe..ef9cf4a 100644
--- a/drivers/usb/core/usb-acpi.c
+++ b/drivers/usb/core/usb-acpi.c
@@ -127,6 +127,22 @@
*/
#define USB_ACPI_LOCATION_VALID (1 << 31)
+static struct acpi_device *usb_acpi_find_port(struct acpi_device *parent,
+ int raw)
+{
+ struct acpi_device *adev;
+
+ if (!parent)
+ return NULL;
+
+ list_for_each_entry(adev, &parent->children, node) {
+ if (acpi_device_adr(adev) == raw)
+ return adev;
+ }
+
+ return acpi_find_child_device(parent, raw, false);
+}
+
static struct acpi_device *usb_acpi_find_companion(struct device *dev)
{
struct usb_device *udev;
@@ -174,8 +190,10 @@
int raw;
raw = usb_hcd_find_raw_port_number(hcd, port1);
- adev = acpi_find_child_device(ACPI_COMPANION(&udev->dev),
- raw, false);
+
+ adev = usb_acpi_find_port(ACPI_COMPANION(&udev->dev),
+ raw);
+
if (!adev)
return NULL;
} else {
@@ -186,7 +204,9 @@
return NULL;
acpi_bus_get_device(parent_handle, &adev);
- adev = acpi_find_child_device(adev, port1, false);
+
+ adev = usb_acpi_find_port(adev, port1);
+
if (!adev)
return NULL;
}
diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c
index 2f1fb7e..9eba51b 100644
--- a/drivers/usb/dwc3/dwc3-exynos.c
+++ b/drivers/usb/dwc3/dwc3-exynos.c
@@ -148,7 +148,8 @@
exynos->axius_clk = devm_clk_get(dev, "usbdrd30_axius_clk");
if (IS_ERR(exynos->axius_clk)) {
dev_err(dev, "no AXI UpScaler clk specified\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto axius_clk_err;
}
clk_prepare_enable(exynos->axius_clk);
} else {
@@ -206,6 +207,7 @@
regulator_disable(exynos->vdd33);
err2:
clk_disable_unprepare(exynos->axius_clk);
+axius_clk_err:
clk_disable_unprepare(exynos->susp_clk);
clk_disable_unprepare(exynos->clk);
return ret;
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index 6ae1292..fa11160 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -1584,7 +1584,7 @@
}
mdwc->hc_died = true;
- schedule_delayed_work(&mdwc->sm_work, 0);
+ queue_delayed_work(system_freezable_wq, &mdwc->sm_work, 0);
return 0;
}
@@ -2329,7 +2329,7 @@
clear_bit(B_SUSPEND, &mdwc->inputs);
}
- schedule_delayed_work(&mdwc->sm_work, 0);
+ queue_delayed_work(system_freezable_wq, &mdwc->sm_work, 0);
}
static void dwc3_resume_work(struct work_struct *w)
@@ -3126,7 +3126,7 @@
dwc3_msm_id_notifier(&mdwc->id_nb, true, mdwc->extcon_id);
else if (!pval.intval) {
/* USB cable is not connected */
- schedule_delayed_work(&mdwc->sm_work, 0);
+ queue_delayed_work(system_freezable_wq, &mdwc->sm_work, 0);
} else {
if (pval.intval > 0)
dev_info(mdwc->dev, "charger detection in progress\n");
@@ -3839,7 +3839,7 @@
}
if (work)
- schedule_delayed_work(&mdwc->sm_work, delay);
+ queue_delayed_work(system_freezable_wq, &mdwc->sm_work, delay);
ret:
return;
diff --git a/drivers/usb/dwc3/dwc3-st.c b/drivers/usb/dwc3/dwc3-st.c
index 5c0adb9..81db2fa 100644
--- a/drivers/usb/dwc3/dwc3-st.c
+++ b/drivers/usb/dwc3/dwc3-st.c
@@ -224,7 +224,7 @@
dwc3_data->syscfg_reg_off = res->start;
- dev_vdbg(&pdev->dev, "glue-logic addr 0x%p, syscfg-reg offset 0x%x\n",
+ dev_vdbg(&pdev->dev, "glue-logic addr 0x%pK, syscfg-reg offset 0x%x\n",
dwc3_data->glue_base, dwc3_data->syscfg_reg_off);
dwc3_data->rstc_pwrdn = devm_reset_control_get(dev, "powerdown");
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 2210da9..9470958 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -261,6 +261,7 @@
int status)
{
struct dwc3 *dwc = dep->dwc;
+ unsigned int unmap_after_complete = false;
int i;
if (req->queued) {
@@ -285,11 +286,19 @@
if (req->request.status == -EINPROGRESS)
req->request.status = status;
- if (dwc->ep0_bounced && dep->number <= 1)
+ /*
+ * NOTICE we don't want to unmap before calling ->complete() if we're
+ * dealing with a bounced ep0 request. If we unmap it here, we would end
+ * up overwritting the contents of req->buf and this could confuse the
+ * gadget driver.
+ */
+ if (dwc->ep0_bounced && dep->number <= 1) {
dwc->ep0_bounced = false;
-
- usb_gadget_unmap_request(&dwc->gadget, &req->request,
- req->direction);
+ unmap_after_complete = true;
+ } else {
+ usb_gadget_unmap_request(&dwc->gadget,
+ &req->request, req->direction);
+ }
dev_dbg(dwc->dev, "request %pK from %s completed %d/%d ===> %d\n",
req, dep->name, req->request.actual,
@@ -300,6 +309,10 @@
spin_unlock(&dwc->lock);
usb_gadget_giveback_request(&dep->endpoint, &req->request);
spin_lock(&dwc->lock);
+
+ if (unmap_after_complete)
+ usb_gadget_unmap_request(&dwc->gadget,
+ &req->request, req->direction);
}
int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned cmd, u32 param)
diff --git a/drivers/usb/gadget/function/f_acm.c b/drivers/usb/gadget/function/f_acm.c
index 651e4af..1bcfe81 100644
--- a/drivers/usb/gadget/function/f_acm.c
+++ b/drivers/usb/gadget/function/f_acm.c
@@ -535,13 +535,15 @@
{
struct usb_composite_dev *cdev = acm->port.func.config->cdev;
int status;
+ __le16 serial_state;
spin_lock(&acm->lock);
if (acm->notify_req) {
dev_dbg(&cdev->gadget->dev, "acm ttyGS%d serial state %04x\n",
acm->port_num, acm->serial_state);
+ serial_state = cpu_to_le16(acm->serial_state);
status = acm_cdc_notify(acm, USB_CDC_NOTIFY_SERIAL_STATE,
- 0, &acm->serial_state, sizeof(acm->serial_state));
+ 0, &serial_state, sizeof(acm->serial_state));
} else {
acm->pending = true;
status = 0;
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 1084a24..b5f9ae5 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -3989,6 +3989,7 @@
{
struct ffs_dev *ffs_obj;
struct f_fs_opts *opts;
+ struct config_item *ci;
ENTER();
@@ -4022,11 +4023,11 @@
goto done;
}
+ ci = opts->func_inst.group.cg_item.ci_parent->ci_parent;
ffs_dev_unlock();
if (test_bit(FFS_FL_BOUND, &ffs->flags)) {
- unregister_gadget_item(opts->
- func_inst.group.cg_item.ci_parent->ci_parent);
+ unregister_gadget_item(ci);
ffs_log("unreg gadget done");
}
done:
diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
index 99285b4..ee579ba 100644
--- a/drivers/usb/gadget/function/f_hid.c
+++ b/drivers/usb/gadget/function/f_hid.c
@@ -539,7 +539,7 @@
}
status = usb_ep_enable(hidg->out_ep);
if (status < 0) {
- ERROR(cdev, "Enable IN endpoint FAILED!\n");
+ ERROR(cdev, "Enable OUT endpoint FAILED!\n");
goto fail;
}
hidg->out_ep->driver_data = hidg;
diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
index 59d6ac6..9b72748 100644
--- a/drivers/usb/gadget/function/f_mass_storage.c
+++ b/drivers/usb/gadget/function/f_mass_storage.c
@@ -399,7 +399,11 @@
/* Caller must hold fsg->lock */
static void wakeup_thread(struct fsg_common *common)
{
- smp_wmb(); /* ensure the write of bh->state is complete */
+ /*
+ * Ensure the reading of thread_wakeup_needed
+ * and the writing of bh->state are completed
+ */
+ smp_mb();
/* Tell the main thread that something has happened */
common->thread_wakeup_needed = 1;
if (common->thread_task)
@@ -649,7 +653,12 @@
}
__set_current_state(TASK_RUNNING);
common->thread_wakeup_needed = 0;
- smp_rmb(); /* ensure the latest bh->state is visible */
+
+ /*
+ * Ensure the writing of thread_wakeup_needed
+ * and the reading of bh->state are completed
+ */
+ smp_mb();
return rc;
}
diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c
index da6b8b4..c9b2ed8 100644
--- a/drivers/usb/gadget/function/f_midi.c
+++ b/drivers/usb/gadget/function/f_midi.c
@@ -87,6 +87,8 @@
int index;
char *id;
unsigned int buflen, qlen;
+
+ unsigned char free_ref;
};
static inline struct f_midi *func_to_midi(struct usb_function *f)
@@ -95,6 +97,8 @@
}
static void f_midi_transmit(struct f_midi *midi, struct usb_request *req);
+static void f_midi_rmidi_free(struct snd_rawmidi *rmidi);
+static void f_midi_free_inst(struct usb_function_instance *f);
DECLARE_UAC_AC_HEADER_DESCRIPTOR(1);
DECLARE_USB_MIDI_OUT_JACK_DESCRIPTOR(1);
@@ -150,6 +154,13 @@
.bmAttributes = USB_ENDPOINT_XFER_BULK,
};
+static struct usb_ss_ep_comp_descriptor bulk_out_ss_comp_desc = {
+ .bLength = sizeof(bulk_out_ss_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+};
+
/* B.5.2 Class-specific MS Bulk OUT Endpoint Descriptor */
static struct usb_ms_endpoint_descriptor_16 ms_out_desc = {
/* .bLength = DYNAMIC */
@@ -167,6 +178,13 @@
.bmAttributes = USB_ENDPOINT_XFER_BULK,
};
+static struct usb_ss_ep_comp_descriptor bulk_in_ss_comp_desc = {
+ .bLength = sizeof(bulk_in_ss_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+};
+
/* B.6.2 Class-specific MS Bulk IN Endpoint Descriptor */
static struct usb_ms_endpoint_descriptor_16 ms_in_desc = {
/* .bLength = DYNAMIC */
@@ -361,7 +379,9 @@
/* allocate a bunch of read buffers and queue them all at once. */
for (i = 0; i < midi->qlen && err == 0; i++) {
struct usb_request *req =
- midi_alloc_ep_req(midi->out_ep, midi->buflen);
+ midi_alloc_ep_req(midi->out_ep,
+ max_t(unsigned, midi->buflen,
+ bulk_out_desc.wMaxPacketSize));
if (req == NULL)
return -ENOMEM;
@@ -698,6 +718,8 @@
SNDRV_RAWMIDI_INFO_INPUT |
SNDRV_RAWMIDI_INFO_DUPLEX;
rmidi->private_data = midi;
+ rmidi->private_free = f_midi_rmidi_free;
+ midi->free_ref++;
/*
* Yes, rawmidi OUTPUT = USB IN, and rawmidi INPUT = USB OUT.
@@ -733,7 +755,7 @@
struct usb_composite_dev *cdev = c->cdev;
struct f_midi *midi = func_to_midi(f);
struct usb_string *us;
- int status, n, jack = 1, i = 0;
+ int status, n, jack = 1, i = 0, endpoint_descriptor_index = 0;
midi->gadget = cdev->gadget;
tasklet_init(&midi->tasklet, f_midi_in_tasklet, (unsigned long) midi);
@@ -774,7 +796,7 @@
goto fail;
/* allocate temporary function list */
- midi_function = kcalloc((MAX_PORTS * 4) + 9, sizeof(*midi_function),
+ midi_function = kcalloc((MAX_PORTS * 4) + 11, sizeof(*midi_function),
GFP_KERNEL);
if (!midi_function) {
status = -ENOMEM;
@@ -864,6 +886,7 @@
ms_in_desc.bNumEmbMIDIJack = midi->out_ports;
/* ... and add them to the list */
+ endpoint_descriptor_index = i;
midi_function[i++] = (struct usb_descriptor_header *) &bulk_out_desc;
midi_function[i++] = (struct usb_descriptor_header *) &ms_out_desc;
midi_function[i++] = (struct usb_descriptor_header *) &bulk_in_desc;
@@ -888,13 +911,34 @@
goto fail_f_midi;
}
+ if (gadget_is_superspeed(c->cdev->gadget)) {
+ bulk_in_desc.wMaxPacketSize = cpu_to_le16(1024);
+ bulk_out_desc.wMaxPacketSize = cpu_to_le16(1024);
+ i = endpoint_descriptor_index;
+ midi_function[i++] = (struct usb_descriptor_header *)
+ &bulk_out_desc;
+ midi_function[i++] = (struct usb_descriptor_header *)
+ &bulk_out_ss_comp_desc;
+ midi_function[i++] = (struct usb_descriptor_header *)
+ &ms_out_desc;
+ midi_function[i++] = (struct usb_descriptor_header *)
+ &bulk_in_desc;
+ midi_function[i++] = (struct usb_descriptor_header *)
+ &bulk_in_ss_comp_desc;
+ midi_function[i++] = (struct usb_descriptor_header *)
+ &ms_in_desc;
+ f->ss_descriptors = usb_copy_descriptors(midi_function);
+ if (!f->ss_descriptors)
+ goto fail_f_midi;
+ }
+
kfree(midi_function);
return 0;
fail_f_midi:
kfree(midi_function);
- usb_free_descriptors(f->hs_descriptors);
+ usb_free_all_descriptors(f);
fail:
f_midi_unregister_card(midi);
fail_register:
@@ -941,7 +985,7 @@
u32 num; \
\
mutex_lock(&opts->lock); \
- if (opts->refcnt) { \
+ if (opts->refcnt > 1) { \
ret = -EBUSY; \
goto end; \
} \
@@ -996,7 +1040,7 @@
char *c;
mutex_lock(&opts->lock);
- if (opts->refcnt) {
+ if (opts->refcnt > 1) {
ret = -EBUSY;
goto end;
}
@@ -1037,13 +1081,21 @@
static void f_midi_free_inst(struct usb_function_instance *f)
{
struct f_midi_opts *opts;
+ bool free = false;
opts = container_of(f, struct f_midi_opts, func_inst);
- if (opts->id_allocated)
- kfree(opts->id);
+ mutex_lock(&opts->lock);
+ if (!--opts->refcnt) {
+ free = true;
+ }
+ mutex_unlock(&opts->lock);
- kfree(opts);
+ if (free) {
+ if (opts->id_allocated)
+ kfree(opts->id);
+ kfree(opts);
+ }
}
#ifdef CONFIG_USB_CONFIGFS_UEVENT
@@ -1121,6 +1173,7 @@
opts->qlen = 32;
opts->in_ports = 1;
opts->out_ports = 1;
+ opts->refcnt = 1;
if (create_alsa_device(&opts->func_inst)) {
kfree(opts);
@@ -1138,18 +1191,28 @@
struct f_midi *midi;
struct f_midi_opts *opts;
int i;
+ bool free = false;
midi = func_to_midi(f);
opts = container_of(f->fi, struct f_midi_opts, func_inst);
- kfree(midi->id);
mutex_lock(&opts->lock);
- for (i = opts->in_ports - 1; i >= 0; --i)
- kfree(midi->in_port[i]);
- opts->func_inst.f = NULL;
- kfree(midi);
- opts->func_inst.f = NULL;
- --opts->refcnt;
+ if (!--midi->free_ref) {
+ kfree(midi->id);
+ for (i = opts->in_ports - 1; i >= 0; --i)
+ kfree(midi->in_port[i]);
+ kfree(midi);
+ opts->func_inst.f = NULL;
+ free = true;
+ }
mutex_unlock(&opts->lock);
+
+ if (free)
+ f_midi_free_inst(&opts->func_inst);
+}
+
+static void f_midi_rmidi_free(struct snd_rawmidi *rmidi)
+{
+ f_midi_free(rmidi->private_data);
}
static void f_midi_unbind(struct usb_configuration *c, struct usb_function *f)
@@ -1166,7 +1229,7 @@
card = midi->card;
midi->card = NULL;
if (card)
- snd_card_free(card);
+ snd_card_free_when_closed(card);
usb_free_all_descriptors(f);
}
@@ -1220,6 +1283,7 @@
midi->index = opts->index;
midi->buflen = opts->buflen;
midi->qlen = opts->qlen;
+ midi->free_ref = 1;
++opts->refcnt;
mutex_unlock(&opts->lock);
diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c
index 29b41b5..c7689d0 100644
--- a/drivers/usb/gadget/function/f_uvc.c
+++ b/drivers/usb/gadget/function/f_uvc.c
@@ -625,7 +625,7 @@
uvc_ss_streaming_comp.bMaxBurst = opts->streaming_maxburst;
uvc_ss_streaming_comp.wBytesPerInterval =
cpu_to_le16(max_packet_size * max_packet_mult *
- opts->streaming_maxburst);
+ (opts->streaming_maxburst + 1));
/* Allocate endpoints. */
ep = usb_ep_autoconfig(cdev->gadget, &uvc_control_ep);
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index de01443..43ce2cf 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -1676,9 +1676,10 @@
gadgetfs_suspend (struct usb_gadget *gadget)
{
struct dev_data *dev = get_gadget_data (gadget);
+ unsigned long flags;
INFO (dev, "suspended from state %d\n", dev->state);
- spin_lock (&dev->lock);
+ spin_lock_irqsave(&dev->lock, flags);
switch (dev->state) {
case STATE_DEV_SETUP: // VERY odd... host died??
case STATE_DEV_CONNECTED:
@@ -1689,7 +1690,7 @@
default:
break;
}
- spin_unlock (&dev->lock);
+ spin_unlock_irqrestore(&dev->lock, flags);
}
static struct usb_gadget_driver gadgetfs_driver = {
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
index 6610f7a..64f404a 100644
--- a/drivers/usb/gadget/udc/dummy_hcd.c
+++ b/drivers/usb/gadget/udc/dummy_hcd.c
@@ -442,23 +442,16 @@
/* Report reset and disconnect events to the driver */
if (dum->driver && (disconnect || reset)) {
stop_activity(dum);
- spin_unlock(&dum->lock);
if (reset)
usb_gadget_udc_reset(&dum->gadget, dum->driver);
else
dum->driver->disconnect(&dum->gadget);
- spin_lock(&dum->lock);
}
} else if (dum_hcd->active != dum_hcd->old_active) {
- if (dum_hcd->old_active && dum->driver->suspend) {
- spin_unlock(&dum->lock);
+ if (dum_hcd->old_active && dum->driver->suspend)
dum->driver->suspend(&dum->gadget);
- spin_lock(&dum->lock);
- } else if (!dum_hcd->old_active && dum->driver->resume) {
- spin_unlock(&dum->lock);
+ else if (!dum_hcd->old_active && dum->driver->resume)
dum->driver->resume(&dum->gadget);
- spin_lock(&dum->lock);
- }
}
dum_hcd->old_status = dum_hcd->port_status;
@@ -985,7 +978,9 @@
struct dummy_hcd *dum_hcd = gadget_to_dummy_hcd(g);
struct dummy *dum = dum_hcd->dum;
+ spin_lock_irq(&dum->lock);
dum->driver = NULL;
+ spin_unlock_irq(&dum->lock);
return 0;
}
@@ -2011,7 +2006,7 @@
HUB_CHAR_COMMON_OCPM);
desc->bNbrPorts = 1;
desc->u.ss.bHubHdrDecLat = 0x04; /* Worst case: 0.4 micro sec*/
- desc->u.ss.DeviceRemovable = 0xffff;
+ desc->u.ss.DeviceRemovable = 0;
}
static inline void hub_descriptor(struct usb_hub_descriptor *desc)
@@ -2023,8 +2018,8 @@
HUB_CHAR_INDV_PORT_LPSM |
HUB_CHAR_COMMON_OCPM);
desc->bNbrPorts = 1;
- desc->u.hs.DeviceRemovable[0] = 0xff;
- desc->u.hs.DeviceRemovable[1] = 0xff;
+ desc->u.hs.DeviceRemovable[0] = 0;
+ desc->u.hs.DeviceRemovable[1] = 0xff; /* PortPwrCtrlMask */
}
static int dummy_hub_control(
diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c
index 6706aef..a47de8c 100644
--- a/drivers/usb/gadget/udc/net2280.c
+++ b/drivers/usb/gadget/udc/net2280.c
@@ -2425,11 +2425,8 @@
nuke(&dev->ep[i]);
/* report disconnect; the driver is already quiesced */
- if (driver) {
- spin_unlock(&dev->lock);
+ if (driver)
driver->disconnect(&dev->gadget);
- spin_lock(&dev->lock);
- }
usb_reinit(dev);
}
@@ -3275,8 +3272,6 @@
BIT(PCI_RETRY_ABORT_INTERRUPT))
static void handle_stat1_irqs(struct net2280 *dev, u32 stat)
-__releases(dev->lock)
-__acquires(dev->lock)
{
struct net2280_ep *ep;
u32 tmp, num, mask, scratch;
@@ -3317,14 +3312,12 @@
if (disconnect || reset) {
stop_activity(dev, dev->driver);
ep0_start(dev);
- spin_unlock(&dev->lock);
if (reset)
usb_gadget_udc_reset
(&dev->gadget, dev->driver);
else
(dev->driver->disconnect)
(&dev->gadget);
- spin_lock(&dev->lock);
return;
}
}
diff --git a/drivers/usb/host/ehci-exynos.c b/drivers/usb/host/ehci-exynos.c
index df538fd..46f5354c 100644
--- a/drivers/usb/host/ehci-exynos.c
+++ b/drivers/usb/host/ehci-exynos.c
@@ -77,10 +77,12 @@
if (IS_ERR(phy)) {
ret = PTR_ERR(phy);
if (ret == -EPROBE_DEFER) {
+ of_node_put(child);
return ret;
} else if (ret != -ENOSYS && ret != -ENODEV) {
dev_err(dev,
"Error retrieving usb2 phy: %d\n", ret);
+ of_node_put(child);
return ret;
}
}
diff --git a/drivers/usb/host/ohci-exynos.c b/drivers/usb/host/ohci-exynos.c
index 2cd105b..6865b91 100644
--- a/drivers/usb/host/ohci-exynos.c
+++ b/drivers/usb/host/ohci-exynos.c
@@ -66,10 +66,12 @@
if (IS_ERR(phy)) {
ret = PTR_ERR(phy);
if (ret == -EPROBE_DEFER) {
+ of_node_put(child);
return ret;
} else if (ret != -ENOSYS && ret != -ENODEV) {
dev_err(dev,
"Error retrieving usb2 phy: %d\n", ret);
+ of_node_put(child);
return ret;
}
}
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index f940056..1fc6f47 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -89,6 +89,7 @@
AMD_CHIPSET_HUDSON2,
AMD_CHIPSET_BOLTON,
AMD_CHIPSET_YANGTZE,
+ AMD_CHIPSET_TAISHAN,
AMD_CHIPSET_UNKNOWN,
};
@@ -136,20 +137,26 @@
pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
PCI_DEVICE_ID_AMD_HUDSON2_SMBUS, NULL);
- if (!pinfo->smbus_dev) {
- pinfo->sb_type.gen = NOT_AMD_CHIPSET;
- return 0;
+ if (pinfo->smbus_dev) {
+ rev = pinfo->smbus_dev->revision;
+ if (rev >= 0x11 && rev <= 0x14)
+ pinfo->sb_type.gen = AMD_CHIPSET_HUDSON2;
+ else if (rev >= 0x15 && rev <= 0x18)
+ pinfo->sb_type.gen = AMD_CHIPSET_BOLTON;
+ else if (rev >= 0x39 && rev <= 0x3a)
+ pinfo->sb_type.gen = AMD_CHIPSET_YANGTZE;
+ } else {
+ pinfo->smbus_dev = pci_get_device(PCI_VENDOR_ID_AMD,
+ 0x145c, NULL);
+ if (pinfo->smbus_dev) {
+ rev = pinfo->smbus_dev->revision;
+ pinfo->sb_type.gen = AMD_CHIPSET_TAISHAN;
+ } else {
+ pinfo->sb_type.gen = NOT_AMD_CHIPSET;
+ return 0;
+ }
}
-
- rev = pinfo->smbus_dev->revision;
- if (rev >= 0x11 && rev <= 0x14)
- pinfo->sb_type.gen = AMD_CHIPSET_HUDSON2;
- else if (rev >= 0x15 && rev <= 0x18)
- pinfo->sb_type.gen = AMD_CHIPSET_BOLTON;
- else if (rev >= 0x39 && rev <= 0x3a)
- pinfo->sb_type.gen = AMD_CHIPSET_YANGTZE;
}
-
pinfo->sb_type.rev = rev;
return 1;
}
@@ -251,11 +258,12 @@
{
/* Make sure amd chipset type has already been initialized */
usb_amd_find_chipset_info();
- if (amd_chipset.sb_type.gen != AMD_CHIPSET_YANGTZE)
- return 0;
-
- dev_dbg(&pdev->dev, "QUIRK: Enable AMD remote wakeup fix\n");
- return 1;
+ if (amd_chipset.sb_type.gen == AMD_CHIPSET_YANGTZE ||
+ amd_chipset.sb_type.gen == AMD_CHIPSET_TAISHAN) {
+ dev_dbg(&pdev->dev, "QUIRK: Enable AMD remote wakeup fix\n");
+ return 1;
+ }
+ return 0;
}
EXPORT_SYMBOL_GPL(usb_hcd_amd_remote_wakeup_quirk);
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
index 4cbd063..a11c2c8 100644
--- a/drivers/usb/host/r8a66597-hcd.c
+++ b/drivers/usb/host/r8a66597-hcd.c
@@ -1269,7 +1269,7 @@
time = 30;
break;
default:
- time = 300;
+ time = 50;
break;
}
@@ -1785,6 +1785,7 @@
pipe = td->pipe;
pipe_stop(r8a66597, pipe);
+ /* Select a different address or endpoint */
new_td = td;
do {
list_move_tail(&new_td->queue,
@@ -1794,7 +1795,8 @@
new_td = td;
break;
}
- } while (td != new_td && td->address == new_td->address);
+ } while (td != new_td && td->address == new_td->address &&
+ td->pipe->info.epnum == new_td->pipe->info.epnum);
start_transfer(r8a66597, new_td);
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 9dbd759..fae1222 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -783,6 +783,9 @@
clear_bit(wIndex, &bus_state->resuming_ports);
set_bit(wIndex, &bus_state->rexit_ports);
+
+ xhci_test_and_clear_bit(xhci, port_array, wIndex,
+ PORT_PLC);
xhci_set_link_state(xhci, port_array, wIndex,
XDEV_U0);
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 8248359..35e0c04 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1711,7 +1711,7 @@
xhci->dcbaa->dev_context_ptrs[0] = cpu_to_le64(xhci->scratchpad->sp_dma);
for (i = 0; i < num_sp; i++) {
dma_addr_t dma;
- void *buf = dma_alloc_coherent(dev, xhci->page_size, &dma,
+ void *buf = dma_zalloc_coherent(dev, xhci->page_size, &dma,
flags);
if (!buf)
goto fail_sp5;
@@ -2768,7 +2768,7 @@
(xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) |
xhci->cmd_ring->cycle_state;
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
- "// Setting command ring address to 0x%x", val);
+ "// Setting command ring address to 0x%016llx", val_64);
xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
xhci_dbg_cmd_ptrs(xhci);
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index dd262f4..e8f9906 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -52,6 +52,7 @@
#define PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI 0x0aa8
#define PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI 0x1aa8
#define PCI_DEVICE_ID_INTEL_APL_XHCI 0x5aa8
+#define PCI_DEVICE_ID_INTEL_DNV_XHCI 0x19d0
static const char hcd_name[] = "xhci_hcd";
@@ -167,12 +168,14 @@
pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI ||
- pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI)) {
+ pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_DNV_XHCI)) {
xhci->quirks |= XHCI_PME_STUCK_QUIRK;
}
if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
(pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
- pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI))
+ pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_DNV_XHCI))
xhci->quirks |= XHCI_MISSING_CAS;
if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
@@ -195,6 +198,9 @@
if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
pdev->device == 0x1042)
xhci->quirks |= XHCI_BROKEN_STREAMS;
+ if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
+ pdev->device == 0x1142)
+ xhci->quirks |= XHCI_TRUST_TX_LENGTH;
if (xhci->quirks & XHCI_RESET_ON_RESUME)
xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 1ddf882..481d9d2 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -154,7 +154,7 @@
irq = platform_get_irq(pdev, 0);
if (irq < 0)
- return -ENODEV;
+ return irq;
/* Try to set 64-bit DMA first */
if (WARN_ON(!pdev->dev.dma_mask))
@@ -208,7 +208,7 @@
pm_runtime_set_autosuspend_delay(&pdev->dev, 1000);
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
- pm_runtime_get_sync(&pdev->dev);
+ pm_runtime_get_noresume(&pdev->dev);
if (of_device_is_compatible(pdev->dev.of_node,
"marvell,armada-375-xhci") ||
@@ -293,6 +293,8 @@
usb_put_hcd(xhci->shared_hcd);
disable_clk:
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
if (!IS_ERR(clk))
clk_disable_unprepare(clk);
@@ -308,8 +310,6 @@
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
struct clk *clk = xhci->clk;
- pm_runtime_disable(&dev->dev);
-
device_remove_file(&dev->dev, &dev_attr_config_imod);
xhci->xhc_state |= XHCI_STATE_REMOVING;
usb_remove_hcd(xhci->shared_hcd);
@@ -322,6 +322,9 @@
clk_disable_unprepare(clk);
usb_put_hcd(hcd);
+ pm_runtime_set_suspended(&dev->dev);
+ pm_runtime_disable(&dev->dev);
+
return 0;
}
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 12e38cf..5bfe478 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -856,13 +856,16 @@
(ep->ep_state & EP_GETTING_NO_STREAMS)) {
int stream_id;
- for (stream_id = 0; stream_id < ep->stream_info->num_streams;
+ for (stream_id = 1; stream_id < ep->stream_info->num_streams;
stream_id++) {
+ ring = ep->stream_info->stream_rings[stream_id];
+ if (!ring)
+ continue;
+
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
"Killing URBs for slot ID %u, ep index %u, stream %u",
- slot_id, ep_index, stream_id + 1);
- xhci_kill_ring_urbs(xhci,
- ep->stream_info->stream_rings[stream_id]);
+ slot_id, ep_index, stream_id);
+ xhci_kill_ring_urbs(xhci, ring);
}
} else {
ring = ep->ring;
diff --git a/drivers/usb/misc/idmouse.c b/drivers/usb/misc/idmouse.c
index 4e38683c..6d4e757 100644
--- a/drivers/usb/misc/idmouse.c
+++ b/drivers/usb/misc/idmouse.c
@@ -346,6 +346,9 @@
if (iface_desc->desc.bInterfaceClass != 0x0A)
return -ENODEV;
+ if (iface_desc->desc.bNumEndpoints < 1)
+ return -ENODEV;
+
/* allocate memory for our device state and initialize it */
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (dev == NULL)
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
index 775690b..5e43fd8 100644
--- a/drivers/usb/misc/iowarrior.c
+++ b/drivers/usb/misc/iowarrior.c
@@ -557,7 +557,7 @@
info.revision = le16_to_cpu(dev->udev->descriptor.bcdDevice);
/* 0==UNKNOWN, 1==LOW(usb1.1) ,2=FULL(usb1.1), 3=HIGH(usb2.0) */
- info.speed = le16_to_cpu(dev->udev->speed);
+ info.speed = dev->udev->speed;
info.if_num = dev->interface->cur_altsetting->desc.bInterfaceNumber;
info.report_size = dev->report_size;
diff --git a/drivers/usb/misc/legousbtower.c b/drivers/usb/misc/legousbtower.c
index 4dd531a..0ec9ee5 100644
--- a/drivers/usb/misc/legousbtower.c
+++ b/drivers/usb/misc/legousbtower.c
@@ -317,9 +317,16 @@
int subminor;
int retval = 0;
struct usb_interface *interface;
- struct tower_reset_reply reset_reply;
+ struct tower_reset_reply *reset_reply;
int result;
+ reset_reply = kmalloc(sizeof(*reset_reply), GFP_KERNEL);
+
+ if (!reset_reply) {
+ retval = -ENOMEM;
+ goto exit;
+ }
+
nonseekable_open(inode, file);
subminor = iminor(inode);
@@ -364,8 +371,8 @@
USB_TYPE_VENDOR | USB_DIR_IN | USB_RECIP_DEVICE,
0,
0,
- &reset_reply,
- sizeof(reset_reply),
+ reset_reply,
+ sizeof(*reset_reply),
1000);
if (result < 0) {
dev_err(&dev->udev->dev,
@@ -406,6 +413,7 @@
mutex_unlock(&dev->lock);
exit:
+ kfree(reset_reply);
return retval;
}
@@ -808,7 +816,7 @@
struct lego_usb_tower *dev = NULL;
struct usb_host_interface *iface_desc;
struct usb_endpoint_descriptor* endpoint;
- struct tower_get_version_reply get_version_reply;
+ struct tower_get_version_reply *get_version_reply = NULL;
int i;
int retval = -ENOMEM;
int result;
@@ -898,6 +906,13 @@
dev->interrupt_in_interval = interrupt_in_interval ? interrupt_in_interval : dev->interrupt_in_endpoint->bInterval;
dev->interrupt_out_interval = interrupt_out_interval ? interrupt_out_interval : dev->interrupt_out_endpoint->bInterval;
+ get_version_reply = kmalloc(sizeof(*get_version_reply), GFP_KERNEL);
+
+ if (!get_version_reply) {
+ retval = -ENOMEM;
+ goto error;
+ }
+
/* get the firmware version and log it */
result = usb_control_msg (udev,
usb_rcvctrlpipe(udev, 0),
@@ -905,18 +920,19 @@
USB_TYPE_VENDOR | USB_DIR_IN | USB_RECIP_DEVICE,
0,
0,
- &get_version_reply,
- sizeof(get_version_reply),
+ get_version_reply,
+ sizeof(*get_version_reply),
1000);
if (result < 0) {
dev_err(idev, "LEGO USB Tower get version control request failed\n");
retval = result;
goto error;
}
- dev_info(&interface->dev, "LEGO USB Tower firmware version is %d.%d "
- "build %d\n", get_version_reply.major,
- get_version_reply.minor,
- le16_to_cpu(get_version_reply.build_no));
+ dev_info(&interface->dev,
+ "LEGO USB Tower firmware version is %d.%d build %d\n",
+ get_version_reply->major,
+ get_version_reply->minor,
+ le16_to_cpu(get_version_reply->build_no));
/* we can register the device now, as it is ready */
usb_set_intfdata (interface, dev);
@@ -937,9 +953,11 @@
USB_MAJOR, dev->minor);
exit:
+ kfree(get_version_reply);
return retval;
error:
+ kfree(get_version_reply);
tower_delete(dev);
return retval;
}
diff --git a/drivers/usb/misc/lvstest.c b/drivers/usb/misc/lvstest.c
index 86b4e4b..383fa00 100644
--- a/drivers/usb/misc/lvstest.c
+++ b/drivers/usb/misc/lvstest.c
@@ -370,6 +370,10 @@
hdev = interface_to_usbdev(intf);
desc = intf->cur_altsetting;
+
+ if (desc->desc.bNumEndpoints < 1)
+ return -ENODEV;
+
endpoint = &desc->endpoint[0].desc;
/* valid only for SS root hub */
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
index 1624b09..2e947dc 100644
--- a/drivers/usb/misc/usbtest.c
+++ b/drivers/usb/misc/usbtest.c
@@ -135,6 +135,7 @@
case USB_ENDPOINT_XFER_INT:
if (dev->info->intr)
goto try_intr;
+ continue;
case USB_ENDPOINT_XFER_ISOC:
if (dev->info->iso)
goto try_iso;
diff --git a/drivers/usb/misc/uss720.c b/drivers/usb/misc/uss720.c
index bbd029c..442b663 100644
--- a/drivers/usb/misc/uss720.c
+++ b/drivers/usb/misc/uss720.c
@@ -711,6 +711,11 @@
interface = intf->cur_altsetting;
+ if (interface->desc.bNumEndpoints < 3) {
+ usb_put_dev(usbdev);
+ return -ENODEV;
+ }
+
/*
* Allocate parport interface
*/
diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c
index e499b86..88f26ac 100644
--- a/drivers/usb/musb/musb_cppi41.c
+++ b/drivers/usb/musb/musb_cppi41.c
@@ -250,8 +250,27 @@
transferred < cppi41_channel->packet_sz)
cppi41_channel->prog_len = 0;
- if (cppi41_channel->is_tx)
- empty = musb_is_tx_fifo_empty(hw_ep);
+ if (cppi41_channel->is_tx) {
+ u8 type;
+
+ if (is_host_active(musb))
+ type = hw_ep->out_qh->type;
+ else
+ type = hw_ep->ep_in.type;
+
+ if (type == USB_ENDPOINT_XFER_ISOC)
+ /*
+ * Don't use the early-TX-interrupt workaround below
+ * for Isoch transfter. Since Isoch are periodic
+ * transfer, by the time the next transfer is
+ * scheduled, the current one should be done already.
+ *
+ * This avoids audio playback underrun issue.
+ */
+ empty = true;
+ else
+ empty = musb_is_tx_fifo_empty(hw_ep);
+ }
if (!cppi41_channel->is_tx || empty) {
cppi41_trans_done(cppi41_channel);
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 13d5614..0d843e0 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -138,6 +138,7 @@
"Could not flush host TX%d fifo: csr: %04x\n",
ep->epnum, csr))
return;
+ mdelay(1);
}
}
diff --git a/drivers/usb/musb/tusb6010_omap.c b/drivers/usb/musb/tusb6010_omap.c
index 4c82077..6020024 100644
--- a/drivers/usb/musb/tusb6010_omap.c
+++ b/drivers/usb/musb/tusb6010_omap.c
@@ -220,6 +220,7 @@
u32 dma_remaining;
int src_burst, dst_burst;
u16 csr;
+ u32 psize;
int ch;
s8 dmareq;
s8 sync_dev;
@@ -391,15 +392,19 @@
if (chdat->tx) {
/* Send transfer_packet_sz packets at a time */
- musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET,
- chdat->transfer_packet_sz);
+ psize = musb_readl(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET);
+ psize &= ~0x7ff;
+ psize |= chdat->transfer_packet_sz;
+ musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET, psize);
musb_writel(ep_conf, TUSB_EP_TX_OFFSET,
TUSB_EP_CONFIG_XFR_SIZE(chdat->transfer_len));
} else {
/* Receive transfer_packet_sz packets at a time */
- musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET,
- chdat->transfer_packet_sz << 16);
+ psize = musb_readl(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET);
+ psize &= ~(0x7ff << 16);
+ psize |= (chdat->transfer_packet_sz << 16);
+ musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET, psize);
musb_writel(ep_conf, TUSB_EP_RX_OFFSET,
TUSB_EP_CONFIG_XFR_SIZE(chdat->transfer_len));
diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c
index d82fa36..005da08 100644
--- a/drivers/usb/renesas_usbhs/common.c
+++ b/drivers/usb/renesas_usbhs/common.c
@@ -733,8 +733,10 @@
struct usbhs_priv *priv = dev_get_drvdata(dev);
struct platform_device *pdev = usbhs_priv_to_pdev(priv);
- if (!usbhsc_flags_has(priv, USBHSF_RUNTIME_PWCTRL))
+ if (!usbhsc_flags_has(priv, USBHSF_RUNTIME_PWCTRL)) {
usbhsc_power_ctrl(priv, 1);
+ usbhs_mod_autonomy_mode(priv);
+ }
usbhs_platform_call(priv, phy_reset, pdev);
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
index efc4fae..8647d2c 100644
--- a/drivers/usb/renesas_usbhs/mod_gadget.c
+++ b/drivers/usb/renesas_usbhs/mod_gadget.c
@@ -37,6 +37,7 @@
struct usbhsg_uep {
struct usb_ep ep;
struct usbhs_pipe *pipe;
+ spinlock_t lock; /* protect the pipe */
char ep_name[EP_NAME_SIZE];
@@ -638,10 +639,16 @@
static int usbhsg_ep_disable(struct usb_ep *ep)
{
struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep);
- struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);
+ struct usbhs_pipe *pipe;
+ unsigned long flags;
+ int ret = 0;
- if (!pipe)
- return -EINVAL;
+ spin_lock_irqsave(&uep->lock, flags);
+ pipe = usbhsg_uep_to_pipe(uep);
+ if (!pipe) {
+ ret = -EINVAL;
+ goto out;
+ }
usbhsg_pipe_disable(uep);
usbhs_pipe_free(pipe);
@@ -649,6 +656,9 @@
uep->pipe->mod_private = NULL;
uep->pipe = NULL;
+out:
+ spin_unlock_irqrestore(&uep->lock, flags);
+
return 0;
}
@@ -698,8 +708,11 @@
{
struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep);
struct usbhsg_request *ureq = usbhsg_req_to_ureq(req);
- struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep);
+ struct usbhs_pipe *pipe;
+ unsigned long flags;
+ spin_lock_irqsave(&uep->lock, flags);
+ pipe = usbhsg_uep_to_pipe(uep);
if (pipe)
usbhs_pkt_pop(pipe, usbhsg_ureq_to_pkt(ureq));
@@ -708,6 +721,7 @@
* even if the pipe is NULL.
*/
usbhsg_queue_pop(uep, ureq, -ECONNRESET);
+ spin_unlock_irqrestore(&uep->lock, flags);
return 0;
}
@@ -854,10 +868,10 @@
{
struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv);
struct usbhs_mod *mod = usbhs_mod_get_current(priv);
- struct usbhsg_uep *dcp = usbhsg_gpriv_to_dcp(gpriv);
+ struct usbhsg_uep *uep;
struct device *dev = usbhs_priv_to_dev(priv);
unsigned long flags;
- int ret = 0;
+ int ret = 0, i;
/******************** spin lock ********************/
usbhs_lock(priv, flags);
@@ -889,7 +903,9 @@
usbhs_sys_set_test_mode(priv, 0);
usbhs_sys_function_ctrl(priv, 0);
- usbhsg_ep_disable(&dcp->ep);
+ /* disable all eps */
+ usbhsg_for_each_uep_with_dcp(uep, gpriv, i)
+ usbhsg_ep_disable(&uep->ep);
dev_dbg(dev, "stop gadget\n");
@@ -1072,6 +1088,7 @@
ret = -ENOMEM;
goto usbhs_mod_gadget_probe_err_gpriv;
}
+ spin_lock_init(&uep->lock);
gpriv->transceiver = usb_get_phy(USB_PHY_TYPE_UNDEFINED);
dev_info(dev, "%stransceiver found\n",
diff --git a/drivers/usb/serial/ark3116.c b/drivers/usb/serial/ark3116.c
index 7812052..754fc3e 100644
--- a/drivers/usb/serial/ark3116.c
+++ b/drivers/usb/serial/ark3116.c
@@ -373,23 +373,29 @@
dev_dbg(&port->dev,
"%s - usb_serial_generic_open failed: %d\n",
__func__, result);
- goto err_out;
+ goto err_free;
}
/* remove any data still left: also clears error state */
ark3116_read_reg(serial, UART_RX, buf);
/* read modem status */
- priv->msr = ark3116_read_reg(serial, UART_MSR, buf);
+ result = ark3116_read_reg(serial, UART_MSR, buf);
+ if (result < 0)
+ goto err_close;
+ priv->msr = *buf;
+
/* read line status */
- priv->lsr = ark3116_read_reg(serial, UART_LSR, buf);
+ result = ark3116_read_reg(serial, UART_LSR, buf);
+ if (result < 0)
+ goto err_close;
+ priv->lsr = *buf;
result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
if (result) {
dev_err(&port->dev, "submit irq_in urb failed %d\n",
result);
- ark3116_close(port);
- goto err_out;
+ goto err_close;
}
/* activate interrupts */
@@ -402,8 +408,15 @@
if (tty)
ark3116_set_termios(tty, port, NULL);
-err_out:
kfree(buf);
+
+ return 0;
+
+err_close:
+ usb_serial_generic_close(port);
+err_free:
+ kfree(buf);
+
return result;
}
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 33cec50..41a6513 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -134,6 +134,8 @@
{ USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */
{ USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */
{ USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */
+ { USB_DEVICE(0x10C4, 0x8A5E) }, /* CEL EM3588 ZigBee USB Stick Long Range */
+ { USB_DEVICE(0x10C4, 0x8B34) }, /* Qivicon ZigBee USB Radio Stick */
{ USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
{ USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
{ USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
index e0b1fe2f..be93b9f 100644
--- a/drivers/usb/serial/digi_acceleport.c
+++ b/drivers/usb/serial/digi_acceleport.c
@@ -1399,25 +1399,30 @@
{
struct usb_serial_port *port = urb->context;
struct digi_port *priv = usb_get_serial_port_data(port);
- int opcode = ((unsigned char *)urb->transfer_buffer)[0];
- int len = ((unsigned char *)urb->transfer_buffer)[1];
- int port_status = ((unsigned char *)urb->transfer_buffer)[2];
- unsigned char *data = ((unsigned char *)urb->transfer_buffer) + 3;
+ unsigned char *buf = urb->transfer_buffer;
+ int opcode;
+ int len;
+ int port_status;
+ unsigned char *data;
int flag, throttled;
- int status = urb->status;
-
- /* do not process callbacks on closed ports */
- /* but do continue the read chain */
- if (urb->status == -ENOENT)
- return 0;
/* short/multiple packet check */
+ if (urb->actual_length < 2) {
+ dev_warn(&port->dev, "short packet received\n");
+ return -1;
+ }
+
+ opcode = buf[0];
+ len = buf[1];
+
if (urb->actual_length != len + 2) {
- dev_err(&port->dev, "%s: INCOMPLETE OR MULTIPLE PACKET, "
- "status=%d, port=%d, opcode=%d, len=%d, "
- "actual_length=%d, status=%d\n", __func__, status,
- priv->dp_port_num, opcode, len, urb->actual_length,
- port_status);
+ dev_err(&port->dev, "malformed packet received: port=%d, opcode=%d, len=%d, actual_length=%u\n",
+ priv->dp_port_num, opcode, len, urb->actual_length);
+ return -1;
+ }
+
+ if (opcode == DIGI_CMD_RECEIVE_DATA && len < 1) {
+ dev_err(&port->dev, "malformed data packet received\n");
return -1;
}
@@ -1431,6 +1436,9 @@
/* receive data */
if (opcode == DIGI_CMD_RECEIVE_DATA) {
+ port_status = buf[2];
+ data = &buf[3];
+
/* get flag from port_status */
flag = 0;
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 19a9811..e0385d6 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -809,10 +809,10 @@
{ USB_DEVICE(FTDI_VID, FTDI_PROPOX_ISPCABLEIII_PID) },
{ USB_DEVICE(FTDI_VID, CYBER_CORTEX_AV_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
- { USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_PID),
- .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
- { USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_H_PID),
- .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ { USB_DEVICE_INTERFACE_NUMBER(OLIMEX_VID, OLIMEX_ARM_USB_OCD_PID, 1) },
+ { USB_DEVICE_INTERFACE_NUMBER(OLIMEX_VID, OLIMEX_ARM_USB_OCD_H_PID, 1) },
+ { USB_DEVICE_INTERFACE_NUMBER(OLIMEX_VID, OLIMEX_ARM_USB_TINY_PID, 1) },
+ { USB_DEVICE_INTERFACE_NUMBER(OLIMEX_VID, OLIMEX_ARM_USB_TINY_H_PID, 1) },
{ USB_DEVICE(FIC_VID, FIC_NEO1973_DEBUG_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(FTDI_VID, FTDI_OOCDLINK_PID),
@@ -873,6 +873,7 @@
{ USB_DEVICE_AND_INTERFACE_INFO(MICROCHIP_VID, MICROCHIP_USB_BOARD_PID,
USB_CLASS_VENDOR_SPEC,
USB_SUBCLASS_VENDOR_SPEC, 0x00) },
+ { USB_DEVICE_INTERFACE_NUMBER(ACTEL_VID, MICROSEMI_ARROW_SF2PLUS_BOARD_PID, 2) },
{ USB_DEVICE(JETI_VID, JETI_SPC1201_PID) },
{ USB_DEVICE(MARVELL_VID, MARVELL_SHEEVAPLUG_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
@@ -1439,10 +1440,13 @@
FTDI_SIO_GET_LATENCY_TIMER_REQUEST_TYPE,
0, priv->interface,
buf, 1, WDR_TIMEOUT);
- if (rv < 0)
+ if (rv < 1) {
dev_err(&port->dev, "Unable to read latency timer: %i\n", rv);
- else
+ if (rv >= 0)
+ rv = -EIO;
+ } else {
priv->latency = buf[0];
+ }
kfree(buf);
@@ -1504,9 +1508,9 @@
(new_serial.flags & ASYNC_FLAGS));
priv->custom_divisor = new_serial.custom_divisor;
+check_and_exit:
write_latency_timer(port);
-check_and_exit:
if ((old_priv.flags & ASYNC_SPD_MASK) !=
(priv->flags & ASYNC_SPD_MASK)) {
if ((priv->flags & ASYNC_SPD_MASK) == ASYNC_SPD_HI)
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 48ee04c..4fcf1ce 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -873,9 +873,17 @@
#define FIC_VID 0x1457
#define FIC_NEO1973_DEBUG_PID 0x5118
+/*
+ * Actel / Microsemi
+ */
+#define ACTEL_VID 0x1514
+#define MICROSEMI_ARROW_SF2PLUS_BOARD_PID 0x2008
+
/* Olimex */
#define OLIMEX_VID 0x15BA
#define OLIMEX_ARM_USB_OCD_PID 0x0003
+#define OLIMEX_ARM_USB_TINY_PID 0x0004
+#define OLIMEX_ARM_USB_TINY_H_PID 0x002a
#define OLIMEX_ARM_USB_OCD_H_PID 0x002b
/*
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
index b63a6c38..749e1b6 100644
--- a/drivers/usb/serial/io_edgeport.c
+++ b/drivers/usb/serial/io_edgeport.c
@@ -492,20 +492,24 @@
int result;
struct usb_serial *serial = ep->serial;
struct edgeport_product_info *product_info = &ep->product_info;
- struct edge_compatibility_descriptor *epic = &ep->epic_descriptor;
+ struct edge_compatibility_descriptor *epic;
struct edge_compatibility_bits *bits;
struct device *dev = &serial->dev->dev;
ep->is_epic = 0;
+
+ epic = kmalloc(sizeof(*epic), GFP_KERNEL);
+ if (!epic)
+ return -ENOMEM;
+
result = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
USB_REQUEST_ION_GET_EPIC_DESC,
0xC0, 0x00, 0x00,
- &ep->epic_descriptor,
- sizeof(struct edge_compatibility_descriptor),
+ epic, sizeof(*epic),
300);
-
- if (result > 0) {
+ if (result == sizeof(*epic)) {
ep->is_epic = 1;
+ memcpy(&ep->epic_descriptor, epic, sizeof(*epic));
memset(product_info, 0, sizeof(struct edgeport_product_info));
product_info->NumPorts = epic->NumPorts;
@@ -534,8 +538,16 @@
dev_dbg(dev, " IOSPWriteLCR : %s\n", bits->IOSPWriteLCR ? "TRUE": "FALSE");
dev_dbg(dev, " IOSPSetBaudRate : %s\n", bits->IOSPSetBaudRate ? "TRUE": "FALSE");
dev_dbg(dev, " TrueEdgeport : %s\n", bits->TrueEdgeport ? "TRUE": "FALSE");
+
+ result = 0;
+ } else if (result >= 0) {
+ dev_warn(&serial->interface->dev, "short epic descriptor received: %d\n",
+ result);
+ result = -EIO;
}
+ kfree(epic);
+
return result;
}
@@ -2097,8 +2109,7 @@
* rom_read
* reads a number of bytes from the Edgeport device starting at the given
* address.
- * If successful returns the number of bytes read, otherwise it returns
- * a negative error number of the problem.
+ * Returns zero on success or a negative error number.
****************************************************************************/
static int rom_read(struct usb_serial *serial, __u16 extAddr,
__u16 addr, __u16 length, __u8 *data)
@@ -2123,12 +2134,17 @@
USB_REQUEST_ION_READ_ROM,
0xC0, addr, extAddr, transfer_buffer,
current_length, 300);
- if (result < 0)
+ if (result < current_length) {
+ if (result >= 0)
+ result = -EIO;
break;
+ }
memcpy(data, transfer_buffer, current_length);
length -= current_length;
addr += current_length;
data += current_length;
+
+ result = 0;
}
kfree(transfer_buffer);
@@ -2585,9 +2601,10 @@
EDGE_MANUF_DESC_LEN,
(__u8 *)(&edge_serial->manuf_descriptor));
- if (response < 1)
- dev_err(dev, "error in getting manufacturer descriptor\n");
- else {
+ if (response < 0) {
+ dev_err(dev, "error in getting manufacturer descriptor: %d\n",
+ response);
+ } else {
char string[30];
dev_dbg(dev, "**Manufacturer Descriptor\n");
dev_dbg(dev, " RomSize: %dK\n",
@@ -2644,9 +2661,10 @@
EDGE_BOOT_DESC_LEN,
(__u8 *)(&edge_serial->boot_descriptor));
- if (response < 1)
- dev_err(dev, "error in getting boot descriptor\n");
- else {
+ if (response < 0) {
+ dev_err(dev, "error in getting boot descriptor: %d\n",
+ response);
+ } else {
dev_dbg(dev, "**Boot Descriptor:\n");
dev_dbg(dev, " BootCodeLength: %d\n",
le16_to_cpu(edge_serial->boot_descriptor.BootCodeLength));
@@ -2789,7 +2807,7 @@
dev_info(&serial->dev->dev, "%s detected\n", edge_serial->name);
/* Read the epic descriptor */
- if (get_epic_descriptor(edge_serial) <= 0) {
+ if (get_epic_descriptor(edge_serial) < 0) {
/* memcpy descriptor to Supports structures */
memcpy(&edge_serial->epic_descriptor.Supports, descriptor,
sizeof(struct edge_compatibility_bits));
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
index f1a8fdc..e98532f 100644
--- a/drivers/usb/serial/io_ti.c
+++ b/drivers/usb/serial/io_ti.c
@@ -2349,8 +2349,11 @@
if (!baud) {
/* pick a default, any default... */
baud = 9600;
- } else
+ } else {
+ /* Avoid a zero divisor. */
+ baud = min(baud, 461550);
tty_encode_baud_rate(tty, baud, baud);
+ }
edge_port->baud_rate = baud;
config->wBaudRate = (__u16)((461550L + baud/2) / baud);
diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c
index 930be98..6b09424 100644
--- a/drivers/usb/serial/keyspan_pda.c
+++ b/drivers/usb/serial/keyspan_pda.c
@@ -139,6 +139,7 @@
{
struct usb_serial_port *port = urb->context;
unsigned char *data = urb->transfer_buffer;
+ unsigned int len = urb->actual_length;
int retval;
int status = urb->status;
struct keyspan_pda_private *priv;
@@ -159,18 +160,26 @@
goto exit;
}
+ if (len < 1) {
+ dev_warn(&port->dev, "short message received\n");
+ goto exit;
+ }
+
/* see if the message is data or a status interrupt */
switch (data[0]) {
case 0:
/* rest of message is rx data */
- if (urb->actual_length) {
- tty_insert_flip_string(&port->port, data + 1,
- urb->actual_length - 1);
- tty_flip_buffer_push(&port->port);
- }
+ if (len < 2)
+ break;
+ tty_insert_flip_string(&port->port, data + 1, len - 1);
+ tty_flip_buffer_push(&port->port);
break;
case 1:
/* status interrupt */
+ if (len < 3) {
+ dev_warn(&port->dev, "short interrupt message received\n");
+ break;
+ }
dev_dbg(&port->dev, "rx int, d1=%d, d2=%d\n", data[1], data[2]);
switch (data[1]) {
case 1: /* modemline change */
diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c
index 89726f7..a6c07c6b 100644
--- a/drivers/usb/serial/mct_u232.c
+++ b/drivers/usb/serial/mct_u232.c
@@ -189,7 +189,7 @@
return -ENOMEM;
divisor = mct_u232_calculate_baud_rate(serial, value, &speed);
- put_unaligned_le32(cpu_to_le32(divisor), buf);
+ put_unaligned_le32(divisor, buf);
rc = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
MCT_U232_SET_BAUD_RATE_REQUEST,
MCT_U232_SET_REQUEST_TYPE,
@@ -322,8 +322,12 @@
MCT_U232_GET_REQUEST_TYPE,
0, 0, buf, MCT_U232_GET_MODEM_STAT_SIZE,
WDR_TIMEOUT);
- if (rc < 0) {
+ if (rc < MCT_U232_GET_MODEM_STAT_SIZE) {
dev_err(&port->dev, "Get MODEM STATus failed (error = %d)\n", rc);
+
+ if (rc >= 0)
+ rc = -EIO;
+
*msr = 0;
} else {
*msr = buf[0];
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 42cc72e..2a99443 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -233,6 +233,14 @@
#define BANDRICH_PRODUCT_1012 0x1012
#define QUALCOMM_VENDOR_ID 0x05C6
+/* These Quectel products use Qualcomm's vendor ID */
+#define QUECTEL_PRODUCT_UC20 0x9003
+#define QUECTEL_PRODUCT_UC15 0x9090
+
+#define QUECTEL_VENDOR_ID 0x2c7c
+/* These Quectel products use Quectel's vendor ID */
+#define QUECTEL_PRODUCT_EC21 0x0121
+#define QUECTEL_PRODUCT_EC25 0x0125
#define CMOTECH_VENDOR_ID 0x16d8
#define CMOTECH_PRODUCT_6001 0x6001
@@ -273,6 +281,7 @@
#define TELIT_PRODUCT_LE922_USBCFG0 0x1042
#define TELIT_PRODUCT_LE922_USBCFG3 0x1043
#define TELIT_PRODUCT_LE922_USBCFG5 0x1045
+#define TELIT_PRODUCT_ME910 0x1100
#define TELIT_PRODUCT_LE920 0x1200
#define TELIT_PRODUCT_LE910 0x1201
#define TELIT_PRODUCT_LE910_USBCFG4 0x1206
@@ -632,6 +641,11 @@
.reserved = BIT(5) | BIT(6),
};
+static const struct option_blacklist_info telit_me910_blacklist = {
+ .sendsetup = BIT(0),
+ .reserved = BIT(1) | BIT(3),
+};
+
static const struct option_blacklist_info telit_le910_blacklist = {
.sendsetup = BIT(0),
.reserved = BIT(1) | BIT(2),
@@ -1161,7 +1175,14 @@
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
- { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9003), /* Quectel UC20 */
+ /* Quectel products using Qualcomm vendor ID */
+ { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC15)},
+ { USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC20),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ /* Quectel products using Quectel vendor ID */
+ { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25),
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
@@ -1220,6 +1241,8 @@
.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG5, 0xff),
.driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
+ { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
+ .driver_info = (kernel_ulong_t)&telit_me910_blacklist },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
.driver_info = (kernel_ulong_t)&telit_le910_blacklist },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4),
@@ -1854,6 +1877,10 @@
.driver_info = (kernel_ulong_t)&four_g_w100_blacklist
},
{ USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, SPEEDUP_PRODUCT_SU9800, 0xff) },
+ { USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, 0x9801, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+ { USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, 0x9803, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE(LONGCHEER_VENDOR_ID, ZOOM_PRODUCT_4597) },
{ USB_DEVICE(LONGCHEER_VENDOR_ID, IBALL_3_5G_CONNECT) },
{ USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) },
@@ -1996,8 +2023,11 @@
{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x02, 0x01) },
{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) },
{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7d04, 0xff) }, /* D-Link DWM-158 */
+ { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7d0e, 0xff) }, /* D-Link DWM-157 C1 */
{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e19, 0xff), /* D-Link DWM-221 B1 */
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e35, 0xff), /* D-Link DWM-222 */
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 1db4b61..a51b283 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -49,6 +49,7 @@
{ USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID) },
{ USB_DEVICE(IODATA_VENDOR_ID, IODATA_PRODUCT_ID_RSAQ5) },
{ USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID) },
+ { USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_UC485) },
{ USB_DEVICE(ATEN_VENDOR_ID, ATEN_PRODUCT_ID2) },
{ USB_DEVICE(ATEN_VENDOR_ID2, ATEN_PRODUCT_ID) },
{ USB_DEVICE(ELCOM_VENDOR_ID, ELCOM_PRODUCT_ID) },
diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
index 09d9be8..3b5a15d 100644
--- a/drivers/usb/serial/pl2303.h
+++ b/drivers/usb/serial/pl2303.h
@@ -27,6 +27,7 @@
#define ATEN_VENDOR_ID 0x0557
#define ATEN_VENDOR_ID2 0x0547
#define ATEN_PRODUCT_ID 0x2008
+#define ATEN_PRODUCT_UC485 0x2021
#define ATEN_PRODUCT_ID2 0x2118
#define IODATA_VENDOR_ID 0x04bb
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index 696458d..652b433 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -158,10 +158,13 @@
{DEVICE_SWI(0x1199, 0x9056)}, /* Sierra Wireless Modem */
{DEVICE_SWI(0x1199, 0x9060)}, /* Sierra Wireless Modem */
{DEVICE_SWI(0x1199, 0x9061)}, /* Sierra Wireless Modem */
+ {DEVICE_SWI(0x1199, 0x9063)}, /* Sierra Wireless EM7305 */
{DEVICE_SWI(0x1199, 0x9070)}, /* Sierra Wireless MC74xx */
{DEVICE_SWI(0x1199, 0x9071)}, /* Sierra Wireless MC74xx */
{DEVICE_SWI(0x1199, 0x9078)}, /* Sierra Wireless EM74xx */
{DEVICE_SWI(0x1199, 0x9079)}, /* Sierra Wireless EM74xx */
+ {DEVICE_SWI(0x1199, 0x907a)}, /* Sierra Wireless EM74xx QDL */
+ {DEVICE_SWI(0x1199, 0x907b)}, /* Sierra Wireless EM74xx */
{DEVICE_SWI(0x413c, 0x81a2)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */
{DEVICE_SWI(0x413c, 0x81a3)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */
{DEVICE_SWI(0x413c, 0x81a4)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
@@ -169,6 +172,8 @@
{DEVICE_SWI(0x413c, 0x81a9)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
{DEVICE_SWI(0x413c, 0x81b1)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */
{DEVICE_SWI(0x413c, 0x81b3)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
+ {DEVICE_SWI(0x413c, 0x81b5)}, /* Dell Wireless 5811e QDL */
+ {DEVICE_SWI(0x413c, 0x81b6)}, /* Dell Wireless 5811e QDL */
/* Huawei devices */
{DEVICE_HWI(0x03f0, 0x581d)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Modem (Huawei me906e) */
diff --git a/drivers/usb/serial/quatech2.c b/drivers/usb/serial/quatech2.c
index a3ed07c..af0c872 100644
--- a/drivers/usb/serial/quatech2.c
+++ b/drivers/usb/serial/quatech2.c
@@ -188,22 +188,22 @@
}
-static inline int qt2_getdevice(struct usb_device *dev, u8 *data)
-{
- return usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
- QT_SET_GET_DEVICE, 0xc0, 0, 0,
- data, 3, QT2_USB_TIMEOUT);
-}
-
static inline int qt2_getregister(struct usb_device *dev,
u8 uart,
u8 reg,
u8 *data)
{
- return usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
- QT_SET_GET_REGISTER, 0xc0, reg,
- uart, data, sizeof(*data), QT2_USB_TIMEOUT);
+ int ret;
+ ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
+ QT_SET_GET_REGISTER, 0xc0, reg,
+ uart, data, sizeof(*data), QT2_USB_TIMEOUT);
+ if (ret < sizeof(*data)) {
+ if (ret >= 0)
+ ret = -EIO;
+ }
+
+ return ret;
}
static inline int qt2_setregister(struct usb_device *dev,
@@ -372,9 +372,11 @@
0xc0, 0,
device_port, data, 2, QT2_USB_TIMEOUT);
- if (status < 0) {
+ if (status < 2) {
dev_err(&port->dev, "%s - open port failed %i\n", __func__,
status);
+ if (status >= 0)
+ status = -EIO;
kfree(data);
return status;
}
diff --git a/drivers/usb/serial/ssu100.c b/drivers/usb/serial/ssu100.c
index 70a098d..886e129 100644
--- a/drivers/usb/serial/ssu100.c
+++ b/drivers/usb/serial/ssu100.c
@@ -80,9 +80,17 @@
static inline int ssu100_getdevice(struct usb_device *dev, u8 *data)
{
- return usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
- QT_SET_GET_DEVICE, 0xc0, 0, 0,
- data, 3, 300);
+ int ret;
+
+ ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
+ QT_SET_GET_DEVICE, 0xc0, 0, 0,
+ data, 3, 300);
+ if (ret < 3) {
+ if (ret >= 0)
+ ret = -EIO;
+ }
+
+ return ret;
}
static inline int ssu100_getregister(struct usb_device *dev,
@@ -90,10 +98,17 @@
unsigned short reg,
u8 *data)
{
- return usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
- QT_SET_GET_REGISTER, 0xc0, reg,
- uart, data, sizeof(*data), 300);
+ int ret;
+ ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
+ QT_SET_GET_REGISTER, 0xc0, reg,
+ uart, data, sizeof(*data), 300);
+ if (ret < sizeof(*data)) {
+ if (ret >= 0)
+ ret = -EIO;
+ }
+
+ return ret;
}
@@ -289,8 +304,10 @@
QT_OPEN_CLOSE_CHANNEL,
QT_TRANSFER_IN, 0x01,
0, data, 2, 300);
- if (result < 0) {
+ if (result < 2) {
dev_dbg(&port->dev, "%s - open failed %i\n", __func__, result);
+ if (result >= 0)
+ result = -EIO;
kfree(data);
return result;
}
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index 535fcfa..fe7f5ac 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -1352,13 +1352,10 @@
(USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT),
value, moduleid, data, size, 1000);
- if (status == size)
- status = 0;
+ if (status < 0)
+ return status;
- if (status > 0)
- status = -ECOMM;
-
- return status;
+ return 0;
}
@@ -1374,8 +1371,7 @@
if (status == size)
status = 0;
-
- if (status > 0)
+ else if (status >= 0)
status = -ECOMM;
return status;
diff --git a/drivers/usb/storage/ene_ub6250.c b/drivers/usb/storage/ene_ub6250.c
index f3cf4ce..091e8ec 100644
--- a/drivers/usb/storage/ene_ub6250.c
+++ b/drivers/usb/storage/ene_ub6250.c
@@ -446,6 +446,10 @@
#define SD_BLOCK_LEN 9
struct ene_ub6250_info {
+
+ /* I/O bounce buffer */
+ u8 *bbuf;
+
/* for 6250 code */
struct SD_STATUS SD_Status;
struct MS_STATUS MS_Status;
@@ -493,8 +497,11 @@
static void ene_ub6250_info_destructor(void *extra)
{
+ struct ene_ub6250_info *info = (struct ene_ub6250_info *) extra;
+
if (!extra)
return;
+ kfree(info->bbuf);
}
static int ene_send_scsi_cmd(struct us_data *us, u8 fDir, void *buf, int use_sg)
@@ -858,8 +865,9 @@
u8 PageNum, u32 *PageBuf, struct ms_lib_type_extdat *ExtraDat)
{
struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
+ struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
+ u8 *bbuf = info->bbuf;
int result;
- u8 ExtBuf[4];
u32 bn = PhyBlockAddr * 0x20 + PageNum;
/* printk(KERN_INFO "MS --- MS_ReaderReadPage,
@@ -902,7 +910,7 @@
bcb->CDB[2] = (unsigned char)(PhyBlockAddr>>16);
bcb->CDB[6] = 0x01;
- result = ene_send_scsi_cmd(us, FDIR_READ, &ExtBuf, 0);
+ result = ene_send_scsi_cmd(us, FDIR_READ, bbuf, 0);
if (result != USB_STOR_XFER_GOOD)
return USB_STOR_TRANSPORT_ERROR;
@@ -911,9 +919,9 @@
ExtraDat->status0 = 0x10; /* Not yet,fireware support */
ExtraDat->status1 = 0x00; /* Not yet,fireware support */
- ExtraDat->ovrflg = ExtBuf[0];
- ExtraDat->mngflg = ExtBuf[1];
- ExtraDat->logadr = memstick_logaddr(ExtBuf[2], ExtBuf[3]);
+ ExtraDat->ovrflg = bbuf[0];
+ ExtraDat->mngflg = bbuf[1];
+ ExtraDat->logadr = memstick_logaddr(bbuf[2], bbuf[3]);
return USB_STOR_TRANSPORT_GOOD;
}
@@ -1339,8 +1347,9 @@
u8 PageNum, struct ms_lib_type_extdat *ExtraDat)
{
struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
+ struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
+ u8 *bbuf = info->bbuf;
int result;
- u8 ExtBuf[4];
/* printk("MS_LibReadExtra --- PhyBlock = %x, PageNum = %x\n", PhyBlock, PageNum); */
memset(bcb, 0, sizeof(struct bulk_cb_wrap));
@@ -1355,7 +1364,7 @@
bcb->CDB[2] = (unsigned char)(PhyBlock>>16);
bcb->CDB[6] = 0x01;
- result = ene_send_scsi_cmd(us, FDIR_READ, &ExtBuf, 0);
+ result = ene_send_scsi_cmd(us, FDIR_READ, bbuf, 0);
if (result != USB_STOR_XFER_GOOD)
return USB_STOR_TRANSPORT_ERROR;
@@ -1363,9 +1372,9 @@
ExtraDat->intr = 0x80; /* Not yet, waiting for fireware support */
ExtraDat->status0 = 0x10; /* Not yet, waiting for fireware support */
ExtraDat->status1 = 0x00; /* Not yet, waiting for fireware support */
- ExtraDat->ovrflg = ExtBuf[0];
- ExtraDat->mngflg = ExtBuf[1];
- ExtraDat->logadr = memstick_logaddr(ExtBuf[2], ExtBuf[3]);
+ ExtraDat->ovrflg = bbuf[0];
+ ExtraDat->mngflg = bbuf[1];
+ ExtraDat->logadr = memstick_logaddr(bbuf[2], bbuf[3]);
return USB_STOR_TRANSPORT_GOOD;
}
@@ -1569,9 +1578,9 @@
u16 PhyBlock, newblk, i;
u16 LogStart, LogEnde;
struct ms_lib_type_extdat extdat;
- u8 buf[0x200];
u32 count = 0, index = 0;
struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
+ u8 *bbuf = info->bbuf;
for (PhyBlock = 0; PhyBlock < info->MS_Lib.NumberOfPhyBlock;) {
ms_lib_phy_to_log_range(PhyBlock, &LogStart, &LogEnde);
@@ -1585,14 +1594,16 @@
}
if (count == PhyBlock) {
- ms_lib_read_extrablock(us, PhyBlock, 0, 0x80, &buf);
+ ms_lib_read_extrablock(us, PhyBlock, 0, 0x80,
+ bbuf);
count += 0x80;
}
index = (PhyBlock % 0x80) * 4;
- extdat.ovrflg = buf[index];
- extdat.mngflg = buf[index+1];
- extdat.logadr = memstick_logaddr(buf[index+2], buf[index+3]);
+ extdat.ovrflg = bbuf[index];
+ extdat.mngflg = bbuf[index+1];
+ extdat.logadr = memstick_logaddr(bbuf[index+2],
+ bbuf[index+3]);
if ((extdat.ovrflg & MS_REG_OVR_BKST) != MS_REG_OVR_BKST_OK) {
ms_lib_setacquired_errorblock(us, PhyBlock);
@@ -2075,9 +2086,9 @@
{
struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
int result;
- u8 buf[0x200];
u16 MSP_BlockSize, MSP_UserAreaBlocks;
struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
+ u8 *bbuf = info->bbuf;
printk(KERN_INFO "transport --- ENE_MSInit\n");
@@ -2096,13 +2107,13 @@
bcb->CDB[0] = 0xF1;
bcb->CDB[1] = 0x01;
- result = ene_send_scsi_cmd(us, FDIR_READ, &buf, 0);
+ result = ene_send_scsi_cmd(us, FDIR_READ, bbuf, 0);
if (result != USB_STOR_XFER_GOOD) {
printk(KERN_ERR "Execution MS Init Code Fail !!\n");
return USB_STOR_TRANSPORT_ERROR;
}
/* the same part to test ENE */
- info->MS_Status = *(struct MS_STATUS *)&buf[0];
+ info->MS_Status = *(struct MS_STATUS *) bbuf;
if (info->MS_Status.Insert && info->MS_Status.Ready) {
printk(KERN_INFO "Insert = %x\n", info->MS_Status.Insert);
@@ -2111,15 +2122,15 @@
printk(KERN_INFO "IsMSPHG = %x\n", info->MS_Status.IsMSPHG);
printk(KERN_INFO "WtP= %x\n", info->MS_Status.WtP);
if (info->MS_Status.IsMSPro) {
- MSP_BlockSize = (buf[6] << 8) | buf[7];
- MSP_UserAreaBlocks = (buf[10] << 8) | buf[11];
+ MSP_BlockSize = (bbuf[6] << 8) | bbuf[7];
+ MSP_UserAreaBlocks = (bbuf[10] << 8) | bbuf[11];
info->MSP_TotalBlock = MSP_BlockSize * MSP_UserAreaBlocks;
} else {
ms_card_init(us); /* Card is MS (to ms.c)*/
}
usb_stor_dbg(us, "MS Init Code OK !!\n");
} else {
- usb_stor_dbg(us, "MS Card Not Ready --- %x\n", buf[0]);
+ usb_stor_dbg(us, "MS Card Not Ready --- %x\n", bbuf[0]);
return USB_STOR_TRANSPORT_ERROR;
}
@@ -2129,9 +2140,9 @@
static int ene_sd_init(struct us_data *us)
{
int result;
- u8 buf[0x200];
struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
struct ene_ub6250_info *info = (struct ene_ub6250_info *) us->extra;
+ u8 *bbuf = info->bbuf;
usb_stor_dbg(us, "transport --- ENE_SDInit\n");
/* SD Init Part-1 */
@@ -2165,17 +2176,17 @@
bcb->Flags = US_BULK_FLAG_IN;
bcb->CDB[0] = 0xF1;
- result = ene_send_scsi_cmd(us, FDIR_READ, &buf, 0);
+ result = ene_send_scsi_cmd(us, FDIR_READ, bbuf, 0);
if (result != USB_STOR_XFER_GOOD) {
usb_stor_dbg(us, "Execution SD Init Code Fail !!\n");
return USB_STOR_TRANSPORT_ERROR;
}
- info->SD_Status = *(struct SD_STATUS *)&buf[0];
+ info->SD_Status = *(struct SD_STATUS *) bbuf;
if (info->SD_Status.Insert && info->SD_Status.Ready) {
struct SD_STATUS *s = &info->SD_Status;
- ene_get_card_status(us, (unsigned char *)&buf);
+ ene_get_card_status(us, bbuf);
usb_stor_dbg(us, "Insert = %x\n", s->Insert);
usb_stor_dbg(us, "Ready = %x\n", s->Ready);
usb_stor_dbg(us, "IsMMC = %x\n", s->IsMMC);
@@ -2183,7 +2194,7 @@
usb_stor_dbg(us, "HiSpeed = %x\n", s->HiSpeed);
usb_stor_dbg(us, "WtP = %x\n", s->WtP);
} else {
- usb_stor_dbg(us, "SD Card Not Ready --- %x\n", buf[0]);
+ usb_stor_dbg(us, "SD Card Not Ready --- %x\n", bbuf[0]);
return USB_STOR_TRANSPORT_ERROR;
}
return USB_STOR_TRANSPORT_GOOD;
@@ -2193,13 +2204,15 @@
static int ene_init(struct us_data *us)
{
int result;
- u8 misc_reg03 = 0;
+ u8 misc_reg03;
struct ene_ub6250_info *info = (struct ene_ub6250_info *)(us->extra);
+ u8 *bbuf = info->bbuf;
- result = ene_get_card_type(us, REG_CARD_STATUS, &misc_reg03);
+ result = ene_get_card_type(us, REG_CARD_STATUS, bbuf);
if (result != USB_STOR_XFER_GOOD)
return USB_STOR_TRANSPORT_ERROR;
+ misc_reg03 = bbuf[0];
if (misc_reg03 & 0x01) {
if (!info->SD_Status.Ready) {
result = ene_sd_init(us);
@@ -2316,8 +2329,9 @@
const struct usb_device_id *id)
{
int result;
- u8 misc_reg03 = 0;
+ u8 misc_reg03;
struct us_data *us;
+ struct ene_ub6250_info *info;
result = usb_stor_probe1(&us, intf, id,
(id - ene_ub6250_usb_ids) + ene_ub6250_unusual_dev_list,
@@ -2326,11 +2340,16 @@
return result;
/* FIXME: where should the code alloc extra buf ? */
- if (!us->extra) {
- us->extra = kzalloc(sizeof(struct ene_ub6250_info), GFP_KERNEL);
- if (!us->extra)
- return -ENOMEM;
- us->extra_destructor = ene_ub6250_info_destructor;
+ us->extra = kzalloc(sizeof(struct ene_ub6250_info), GFP_KERNEL);
+ if (!us->extra)
+ return -ENOMEM;
+ us->extra_destructor = ene_ub6250_info_destructor;
+
+ info = (struct ene_ub6250_info *)(us->extra);
+ info->bbuf = kmalloc(512, GFP_KERNEL);
+ if (!info->bbuf) {
+ kfree(us->extra);
+ return -ENOMEM;
}
us->transport_name = "ene_ub6250";
@@ -2342,12 +2361,13 @@
return result;
/* probe card type */
- result = ene_get_card_type(us, REG_CARD_STATUS, &misc_reg03);
+ result = ene_get_card_type(us, REG_CARD_STATUS, info->bbuf);
if (result != USB_STOR_XFER_GOOD) {
usb_stor_disconnect(intf);
return USB_STOR_TRANSPORT_ERROR;
}
+ misc_reg03 = info->bbuf[0];
if (!(misc_reg03 & 0x01)) {
pr_info("ums_eneub6250: This driver only supports SD/MS cards. "
"It does not support SM cards.\n");
diff --git a/drivers/usb/storage/isd200.c b/drivers/usb/storage/isd200.c
index 39afd70..7bb5f8d 100644
--- a/drivers/usb/storage/isd200.c
+++ b/drivers/usb/storage/isd200.c
@@ -1520,8 +1520,11 @@
/* Make sure driver was initialized */
- if (us->extra == NULL)
+ if (us->extra == NULL) {
usb_stor_dbg(us, "ERROR Driver not initialized\n");
+ srb->result = DID_ERROR << 16;
+ return;
+ }
scsi_set_resid(srb, 0);
/* scsi_bufflen might change in protocol translation to ata */
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index 53341a7..a37ed1e 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -123,9 +123,9 @@
/* Reported-by: Benjamin Tissoires <benjamin.tissoires@redhat.com> */
UNUSUAL_DEV(0x13fd, 0x3940, 0x0000, 0x9999,
"Initio Corporation",
- "",
+ "INIC-3069",
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
- US_FL_NO_ATA_1X),
+ US_FL_NO_ATA_1X | US_FL_IGNORE_RESIDUE),
/* Reported-by: Tom Arild Naess <tanaess@gmail.com> */
UNUSUAL_DEV(0x152d, 0x0539, 0x0000, 0x9999,
diff --git a/drivers/usb/typec/tcpm.c b/drivers/usb/typec/tcpm.c
index 3c0f7fd..80471e3 100644
--- a/drivers/usb/typec/tcpm.c
+++ b/drivers/usb/typec/tcpm.c
@@ -1372,6 +1372,7 @@
tcpm_set_current_limit(port,
port->current_limit,
port->supply_voltage);
+ port->explicit_contract = true;
tcpm_set_state(port, SNK_READY, 0);
} else {
/*
@@ -2134,9 +2135,23 @@
}
}
+enum typec_pwr_opmode tcpm_get_pwr_opmode(enum typec_cc_status cc)
+{
+ switch (cc) {
+ case TYPEC_CC_RP_1_5:
+ return TYPEC_PWR_MODE_1_5A;
+ case TYPEC_CC_RP_3_0:
+ return TYPEC_PWR_MODE_3_0A;
+ case TYPEC_CC_RP_DEF:
+ default:
+ return TYPEC_PWR_MODE_USB;
+ }
+}
+
static void run_state_machine(struct tcpm_port *port)
{
int ret;
+ enum typec_pwr_opmode opmode;
port->enter_state = port->state;
switch (port->state) {
@@ -2214,7 +2229,8 @@
ret < 0 ? 0 : PD_T_PS_SOURCE_ON);
break;
case SRC_STARTUP:
- typec_set_pwr_opmode(port->typec_port, TYPEC_PWR_MODE_USB);
+ opmode = tcpm_get_pwr_opmode(tcpm_rp_cc(port));
+ typec_set_pwr_opmode(port->typec_port, opmode);
port->pwr_opmode = TYPEC_PWR_MODE_USB;
port->caps_count = 0;
port->message_id = 0;
@@ -2374,7 +2390,9 @@
break;
case SNK_STARTUP:
/* XXX: callback into infrastructure */
- typec_set_pwr_opmode(port->typec_port, TYPEC_PWR_MODE_USB);
+ opmode = tcpm_get_pwr_opmode(port->polarity ?
+ port->cc2 : port->cc1);
+ typec_set_pwr_opmode(port->typec_port, opmode);
port->pwr_opmode = TYPEC_PWR_MODE_USB;
port->message_id = 0;
port->rx_msgid = -1;
@@ -2454,9 +2472,11 @@
break;
case SNK_READY:
port->try_snk_count = 0;
- port->explicit_contract = true;
- typec_set_pwr_opmode(port->typec_port, TYPEC_PWR_MODE_PD);
- port->pwr_opmode = TYPEC_PWR_MODE_PD;
+ if (port->explicit_contract) {
+ typec_set_pwr_opmode(port->typec_port,
+ TYPEC_PWR_MODE_PD);
+ port->pwr_opmode = TYPEC_PWR_MODE_PD;
+ }
tcpm_swap_complete(port, 0);
tcpm_typec_connect(port);
@@ -2966,6 +2986,7 @@
port->vbus_present = true;
switch (port->state) {
case SNK_TRANSITION_SINK_VBUS:
+ port->explicit_contract = true;
tcpm_set_state(port, SNK_READY, 0);
break;
case SNK_DISCOVERY:
diff --git a/drivers/usb/typec/typec.c b/drivers/usb/typec/typec.c
index da38ac7..def06b0 100644
--- a/drivers/usb/typec/typec.c
+++ b/drivers/usb/typec/typec.c
@@ -1207,6 +1207,11 @@
}
EXPORT_SYMBOL_GPL(typec_set_vconn_role);
+static int partner_match(struct device *dev, void *data)
+{
+ return is_typec_partner(dev);
+}
+
/**
* typec_set_pwr_opmode - Report changed power operation mode
* @port: The USB Type-C Port where the mode was changed
@@ -1220,12 +1225,26 @@
void typec_set_pwr_opmode(struct typec_port *port,
enum typec_pwr_opmode opmode)
{
+ struct device *partner_dev;
+
if (port->pwr_opmode == opmode)
return;
port->pwr_opmode = opmode;
sysfs_notify(&port->dev.kobj, NULL, "power_operation_mode");
kobject_uevent(&port->dev.kobj, KOBJ_CHANGE);
+
+ partner_dev = device_find_child(&port->dev, NULL, partner_match);
+ if (partner_dev) {
+ struct typec_partner *partner = to_typec_partner(partner_dev);
+
+ if (opmode == TYPEC_PWR_MODE_PD && !partner->usb_pd) {
+ partner->usb_pd = 1;
+ sysfs_notify(&partner_dev->kobj, NULL,
+ "supports_usb_power_delivery");
+ }
+ put_device(partner_dev);
+ }
}
EXPORT_SYMBOL_GPL(typec_set_pwr_opmode);
diff --git a/drivers/usb/usbip/stub_main.c b/drivers/usb/usbip/stub_main.c
index 44ab43f..af10f7b 100644
--- a/drivers/usb/usbip/stub_main.c
+++ b/drivers/usb/usbip/stub_main.c
@@ -262,7 +262,11 @@
kmem_cache_free(stub_priv_cache, priv);
kfree(urb->transfer_buffer);
+ urb->transfer_buffer = NULL;
+
kfree(urb->setup_packet);
+ urb->setup_packet = NULL;
+
usb_free_urb(urb);
}
}
diff --git a/drivers/usb/usbip/stub_tx.c b/drivers/usb/usbip/stub_tx.c
index dbcabc9..021003c 100644
--- a/drivers/usb/usbip/stub_tx.c
+++ b/drivers/usb/usbip/stub_tx.c
@@ -28,7 +28,11 @@
struct urb *urb = priv->urb;
kfree(urb->setup_packet);
+ urb->setup_packet = NULL;
+
kfree(urb->transfer_buffer);
+ urb->transfer_buffer = NULL;
+
list_del(&priv->list);
kmem_cache_free(stub_priv_cache, priv);
usb_free_urb(urb);
diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
index 7fbe19d..81b2b9f 100644
--- a/drivers/usb/usbip/vhci_hcd.c
+++ b/drivers/usb/usbip/vhci_hcd.c
@@ -215,14 +215,19 @@
static inline void hub_descriptor(struct usb_hub_descriptor *desc)
{
+ int width;
+
memset(desc, 0, sizeof(*desc));
desc->bDescriptorType = USB_DT_HUB;
- desc->bDescLength = 9;
desc->wHubCharacteristics = cpu_to_le16(
HUB_CHAR_INDV_PORT_LPSM | HUB_CHAR_COMMON_OCPM);
+
desc->bNbrPorts = VHCI_NPORTS;
- desc->u.hs.DeviceRemovable[0] = 0xff;
- desc->u.hs.DeviceRemovable[1] = 0xff;
+ BUILD_BUG_ON(VHCI_NPORTS > USB_MAXCHILDREN);
+ width = desc->bNbrPorts / 8 + 1;
+ desc->bDescLength = USB_DT_HUB_NONVAR_SIZE + 2 * width;
+ memset(&desc->u.hs.DeviceRemovable[0], 0, width);
+ memset(&desc->u.hs.DeviceRemovable[width], 0xff, width);
}
static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
diff --git a/drivers/usb/wusbcore/wa-hc.c b/drivers/usb/wusbcore/wa-hc.c
index 252c7bd..d01496f 100644
--- a/drivers/usb/wusbcore/wa-hc.c
+++ b/drivers/usb/wusbcore/wa-hc.c
@@ -39,6 +39,9 @@
int result;
struct device *dev = &iface->dev;
+ if (iface->cur_altsetting->desc.bNumEndpoints < 3)
+ return -ENODEV;
+
result = wa_rpipes_create(wa);
if (result < 0)
goto error_rpipes_create;
diff --git a/drivers/uwb/hwa-rc.c b/drivers/uwb/hwa-rc.c
index 0257f35..e75bbe5 100644
--- a/drivers/uwb/hwa-rc.c
+++ b/drivers/uwb/hwa-rc.c
@@ -825,6 +825,9 @@
struct hwarc *hwarc;
struct device *dev = &iface->dev;
+ if (iface->cur_altsetting->desc.bNumEndpoints < 1)
+ return -ENODEV;
+
result = -ENOMEM;
uwb_rc = uwb_rc_alloc();
if (uwb_rc == NULL) {
diff --git a/drivers/uwb/i1480/dfu/usb.c b/drivers/uwb/i1480/dfu/usb.c
index 2bfc846..a50cf45 100644
--- a/drivers/uwb/i1480/dfu/usb.c
+++ b/drivers/uwb/i1480/dfu/usb.c
@@ -341,6 +341,7 @@
static
int i1480_usb_probe(struct usb_interface *iface, const struct usb_device_id *id)
{
+ struct usb_device *udev = interface_to_usbdev(iface);
struct i1480_usb *i1480_usb;
struct i1480 *i1480;
struct device *dev = &iface->dev;
@@ -352,8 +353,8 @@
iface->cur_altsetting->desc.bInterfaceNumber);
goto error;
}
- if (iface->num_altsetting > 1
- && interface_to_usbdev(iface)->descriptor.idProduct == 0xbabe) {
+ if (iface->num_altsetting > 1 &&
+ le16_to_cpu(udev->descriptor.idProduct) == 0xbabe) {
/* Need altsetting #1 [HW QUIRK] or EP1 won't work */
result = usb_set_interface(interface_to_usbdev(iface), 0, 1);
if (result < 0)
@@ -362,6 +363,9 @@
result);
}
+ if (iface->cur_altsetting->desc.bNumEndpoints < 1)
+ return -ENODEV;
+
result = -ENOMEM;
i1480_usb = kzalloc(sizeof(*i1480_usb), GFP_KERNEL);
if (i1480_usb == NULL) {
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 9982cb1..b31b84f 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -562,8 +562,9 @@
} else if (cmd == VFIO_DEVICE_SET_IRQS) {
struct vfio_irq_set hdr;
+ size_t size;
u8 *data = NULL;
- int ret = 0;
+ int max, ret = 0;
minsz = offsetofend(struct vfio_irq_set, count);
@@ -571,23 +572,31 @@
return -EFAULT;
if (hdr.argsz < minsz || hdr.index >= VFIO_PCI_NUM_IRQS ||
+ hdr.count >= (U32_MAX - hdr.start) ||
hdr.flags & ~(VFIO_IRQ_SET_DATA_TYPE_MASK |
VFIO_IRQ_SET_ACTION_TYPE_MASK))
return -EINVAL;
- if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) {
- size_t size;
- int max = vfio_pci_get_irq_count(vdev, hdr.index);
+ max = vfio_pci_get_irq_count(vdev, hdr.index);
+ if (hdr.start >= max || hdr.start + hdr.count > max)
+ return -EINVAL;
- if (hdr.flags & VFIO_IRQ_SET_DATA_BOOL)
- size = sizeof(uint8_t);
- else if (hdr.flags & VFIO_IRQ_SET_DATA_EVENTFD)
- size = sizeof(int32_t);
- else
- return -EINVAL;
+ switch (hdr.flags & VFIO_IRQ_SET_DATA_TYPE_MASK) {
+ case VFIO_IRQ_SET_DATA_NONE:
+ size = 0;
+ break;
+ case VFIO_IRQ_SET_DATA_BOOL:
+ size = sizeof(uint8_t);
+ break;
+ case VFIO_IRQ_SET_DATA_EVENTFD:
+ size = sizeof(int32_t);
+ break;
+ default:
+ return -EINVAL;
+ }
- if (hdr.argsz - minsz < hdr.count * size ||
- hdr.start >= max || hdr.start + hdr.count > max)
+ if (size) {
+ if (hdr.argsz - minsz < hdr.count * size)
return -EINVAL;
data = memdup_user((void __user *)(arg + minsz),
@@ -893,6 +902,10 @@
return ret;
vdev->barmap[index] = pci_iomap(pdev, index, 0);
+ if (!vdev->barmap[index]) {
+ pci_release_selected_regions(pdev, 1 << index);
+ return -ENOMEM;
+ }
}
vma->vm_private_data = vdev;
diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c
index 20e9a86..5c8f767 100644
--- a/drivers/vfio/pci/vfio_pci_intrs.c
+++ b/drivers/vfio/pci/vfio_pci_intrs.c
@@ -255,7 +255,7 @@
if (!is_irq_none(vdev))
return -EINVAL;
- vdev->ctx = kzalloc(nvec * sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);
+ vdev->ctx = kcalloc(nvec, sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL);
if (!vdev->ctx)
return -ENOMEM;
diff --git a/drivers/vfio/pci/vfio_pci_rdwr.c b/drivers/vfio/pci/vfio_pci_rdwr.c
index 210db24..4d39f79 100644
--- a/drivers/vfio/pci/vfio_pci_rdwr.c
+++ b/drivers/vfio/pci/vfio_pci_rdwr.c
@@ -190,7 +190,10 @@
if (!vdev->has_vga)
return -EINVAL;
- switch (pos) {
+ if (pos > 0xbfffful)
+ return -EINVAL;
+
+ switch ((u32)pos) {
case 0xa0000 ... 0xbffff:
count = min(count, (size_t)(0xc0000 - pos));
iomem = ioremap_nocache(0xa0000, 0xbffff - 0xa0000 + 1);
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index 6070b79..1e01e28 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -296,6 +296,34 @@
kref_put_mutex(&group->kref, vfio_group_release, &vfio.group_lock);
}
+struct vfio_group_put_work {
+ struct work_struct work;
+ struct vfio_group *group;
+};
+
+static void vfio_group_put_bg(struct work_struct *work)
+{
+ struct vfio_group_put_work *do_work;
+
+ do_work = container_of(work, struct vfio_group_put_work, work);
+
+ vfio_group_put(do_work->group);
+ kfree(do_work);
+}
+
+static void vfio_group_schedule_put(struct vfio_group *group)
+{
+ struct vfio_group_put_work *do_work;
+
+ do_work = kmalloc(sizeof(*do_work), GFP_KERNEL);
+ if (WARN_ON(!do_work))
+ return;
+
+ INIT_WORK(&do_work->work, vfio_group_put_bg);
+ do_work->group = group;
+ schedule_work(&do_work->work);
+}
+
/* Assume group_lock or group reference is held */
static void vfio_group_get(struct vfio_group *group)
{
@@ -620,7 +648,14 @@
break;
}
- vfio_group_put(group);
+ /*
+ * If we're the last reference to the group, the group will be
+ * released, which includes unregistering the iommu group notifier.
+ * We hold a read-lock on that notifier list, unregistering needs
+ * a write-lock... deadlock. Release our reference asynchronously
+ * to avoid that situation.
+ */
+ vfio_group_schedule_put(group);
return NOTIFY_OK;
}
@@ -1552,6 +1587,15 @@
}
EXPORT_SYMBOL_GPL(vfio_group_put_external_user);
+bool vfio_external_group_match_file(struct vfio_group *test_group,
+ struct file *filep)
+{
+ struct vfio_group *group = filep->private_data;
+
+ return (filep->f_op == &vfio_group_fops) && (group == test_group);
+}
+EXPORT_SYMBOL_GPL(vfio_external_group_match_file);
+
int vfio_external_user_iommu_id(struct vfio_group *group)
{
return iommu_group_id(group->iommu_group);
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
index 0582b72..34e4b3a 100644
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -511,6 +511,12 @@
unsigned long hpa;
enum dma_data_direction dirtmp;
+ if (!tbl->it_userspace) {
+ ret = tce_iommu_userspace_view_alloc(tbl);
+ if (ret)
+ return ret;
+ }
+
for (i = 0; i < pages; ++i) {
struct mm_iommu_table_group_mem_t *mem = NULL;
unsigned long *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl,
@@ -584,15 +590,6 @@
WARN_ON(!ret && !(*ptbl)->it_ops->free);
WARN_ON(!ret && ((*ptbl)->it_allocated_size != table_size));
- if (!ret && container->v2) {
- ret = tce_iommu_userspace_view_alloc(*ptbl);
- if (ret)
- (*ptbl)->it_ops->free(*ptbl);
- }
-
- if (ret)
- decrement_locked_vm(table_size >> PAGE_SHIFT);
-
return ret;
}
@@ -1064,10 +1061,7 @@
if (!tbl || !tbl->it_map)
continue;
- rc = tce_iommu_userspace_view_alloc(tbl);
- if (!rc)
- rc = iommu_take_ownership(tbl);
-
+ rc = iommu_take_ownership(tbl);
if (rc) {
for (j = 0; j < i; ++j)
iommu_release_ownership(
@@ -1169,6 +1163,10 @@
/* pr_debug("tce_vfio: Attaching group #%u to iommu %p\n",
iommu_group_id(iommu_group), iommu_group); */
table_group = iommu_group_get_iommudata(iommu_group);
+ if (!table_group) {
+ ret = -ENODEV;
+ goto unlock_exit;
+ }
if (tce_groups_attached(container) && (!table_group->ops ||
!table_group->ops->take_ownership ||
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index ecb826e..2fa2806 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -130,57 +130,34 @@
rb_erase(&old->node, &iommu->dma_list);
}
-struct vwork {
- struct mm_struct *mm;
- long npage;
- struct work_struct work;
-};
-
-/* delayed decrement/increment for locked_vm */
-static void vfio_lock_acct_bg(struct work_struct *work)
+static int vfio_lock_acct(long npage, bool *lock_cap)
{
- struct vwork *vwork = container_of(work, struct vwork, work);
- struct mm_struct *mm;
+ int ret = 0;
- mm = vwork->mm;
- down_write(&mm->mmap_sem);
- mm->locked_vm += vwork->npage;
- up_write(&mm->mmap_sem);
- mmput(mm);
- kfree(vwork);
-}
+ if (!npage)
+ return 0;
-static void vfio_lock_acct(long npage)
-{
- struct vwork *vwork;
- struct mm_struct *mm;
+ if (!current->mm)
+ return -ESRCH; /* process exited */
- if (!current->mm || !npage)
- return; /* process exited or nothing to do */
+ down_write(&current->mm->mmap_sem);
+ if (npage > 0) {
+ if (lock_cap ? !*lock_cap : !capable(CAP_IPC_LOCK)) {
+ unsigned long limit;
- if (down_write_trylock(&current->mm->mmap_sem)) {
+ limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+
+ if (current->mm->locked_vm + npage > limit)
+ ret = -ENOMEM;
+ }
+ }
+
+ if (!ret)
current->mm->locked_vm += npage;
- up_write(&current->mm->mmap_sem);
- return;
- }
- /*
- * Couldn't get mmap_sem lock, so must setup to update
- * mm->locked_vm later. If locked_vm were atomic, we
- * wouldn't need this silliness
- */
- vwork = kmalloc(sizeof(struct vwork), GFP_KERNEL);
- if (!vwork)
- return;
- mm = get_task_mm(current);
- if (!mm) {
- kfree(vwork);
- return;
- }
- INIT_WORK(&vwork->work, vfio_lock_acct_bg);
- vwork->mm = mm;
- vwork->npage = npage;
- schedule_work(&vwork->work);
+ up_write(&current->mm->mmap_sem);
+
+ return ret;
}
/*
@@ -262,9 +239,9 @@
static long vfio_pin_pages(unsigned long vaddr, long npage,
int prot, unsigned long *pfn_base)
{
- unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+ unsigned long pfn = 0, limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
bool lock_cap = capable(CAP_IPC_LOCK);
- long ret, i;
+ long ret, i = 1;
bool rsvd;
if (!current->mm)
@@ -283,16 +260,11 @@
return -ENOMEM;
}
- if (unlikely(disable_hugepages)) {
- if (!rsvd)
- vfio_lock_acct(1);
- return 1;
- }
+ if (unlikely(disable_hugepages))
+ goto out;
/* Lock all the consecutive pages from pfn_base */
- for (i = 1, vaddr += PAGE_SIZE; i < npage; i++, vaddr += PAGE_SIZE) {
- unsigned long pfn = 0;
-
+ for (vaddr += PAGE_SIZE; i < npage; i++, vaddr += PAGE_SIZE) {
ret = vaddr_get_pfn(vaddr, prot, &pfn);
if (ret)
break;
@@ -308,12 +280,24 @@
put_pfn(pfn, prot);
pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n",
__func__, limit << PAGE_SHIFT);
- break;
+ ret = -ENOMEM;
+ goto unpin_out;
}
}
+out:
if (!rsvd)
- vfio_lock_acct(i);
+ ret = vfio_lock_acct(i, &lock_cap);
+
+unpin_out:
+ if (ret) {
+ if (!rsvd) {
+ for (pfn = *pfn_base ; i ; pfn++, i--)
+ put_pfn(pfn, prot);
+ }
+
+ return ret;
+ }
return i;
}
@@ -328,7 +312,7 @@
unlocked += put_pfn(pfn++, prot);
if (do_accounting)
- vfio_lock_acct(-unlocked);
+ vfio_lock_acct(-unlocked, NULL);
return unlocked;
}
@@ -390,7 +374,7 @@
cond_resched();
}
- vfio_lock_acct(-unlocked);
+ vfio_lock_acct(-unlocked, NULL);
}
static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *dma)
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 6e92917..4e3c78d 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -1168,6 +1168,8 @@
p->userfont = 0;
}
+static void set_vc_hi_font(struct vc_data *vc, bool set);
+
static void fbcon_deinit(struct vc_data *vc)
{
struct display *p = &fb_display[vc->vc_num];
@@ -1203,6 +1205,9 @@
if (free_font)
vc->vc_font.data = NULL;
+ if (vc->vc_hi_font_mask)
+ set_vc_hi_font(vc, false);
+
if (!con_is_bound(&fb_con))
fbcon_exit();
@@ -2439,32 +2444,10 @@
return 0;
}
-static int fbcon_do_set_font(struct vc_data *vc, int w, int h,
- const u8 * data, int userfont)
+/* set/clear vc_hi_font_mask and update vc attrs accordingly */
+static void set_vc_hi_font(struct vc_data *vc, bool set)
{
- struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
- struct fbcon_ops *ops = info->fbcon_par;
- struct display *p = &fb_display[vc->vc_num];
- int resize;
- int cnt;
- char *old_data = NULL;
-
- if (CON_IS_VISIBLE(vc) && softback_lines)
- fbcon_set_origin(vc);
-
- resize = (w != vc->vc_font.width) || (h != vc->vc_font.height);
- if (p->userfont)
- old_data = vc->vc_font.data;
- if (userfont)
- cnt = FNTCHARCNT(data);
- else
- cnt = 256;
- vc->vc_font.data = (void *)(p->fontdata = data);
- if ((p->userfont = userfont))
- REFCOUNT(data)++;
- vc->vc_font.width = w;
- vc->vc_font.height = h;
- if (vc->vc_hi_font_mask && cnt == 256) {
+ if (!set) {
vc->vc_hi_font_mask = 0;
if (vc->vc_can_do_color) {
vc->vc_complement_mask >>= 1;
@@ -2487,7 +2470,7 @@
((c & 0xfe00) >> 1) | (c & 0xff);
vc->vc_attr >>= 1;
}
- } else if (!vc->vc_hi_font_mask && cnt == 512) {
+ } else {
vc->vc_hi_font_mask = 0x100;
if (vc->vc_can_do_color) {
vc->vc_complement_mask <<= 1;
@@ -2519,8 +2502,38 @@
} else
vc->vc_video_erase_char = c & ~0x100;
}
-
}
+}
+
+static int fbcon_do_set_font(struct vc_data *vc, int w, int h,
+ const u8 * data, int userfont)
+{
+ struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]];
+ struct fbcon_ops *ops = info->fbcon_par;
+ struct display *p = &fb_display[vc->vc_num];
+ int resize;
+ int cnt;
+ char *old_data = NULL;
+
+ if (CON_IS_VISIBLE(vc) && softback_lines)
+ fbcon_set_origin(vc);
+
+ resize = (w != vc->vc_font.width) || (h != vc->vc_font.height);
+ if (p->userfont)
+ old_data = vc->vc_font.data;
+ if (userfont)
+ cnt = FNTCHARCNT(data);
+ else
+ cnt = 256;
+ vc->vc_font.data = (void *)(p->fontdata = data);
+ if ((p->userfont = userfont))
+ REFCOUNT(data)++;
+ vc->vc_font.width = w;
+ vc->vc_font.height = h;
+ if (vc->vc_hi_font_mask && cnt == 256)
+ set_vc_hi_font(vc, false);
+ else if (!vc->vc_hi_font_mask && cnt == 512)
+ set_vc_hi_font(vc, true);
if (resize) {
int cols, rows;
diff --git a/drivers/video/fbdev/cobalt_lcdfb.c b/drivers/video/fbdev/cobalt_lcdfb.c
index 07675d6..d4530b5 100644
--- a/drivers/video/fbdev/cobalt_lcdfb.c
+++ b/drivers/video/fbdev/cobalt_lcdfb.c
@@ -350,6 +350,11 @@
info->screen_size = resource_size(res);
info->screen_base = devm_ioremap(&dev->dev, res->start,
info->screen_size);
+ if (!info->screen_base) {
+ framebuffer_release(info);
+ return -ENOMEM;
+ }
+
info->fbops = &cobalt_lcd_fbops;
info->fix = cobalt_lcdfb_fix;
info->fix.smem_start = res->start;
diff --git a/drivers/video/fbdev/msm/mdss_dsi_panel.c b/drivers/video/fbdev/msm/mdss_dsi_panel.c
index eeafd0e..8c4dd93e 100644
--- a/drivers/video/fbdev/msm/mdss_dsi_panel.c
+++ b/drivers/video/fbdev/msm/mdss_dsi_panel.c
@@ -1118,6 +1118,12 @@
pr_debug("%s: ctrl=%pK ndx=%d enable=%d\n", __func__, ctrl, ctrl->ndx,
enable);
+ if (enable && pinfo->persist_mode == MDSS_PANEL_LOW_PERSIST_MODE_ON) {
+ /* disable low persist mode in low power mode */
+ mdss_dsi_panel_apply_display_setting(pdata,
+ MDSS_PANEL_LOW_PERSIST_MODE_OFF);
+ }
+
if (pinfo->alpm_feature_enabled) {
enum alpm_mode_type mode;
@@ -1128,6 +1134,12 @@
}
}
+ if (!enable && pinfo->persist_mode == MDSS_PANEL_LOW_PERSIST_MODE_ON) {
+ /* reenable low persist when coming out of low power mode */
+ mdss_dsi_panel_apply_display_setting(pdata,
+ MDSS_PANEL_LOW_PERSIST_MODE_ON);
+ }
+
pr_debug("%s:-\n", __func__);
return 0;
}
diff --git a/drivers/video/fbdev/msm/mdss_fb.c b/drivers/video/fbdev/msm/mdss_fb.c
index 66ad4b2..e0b5ad5 100644
--- a/drivers/video/fbdev/msm/mdss_fb.c
+++ b/drivers/video/fbdev/msm/mdss_fb.c
@@ -875,7 +875,7 @@
}
mutex_lock(&mfd->mdss_sysfs_lock);
- if (mdss_panel_is_power_off(mfd->panel_power_state)) {
+ if (!mdss_panel_is_power_on_interactive(mfd->panel_power_state)) {
pinfo->persist_mode = persist_mode;
goto end;
}
diff --git a/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c b/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c
index 627334e..d73151b 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c
@@ -3032,6 +3032,7 @@
struct mdss_mdp_cmd_ctx *ctx, *sctx = NULL;
struct mdss_data_type *mdata = mdss_mdp_get_mdata();
u32 clearbit = 0;
+ bool need_post_panel_on = false;
ctx = (struct mdss_mdp_cmd_ctx *) ctl->intf_ctx[MASTER_CTX];
if (!ctx) {
@@ -3068,8 +3069,10 @@
* turned on only when we send the first frame and not during cmd
* start. This is to ensure that no artifacts are seen on the panel.
*/
- if (__mdss_mdp_cmd_is_panel_power_off(ctx))
+ if (__mdss_mdp_cmd_is_panel_power_off(ctx)) {
mdss_mdp_cmd_panel_on(ctl, sctl);
+ need_post_panel_on = true;
+ }
ctx->current_pp_num = ctx->default_pp_num;
if (sctx)
@@ -3162,6 +3165,9 @@
if (ctx->autorefresh_state == MDP_AUTOREFRESH_ON)
mdss_mdp_cmd_wait4_autorefresh_done(ctl);
+ if (need_post_panel_on)
+ mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_POST_PANEL_ON, NULL,
+ CTL_INTF_EVENT_FLAG_DEFAULT);
mb();
mutex_unlock(&ctx->autorefresh_lock);
diff --git a/drivers/video/fbdev/xen-fbfront.c b/drivers/video/fbdev/xen-fbfront.c
index 0567d51..ea2f19f 100644
--- a/drivers/video/fbdev/xen-fbfront.c
+++ b/drivers/video/fbdev/xen-fbfront.c
@@ -644,7 +644,6 @@
break;
case XenbusStateInitWait:
-InitWait:
xenbus_switch_state(dev, XenbusStateConnected);
break;
@@ -655,7 +654,8 @@
* get Connected twice here.
*/
if (dev->state != XenbusStateConnected)
- goto InitWait; /* no InitWait seen yet, fudge it */
+ /* no InitWait seen yet, fudge it */
+ xenbus_switch_state(dev, XenbusStateConnected);
if (xenbus_scanf(XBT_NIL, info->xbdev->otherend,
"request-update", "%d", &val) < 0)
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 8959b32..84c6add 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -421,6 +421,8 @@
* Prime this virtqueue with one buffer so the hypervisor can
* use it to signal us later (it can't be broken yet!).
*/
+ update_balloon_stats(vb);
+
sg_init_one(&sg, vb->stats, sizeof vb->stats);
if (virtqueue_add_outbuf(vb->stats_vq, &sg, 1, vb, GFP_KERNEL)
< 0)
diff --git a/drivers/watchdog/bcm_kona_wdt.c b/drivers/watchdog/bcm_kona_wdt.c
index e0c9842..11a72bc 100644
--- a/drivers/watchdog/bcm_kona_wdt.c
+++ b/drivers/watchdog/bcm_kona_wdt.c
@@ -304,6 +304,8 @@
if (!wdt)
return -ENOMEM;
+ spin_lock_init(&wdt->lock);
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
wdt->base = devm_ioremap_resource(dev, res);
if (IS_ERR(wdt->base))
@@ -316,7 +318,6 @@
return ret;
}
- spin_lock_init(&wdt->lock);
platform_set_drvdata(pdev, wdt);
watchdog_set_drvdata(&bcm_kona_wdt_wdd, wdt);
bcm_kona_wdt_wdd.parent = &pdev->dev;
diff --git a/drivers/watchdog/pcwd_usb.c b/drivers/watchdog/pcwd_usb.c
index 1a11aed..9eb5b31 100644
--- a/drivers/watchdog/pcwd_usb.c
+++ b/drivers/watchdog/pcwd_usb.c
@@ -630,6 +630,9 @@
return -ENODEV;
}
+ if (iface_desc->desc.bNumEndpoints < 1)
+ return -ENODEV;
+
/* check out the endpoint: it has to be Interrupt & IN */
endpoint = &iface_desc->endpoint[0].desc;
diff --git a/drivers/xen/biomerge.c b/drivers/xen/biomerge.c
index 4da69db..1bdd02a6 100644
--- a/drivers/xen/biomerge.c
+++ b/drivers/xen/biomerge.c
@@ -10,8 +10,7 @@
unsigned long bfn1 = pfn_to_bfn(page_to_pfn(vec1->bv_page));
unsigned long bfn2 = pfn_to_bfn(page_to_pfn(vec2->bv_page));
- return __BIOVEC_PHYS_MERGEABLE(vec1, vec2) &&
- ((bfn1 == bfn2) || ((bfn1+1) == bfn2));
+ return bfn1 + PFN_DOWN(vec1->bv_offset + vec1->bv_len) == bfn2;
#else
/*
* XXX: Add support for merging bio_vec when using different page
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index df2e6f7..527de56 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -335,8 +335,8 @@
st->global_error = 1;
}
}
- st->va += PAGE_SIZE * nr;
- st->index += nr;
+ st->va += XEN_PAGE_SIZE * nr;
+ st->index += nr / XEN_PFN_PER_PAGE;
return 0;
}
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 7399782..8a58bbc 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -409,9 +409,9 @@
if (map == SWIOTLB_MAP_ERROR)
return DMA_ERROR_CODE;
+ dev_addr = xen_phys_to_bus(map);
xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT),
dev_addr, map & ~PAGE_MASK, size, dir, attrs);
- dev_addr = xen_phys_to_bus(map);
/*
* Ensure that the address returned is DMA'ble
@@ -567,13 +567,14 @@
sg_dma_len(sgl) = 0;
return 0;
}
+ dev_addr = xen_phys_to_bus(map);
xen_dma_map_page(hwdev, pfn_to_page(map >> PAGE_SHIFT),
dev_addr,
map & ~PAGE_MASK,
sg->length,
dir,
attrs);
- sg->dma_address = xen_phys_to_bus(map);
+ sg->dma_address = dev_addr;
} else {
/* we are not interested in the dma_addr returned by
* xen_dma_map_page, only in the potential cache flushes executed
diff --git a/drivers/xen/xen-acpi-processor.c b/drivers/xen/xen-acpi-processor.c
index 611f9c1..2e319d0 100644
--- a/drivers/xen/xen-acpi-processor.c
+++ b/drivers/xen/xen-acpi-processor.c
@@ -27,10 +27,10 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
+#include <linux/syscore_ops.h>
#include <linux/acpi.h>
#include <acpi/processor.h>
#include <xen/xen.h>
-#include <xen/xen-ops.h>
#include <xen/interface/platform.h>
#include <asm/xen/hypercall.h>
@@ -466,15 +466,33 @@
return rc;
}
-static int xen_acpi_processor_resume(struct notifier_block *nb,
- unsigned long action, void *data)
+static void xen_acpi_processor_resume_worker(struct work_struct *dummy)
{
+ int rc;
+
bitmap_zero(acpi_ids_done, nr_acpi_bits);
- return xen_upload_processor_pm_data();
+
+ rc = xen_upload_processor_pm_data();
+ if (rc != 0)
+ pr_info("ACPI data upload failed, error = %d\n", rc);
}
-struct notifier_block xen_acpi_processor_resume_nb = {
- .notifier_call = xen_acpi_processor_resume,
+static void xen_acpi_processor_resume(void)
+{
+ static DECLARE_WORK(wq, xen_acpi_processor_resume_worker);
+
+ /*
+ * xen_upload_processor_pm_data() calls non-atomic code.
+ * However, the context for xen_acpi_processor_resume is syscore
+ * with only the boot CPU online and in an atomic context.
+ *
+ * So defer the upload for some point safer.
+ */
+ schedule_work(&wq);
+}
+
+static struct syscore_ops xap_syscore_ops = {
+ .resume = xen_acpi_processor_resume,
};
static int __init xen_acpi_processor_init(void)
@@ -527,7 +545,7 @@
if (rc)
goto err_unregister;
- xen_resume_notifier_register(&xen_acpi_processor_resume_nb);
+ register_syscore_ops(&xap_syscore_ops);
return 0;
err_unregister:
@@ -544,7 +562,7 @@
{
int i;
- xen_resume_notifier_unregister(&xen_acpi_processor_resume_nb);
+ unregister_syscore_ops(&xap_syscore_ops);
kfree(acpi_ids_done);
kfree(acpi_id_present);
kfree(acpi_id_cst_present);
diff --git a/fs/9p/acl.c b/fs/9p/acl.c
index 929b618..c30c6ce 100644
--- a/fs/9p/acl.c
+++ b/fs/9p/acl.c
@@ -283,6 +283,7 @@
case ACL_TYPE_ACCESS:
if (acl) {
struct iattr iattr;
+ struct posix_acl *old_acl = acl;
retval = posix_acl_update_mode(inode, &iattr.ia_mode, &acl);
if (retval)
@@ -293,6 +294,7 @@
* by the mode bits. So don't
* update ACL.
*/
+ posix_acl_release(old_acl);
value = NULL;
size = 0;
}
diff --git a/fs/autofs4/dev-ioctl.c b/fs/autofs4/dev-ioctl.c
index ac7d921..2574255 100644
--- a/fs/autofs4/dev-ioctl.c
+++ b/fs/autofs4/dev-ioctl.c
@@ -331,7 +331,7 @@
int status;
token = (autofs_wqt_t) param->fail.token;
- status = param->fail.status ? param->fail.status : -ENOENT;
+ status = param->fail.status < 0 ? param->fail.status : -ENOENT;
return autofs4_wait_release(sbi, token, status);
}
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 0c52941..8a0243e 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -905,17 +905,60 @@
elf_flags = MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE;
vaddr = elf_ppnt->p_vaddr;
+ /*
+ * If we are loading ET_EXEC or we have already performed
+ * the ET_DYN load_addr calculations, proceed normally.
+ */
if (loc->elf_ex.e_type == ET_EXEC || load_addr_set) {
elf_flags |= MAP_FIXED;
} else if (loc->elf_ex.e_type == ET_DYN) {
- /* Try and get dynamic programs out of the way of the
- * default mmap base, as well as whatever program they
- * might try to exec. This is because the brk will
- * follow the loader, and is not movable. */
- load_bias = ELF_ET_DYN_BASE - vaddr;
- if (current->flags & PF_RANDOMIZE)
- load_bias += arch_mmap_rnd();
- load_bias = ELF_PAGESTART(load_bias);
+ /*
+ * This logic is run once for the first LOAD Program
+ * Header for ET_DYN binaries to calculate the
+ * randomization (load_bias) for all the LOAD
+ * Program Headers, and to calculate the entire
+ * size of the ELF mapping (total_size). (Note that
+ * load_addr_set is set to true later once the
+ * initial mapping is performed.)
+ *
+ * There are effectively two types of ET_DYN
+ * binaries: programs (i.e. PIE: ET_DYN with INTERP)
+ * and loaders (ET_DYN without INTERP, since they
+ * _are_ the ELF interpreter). The loaders must
+ * be loaded away from programs since the program
+ * may otherwise collide with the loader (especially
+ * for ET_EXEC which does not have a randomized
+ * position). For example to handle invocations of
+ * "./ld.so someprog" to test out a new version of
+ * the loader, the subsequent program that the
+ * loader loads must avoid the loader itself, so
+ * they cannot share the same load range. Sufficient
+ * room for the brk must be allocated with the
+ * loader as well, since brk must be available with
+ * the loader.
+ *
+ * Therefore, programs are loaded offset from
+ * ELF_ET_DYN_BASE and loaders are loaded into the
+ * independently randomized mmap region (0 load_bias
+ * without MAP_FIXED).
+ */
+ if (elf_interpreter) {
+ load_bias = ELF_ET_DYN_BASE;
+ if (current->flags & PF_RANDOMIZE)
+ load_bias += arch_mmap_rnd();
+ elf_flags |= MAP_FIXED;
+ } else
+ load_bias = 0;
+
+ /*
+ * Since load_bias is used for all subsequent loading
+ * calculations, we must lower it by the first vaddr
+ * so that the remaining calculations based on the
+ * ELF vaddrs will be correctly offset. The result
+ * is then page aligned.
+ */
+ load_bias = ELF_PAGESTART(load_bias - vaddr);
+
total_size = total_mapping_size(elf_phdata,
loc->elf_ex.e_phnum);
if (!total_size) {
@@ -2295,6 +2338,7 @@
goto end_coredump;
}
}
+ dump_truncate(cprm);
if (!elf_core_write_extra_data(cprm))
goto end_coredump;
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 198aea66..26bbaae 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -88,12 +88,11 @@
{
struct address_space *mapping = bdev->bd_inode->i_mapping;
- if (mapping->nrpages == 0)
- return;
-
- invalidate_bh_lrus();
- lru_add_drain_all(); /* make sure all lru add caches are flushed */
- invalidate_mapping_pages(mapping, 0, -1);
+ if (mapping->nrpages) {
+ invalidate_bh_lrus();
+ lru_add_drain_all(); /* make sure all lru add caches are flushed */
+ invalidate_mapping_pages(mapping, 0, -1);
+ }
/* 99% of the time, we don't need to flush the cleancache on the bdev.
* But, for the strange corners, lets be cautious
*/
@@ -1098,7 +1097,6 @@
if (disk->fops->revalidate_disk)
ret = disk->fops->revalidate_disk(disk);
- blk_integrity_revalidate(disk);
bdev = bdget_disk(disk, 0);
if (!bdev)
return ret;
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 2a2e370..c36a03f 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -3854,6 +3854,7 @@
info->space_info_kobj, "%s",
alloc_name(found->flags));
if (ret) {
+ percpu_counter_destroy(&found->total_bytes_pinned);
kfree(found);
return ret;
}
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 353f4ba..d4a6eef 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -2771,7 +2771,7 @@
if (!ret)
ret = btrfs_prealloc_file_range(inode, mode,
range->start,
- range->len, 1 << inode->i_blkbits,
+ range->len, i_blocksize(inode),
offset + len, &alloc_hint);
list_del(&range->list);
kfree(range);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 3cff652..bebd651 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -4397,8 +4397,19 @@
if (found_type > min_type) {
del_item = 1;
} else {
- if (item_end < new_size)
+ if (item_end < new_size) {
+ /*
+ * With NO_HOLES mode, for the following mapping
+ *
+ * [0-4k][hole][8k-12k]
+ *
+ * if truncating isize down to 6k, it ends up
+ * isize being 8k.
+ */
+ if (btrfs_fs_incompat(root->fs_info, NO_HOLES))
+ last_size = new_size;
break;
+ }
if (found_key.offset >= new_size)
del_item = 1;
else
@@ -7318,8 +7329,8 @@
int found = false;
void **pagep = NULL;
struct page *page = NULL;
- int start_idx;
- int end_idx;
+ unsigned long start_idx;
+ unsigned long end_idx;
start_idx = start >> PAGE_CACHE_SHIFT;
@@ -7510,11 +7521,18 @@
* within our reservation, otherwise we need to adjust our inode
* counter appropriately.
*/
- if (dio_data->outstanding_extents) {
+ if (dio_data->outstanding_extents >= num_extents) {
dio_data->outstanding_extents -= num_extents;
} else {
+ /*
+ * If dio write length has been split due to no large enough
+ * contiguous space, we need to compensate our inode counter
+ * appropriately.
+ */
+ u64 num_needed = num_extents - dio_data->outstanding_extents;
+
spin_lock(&BTRFS_I(inode)->lock);
- BTRFS_I(inode)->outstanding_extents += num_extents;
+ BTRFS_I(inode)->outstanding_extents += num_needed;
spin_unlock(&BTRFS_I(inode)->lock);
}
}
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 5d34a06..3bd2233 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -1727,6 +1727,8 @@
goto restore;
}
+ btrfs_qgroup_rescan_resume(fs_info);
+
if (!fs_info->uuid_root) {
btrfs_info(fs_info, "creating UUID tree");
ret = btrfs_create_uuid_tree(fs_info);
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 9c62a6f..600c67e 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -108,7 +108,7 @@
},
};
-const u64 const btrfs_raid_group[BTRFS_NR_RAID_TYPES] = {
+const u64 btrfs_raid_group[BTRFS_NR_RAID_TYPES] = {
[BTRFS_RAID_RAID10] = BTRFS_BLOCK_GROUP_RAID10,
[BTRFS_RAID_RAID1] = BTRFS_BLOCK_GROUP_RAID1,
[BTRFS_RAID_DUP] = BTRFS_BLOCK_GROUP_DUP,
diff --git a/fs/buffer.c b/fs/buffer.c
index fc35dd2..14ce7b2 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -2347,7 +2347,7 @@
loff_t pos, loff_t *bytes)
{
struct inode *inode = mapping->host;
- unsigned blocksize = 1 << inode->i_blkbits;
+ unsigned int blocksize = i_blocksize(inode);
struct page *page;
void *fsdata;
pgoff_t index, curidx;
@@ -2427,8 +2427,8 @@
get_block_t *get_block, loff_t *bytes)
{
struct inode *inode = mapping->host;
- unsigned blocksize = 1 << inode->i_blkbits;
- unsigned zerofrom;
+ unsigned int blocksize = i_blocksize(inode);
+ unsigned int zerofrom;
int err;
err = cont_expand_zero(file, mapping, pos, bytes);
@@ -2790,7 +2790,7 @@
struct buffer_head map_bh;
int err;
- blocksize = 1 << inode->i_blkbits;
+ blocksize = i_blocksize(inode);
length = offset & (blocksize - 1);
/* Block boundary? Nothing to do */
@@ -2868,7 +2868,7 @@
struct buffer_head *bh;
int err;
- blocksize = 1 << inode->i_blkbits;
+ blocksize = i_blocksize(inode);
length = offset & (blocksize - 1);
/* Block boundary? Nothing to do */
@@ -2980,7 +2980,7 @@
struct inode *inode = mapping->host;
tmp.b_state = 0;
tmp.b_blocknr = 0;
- tmp.b_size = 1 << inode->i_blkbits;
+ tmp.b_size = i_blocksize(inode);
get_block(inode, block, &tmp, 0);
return tmp.b_blocknr;
}
diff --git a/fs/ceph/acl.c b/fs/ceph/acl.c
index 4d8caeb..bdb9c94 100644
--- a/fs/ceph/acl.c
+++ b/fs/ceph/acl.c
@@ -128,7 +128,7 @@
if (new_mode != old_mode) {
newattrs.ia_mode = new_mode;
newattrs.ia_valid = ATTR_MODE;
- ret = ceph_setattr(dentry, &newattrs);
+ ret = __ceph_setattr(dentry, &newattrs);
if (ret)
goto out_dput;
}
@@ -138,7 +138,7 @@
if (new_mode != old_mode) {
newattrs.ia_mode = old_mode;
newattrs.ia_valid = ATTR_MODE;
- ceph_setattr(dentry, &newattrs);
+ __ceph_setattr(dentry, &newattrs);
}
goto out_dput;
}
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index b7d218a..22bae2b 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -189,7 +189,7 @@
/*
* read a single page, without unlocking it.
*/
-static int readpage_nounlock(struct file *filp, struct page *page)
+static int ceph_do_readpage(struct file *filp, struct page *page)
{
struct inode *inode = file_inode(filp);
struct ceph_inode_info *ci = ceph_inode(inode);
@@ -219,7 +219,7 @@
err = ceph_readpage_from_fscache(inode, page);
if (err == 0)
- goto out;
+ return -EINPROGRESS;
dout("readpage inode %p file %p page %p index %lu\n",
inode, filp, page, page->index);
@@ -249,8 +249,11 @@
static int ceph_readpage(struct file *filp, struct page *page)
{
- int r = readpage_nounlock(filp, page);
- unlock_page(page);
+ int r = ceph_do_readpage(filp, page);
+ if (r != -EINPROGRESS)
+ unlock_page(page);
+ else
+ r = 0;
return r;
}
@@ -697,7 +700,7 @@
struct pagevec pvec;
int done = 0;
int rc = 0;
- unsigned wsize = 1 << inode->i_blkbits;
+ unsigned int wsize = i_blocksize(inode);
struct ceph_osd_request *req = NULL;
int do_sync = 0;
loff_t snap_size, i_size;
@@ -1094,7 +1097,7 @@
goto retry_locked;
r = writepage_nounlock(page, NULL);
if (r < 0)
- goto fail_nosnap;
+ goto fail_unlock;
goto retry_locked;
}
@@ -1122,11 +1125,14 @@
}
/* we need to read it. */
- r = readpage_nounlock(file, page);
- if (r < 0)
- goto fail_nosnap;
+ r = ceph_do_readpage(file, page);
+ if (r < 0) {
+ if (r == -EINPROGRESS)
+ return -EAGAIN;
+ goto fail_unlock;
+ }
goto retry_locked;
-fail_nosnap:
+fail_unlock:
unlock_page(page);
return r;
}
diff --git a/fs/ceph/cache.c b/fs/ceph/cache.c
index a4766de..ff1cfd7 100644
--- a/fs/ceph/cache.c
+++ b/fs/ceph/cache.c
@@ -224,13 +224,7 @@
fscache_relinquish_cookie(cookie, 0);
}
-static void ceph_vfs_readpage_complete(struct page *page, void *data, int error)
-{
- if (!error)
- SetPageUptodate(page);
-}
-
-static void ceph_vfs_readpage_complete_unlock(struct page *page, void *data, int error)
+static void ceph_readpage_from_fscache_complete(struct page *page, void *data, int error)
{
if (!error)
SetPageUptodate(page);
@@ -259,7 +253,7 @@
return -ENOBUFS;
ret = fscache_read_or_alloc_page(ci->fscache, page,
- ceph_vfs_readpage_complete, NULL,
+ ceph_readpage_from_fscache_complete, NULL,
GFP_KERNEL);
switch (ret) {
@@ -288,7 +282,7 @@
return -ENOBUFS;
ret = fscache_read_or_alloc_pages(ci->fscache, mapping, pages, nr_pages,
- ceph_vfs_readpage_complete_unlock,
+ ceph_readpage_from_fscache_complete,
NULL, mapping_gfp_mask(mapping));
switch (ret) {
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index 9314b4e..be7d187 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -247,6 +247,11 @@
if (ret < 0)
err = ret;
dput(last);
+ /* last_name no longer match cache index */
+ if (fi->readdir_cache_idx >= 0) {
+ fi->readdir_cache_idx = -1;
+ fi->dir_release_count = 0;
+ }
}
return err;
}
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index d98536c..9f0d990 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -1773,7 +1773,7 @@
/*
* setattr
*/
-int ceph_setattr(struct dentry *dentry, struct iattr *attr)
+int __ceph_setattr(struct dentry *dentry, struct iattr *attr)
{
struct inode *inode = d_inode(dentry);
struct ceph_inode_info *ci = ceph_inode(inode);
@@ -1975,11 +1975,6 @@
if (inode_dirty_flags)
__mark_inode_dirty(inode, inode_dirty_flags);
- if (ia_valid & ATTR_MODE) {
- err = posix_acl_chmod(inode, attr->ia_mode);
- if (err)
- goto out_put;
- }
if (mask) {
req->r_inode = inode;
@@ -1993,13 +1988,23 @@
ceph_cap_string(dirtied), mask);
ceph_mdsc_put_request(req);
- if (mask & CEPH_SETATTR_SIZE)
+ ceph_free_cap_flush(prealloc_cf);
+
+ if (err >= 0 && (mask & CEPH_SETATTR_SIZE))
__ceph_do_pending_vmtruncate(inode);
- ceph_free_cap_flush(prealloc_cf);
+
return err;
-out_put:
- ceph_mdsc_put_request(req);
- ceph_free_cap_flush(prealloc_cf);
+}
+
+int ceph_setattr(struct dentry *dentry, struct iattr *attr)
+{
+ int err;
+
+ err = __ceph_setattr(dentry, attr);
+
+ if (err >= 0 && (attr->ia_valid & ATTR_MODE))
+ err = posix_acl_chmod(d_inode(dentry), attr->ia_mode);
+
return err;
}
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 75b7d12..8c8cb8f 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -788,6 +788,7 @@
return __ceph_do_getattr(inode, NULL, mask, force);
}
extern int ceph_permission(struct inode *inode, int mask);
+extern int __ceph_setattr(struct dentry *dentry, struct iattr *attr);
extern int ceph_setattr(struct dentry *dentry, struct iattr *attr);
extern int ceph_getattr(struct vfsmount *mnt, struct dentry *dentry,
struct kstat *stat);
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
index 819163d..b24275e 100644
--- a/fs/ceph/xattr.c
+++ b/fs/ceph/xattr.c
@@ -369,6 +369,7 @@
if (update_xattr) {
int err = 0;
+
if (xattr && (flags & XATTR_CREATE))
err = -EEXIST;
else if (!xattr && (flags & XATTR_REPLACE))
@@ -376,12 +377,14 @@
if (err) {
kfree(name);
kfree(val);
+ kfree(*newxattr);
return err;
}
if (update_xattr < 0) {
if (xattr)
__remove_xattr(ci, xattr);
kfree(name);
+ kfree(*newxattr);
return 0;
}
}
diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c
index 02b071bf..a0b3e7d 100644
--- a/fs/cifs/cifs_unicode.c
+++ b/fs/cifs/cifs_unicode.c
@@ -83,6 +83,9 @@
case SFM_COLON:
*target = ':';
break;
+ case SFM_DOUBLEQUOTE:
+ *target = '"';
+ break;
case SFM_ASTERISK:
*target = '*';
break;
@@ -418,6 +421,9 @@
case ':':
dest_char = cpu_to_le16(SFM_COLON);
break;
+ case '"':
+ dest_char = cpu_to_le16(SFM_DOUBLEQUOTE);
+ break;
case '*':
dest_char = cpu_to_le16(SFM_ASTERISK);
break;
diff --git a/fs/cifs/cifs_unicode.h b/fs/cifs/cifs_unicode.h
index 479bc0a..07ade70 100644
--- a/fs/cifs/cifs_unicode.h
+++ b/fs/cifs/cifs_unicode.h
@@ -57,6 +57,7 @@
* not conflict (although almost does) with the mapping above.
*/
+#define SFM_DOUBLEQUOTE ((__u16) 0xF020)
#define SFM_ASTERISK ((__u16) 0xF021)
#define SFM_QUESTION ((__u16) 0xF025)
#define SFM_COLON ((__u16) 0xF022)
@@ -64,8 +65,8 @@
#define SFM_LESSTHAN ((__u16) 0xF023)
#define SFM_PIPE ((__u16) 0xF027)
#define SFM_SLASH ((__u16) 0xF026)
-#define SFM_PERIOD ((__u16) 0xF028)
-#define SFM_SPACE ((__u16) 0xF029)
+#define SFM_SPACE ((__u16) 0xF028)
+#define SFM_PERIOD ((__u16) 0xF029)
/*
* Mapping mechanism to use when one of the seven reserved characters is
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index b768836..e2f6a79 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -227,6 +227,7 @@
/* verify the message */
int (*check_message)(char *, unsigned int);
bool (*is_oplock_break)(char *, struct TCP_Server_Info *);
+ int (*handle_cancelled_mid)(char *, struct TCP_Server_Info *);
void (*downgrade_oplock)(struct TCP_Server_Info *,
struct cifsInodeInfo *, bool);
/* process transaction2 response */
@@ -906,7 +907,6 @@
bool use_persistent:1; /* use persistent instead of durable handles */
#ifdef CONFIG_CIFS_SMB2
bool print:1; /* set if connection to printer share */
- bool bad_network_name:1; /* set if ret status STATUS_BAD_NETWORK_NAME */
__le32 capabilities;
__u32 share_flags;
__u32 maximal_access;
@@ -1290,12 +1290,19 @@
void *callback_data; /* general purpose pointer for callback */
void *resp_buf; /* pointer to received SMB header */
int mid_state; /* wish this were enum but can not pass to wait_event */
+ unsigned int mid_flags;
__le16 command; /* smb command code */
bool large_buf:1; /* if valid response, is pointer to large buf */
bool multiRsp:1; /* multiple trans2 responses for one request */
bool multiEnd:1; /* both received */
};
+struct close_cancelled_open {
+ struct cifs_fid fid;
+ struct cifs_tcon *tcon;
+ struct work_struct work;
+};
+
/* Make code in transport.c a little cleaner by moving
update of optional stats into function below */
#ifdef CONFIG_CIFS_STATS2
@@ -1427,6 +1434,9 @@
#define MID_RESPONSE_MALFORMED 0x10
#define MID_SHUTDOWN 0x20
+/* Flags */
+#define MID_WAIT_CANCELLED 1 /* Cancelled while waiting for response */
+
/* Types of response buffer returned from SendReceive2 */
#define CIFS_NO_BUFFER 0 /* Response buffer not returned */
#define CIFS_SMALL_BUFFER 1
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index b1104ed..b60150e 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -717,6 +717,9 @@
if (rc)
return rc;
+ if (server->capabilities & CAP_UNICODE)
+ smb->hdr.Flags2 |= SMBFLG2_UNICODE;
+
/* set up echo request */
smb->hdr.Tid = 0xffff;
smb->hdr.WordCount = 1;
@@ -1424,6 +1427,8 @@
length = discard_remaining_data(server);
dequeue_mid(mid, rdata->result);
+ mid->resp_buf = server->smallbuf;
+ server->smallbuf = NULL;
return length;
}
@@ -1538,6 +1543,8 @@
return cifs_readv_discard(server, mid);
dequeue_mid(mid, false);
+ mid->resp_buf = server->smallbuf;
+ server->smallbuf = NULL;
return length;
}
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 5d59f25..53a827c 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -412,6 +412,9 @@
}
} while (server->tcpStatus == CifsNeedReconnect);
+ if (server->tcpStatus == CifsNeedNegotiate)
+ mod_delayed_work(cifsiod_wq, &server->echo, 0);
+
return rc;
}
@@ -421,18 +424,27 @@
int rc;
struct TCP_Server_Info *server = container_of(work,
struct TCP_Server_Info, echo.work);
+ unsigned long echo_interval;
/*
- * We cannot send an echo if it is disabled or until the
- * NEGOTIATE_PROTOCOL request is done, which is indicated by
- * server->ops->need_neg() == true. Also, no need to ping if
- * we got a response recently.
+ * If we need to renegotiate, set echo interval to zero to
+ * immediately call echo service where we can renegotiate.
+ */
+ if (server->tcpStatus == CifsNeedNegotiate)
+ echo_interval = 0;
+ else
+ echo_interval = SMB_ECHO_INTERVAL;
+
+ /*
+ * We cannot send an echo if it is disabled.
+ * Also, no need to ping if we got a response recently.
*/
if (server->tcpStatus == CifsNeedReconnect ||
- server->tcpStatus == CifsExiting || server->tcpStatus == CifsNew ||
+ server->tcpStatus == CifsExiting ||
+ server->tcpStatus == CifsNew ||
(server->ops->can_echo && !server->ops->can_echo(server)) ||
- time_before(jiffies, server->lstrp + SMB_ECHO_INTERVAL - HZ))
+ time_before(jiffies, server->lstrp + echo_interval - HZ))
goto requeue_echo;
rc = server->ops->echo ? server->ops->echo(server) : -ENOSYS;
@@ -924,10 +936,19 @@
server->lstrp = jiffies;
if (mid_entry != NULL) {
+ if ((mid_entry->mid_flags & MID_WAIT_CANCELLED) &&
+ mid_entry->mid_state == MID_RESPONSE_RECEIVED &&
+ server->ops->handle_cancelled_mid)
+ server->ops->handle_cancelled_mid(
+ mid_entry->resp_buf,
+ server);
+
if (!mid_entry->multiRsp || mid_entry->multiEnd)
mid_entry->callback(mid_entry);
- } else if (!server->ops->is_oplock_break ||
- !server->ops->is_oplock_break(buf, server)) {
+ } else if (server->ops->is_oplock_break &&
+ server->ops->is_oplock_break(buf, server)) {
+ cifs_dbg(FYI, "Received oplock break\n");
+ } else {
cifs_dbg(VFS, "No task to wake, unknown frame received! NumMids %d\n",
atomic_read(&midCount));
cifs_dump_mem("Received Data is: ", buf,
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index 26a3b38..297e05c9 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -183,15 +183,20 @@
}
/*
+ * Don't allow path components longer than the server max.
* Don't allow the separator character in a path component.
* The VFS will not allow "/", but "\" is allowed by posix.
*/
static int
-check_name(struct dentry *direntry)
+check_name(struct dentry *direntry, struct cifs_tcon *tcon)
{
struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb);
int i;
+ if (unlikely(direntry->d_name.len >
+ le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength)))
+ return -ENAMETOOLONG;
+
if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)) {
for (i = 0; i < direntry->d_name.len; i++) {
if (direntry->d_name.name[i] == '\\') {
@@ -489,10 +494,6 @@
return finish_no_open(file, res);
}
- rc = check_name(direntry);
- if (rc)
- return rc;
-
xid = get_xid();
cifs_dbg(FYI, "parent inode = 0x%p name is: %pd and dentry = 0x%p\n",
@@ -505,6 +506,11 @@
}
tcon = tlink_tcon(tlink);
+
+ rc = check_name(direntry, tcon);
+ if (rc)
+ goto out_free_xid;
+
server = tcon->ses->server;
if (server->ops->new_lease_key)
@@ -765,7 +771,7 @@
}
pTcon = tlink_tcon(tlink);
- rc = check_name(direntry);
+ rc = check_name(direntry, pTcon);
if (rc)
goto lookup_out;
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 72f270d..a0c0a49 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -2545,7 +2545,7 @@
wdata->credits = credits;
if (!wdata->cfile->invalidHandle ||
- !cifs_reopen_file(wdata->cfile, false))
+ !(rc = cifs_reopen_file(wdata->cfile, false)))
rc = server->ops->async_writev(wdata,
cifs_uncached_writedata_release);
if (rc) {
@@ -2958,7 +2958,7 @@
rdata->credits = credits;
if (!rdata->cfile->invalidHandle ||
- !cifs_reopen_file(rdata->cfile, true))
+ !(rc = cifs_reopen_file(rdata->cfile, true)))
rc = server->ops->async_readv(rdata);
error:
if (rc) {
@@ -3544,7 +3544,7 @@
}
if (!rdata->cfile->invalidHandle ||
- !cifs_reopen_file(rdata->cfile, true))
+ !(rc = cifs_reopen_file(rdata->cfile, true)))
rc = server->ops->async_readv(rdata);
if (rc) {
add_credits_and_wake_if(server, rdata->credits, 0);
diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c
index 35cf990..a8f5b31 100644
--- a/fs/cifs/ioctl.c
+++ b/fs/cifs/ioctl.c
@@ -272,6 +272,8 @@
rc = -EOPNOTSUPP;
break;
case CIFS_IOC_GET_MNT_INFO:
+ if (pSMBFile == NULL)
+ break;
tcon = tlink_tcon(pSMBFile->tlink);
rc = smb_mnt_get_fsinfo(xid, tcon, (void __user *)arg);
break;
diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
index fc537c2..efd72e1 100644
--- a/fs/cifs/smb1ops.c
+++ b/fs/cifs/smb1ops.c
@@ -849,8 +849,13 @@
struct cifs_fid *fid, __u16 search_flags,
struct cifs_search_info *srch_inf)
{
- return CIFSFindFirst(xid, tcon, path, cifs_sb,
- &fid->netfid, search_flags, srch_inf, true);
+ int rc;
+
+ rc = CIFSFindFirst(xid, tcon, path, cifs_sb,
+ &fid->netfid, search_flags, srch_inf, true);
+ if (rc)
+ cifs_dbg(FYI, "find first failed=%d\n", rc);
+ return rc;
}
static int
@@ -1015,6 +1020,15 @@
return !cfile->srch_inf.endOfSearch && !cfile->invalidHandle;
}
+static bool
+cifs_can_echo(struct TCP_Server_Info *server)
+{
+ if (server->tcpStatus == CifsGood)
+ return true;
+
+ return false;
+}
+
struct smb_version_operations smb1_operations = {
.send_cancel = send_nt_cancel,
.compare_fids = cifs_compare_fids,
@@ -1049,6 +1063,7 @@
.get_dfs_refer = CIFSGetDFSRefer,
.qfs_tcon = cifs_qfs_tcon,
.is_path_accessible = cifs_is_path_accessible,
+ .can_echo = cifs_can_echo,
.query_path_info = cifs_query_path_info,
.query_file_info = cifs_query_file_info,
.get_srv_inum = cifs_get_srv_inum,
diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
index e5bc85e..76ccf20 100644
--- a/fs/cifs/smb2misc.c
+++ b/fs/cifs/smb2misc.c
@@ -630,3 +630,47 @@
cifs_dbg(FYI, "Can not process oplock break for non-existent connection\n");
return false;
}
+
+void
+smb2_cancelled_close_fid(struct work_struct *work)
+{
+ struct close_cancelled_open *cancelled = container_of(work,
+ struct close_cancelled_open, work);
+
+ cifs_dbg(VFS, "Close unmatched open\n");
+
+ SMB2_close(0, cancelled->tcon, cancelled->fid.persistent_fid,
+ cancelled->fid.volatile_fid);
+ cifs_put_tcon(cancelled->tcon);
+ kfree(cancelled);
+}
+
+int
+smb2_handle_cancelled_mid(char *buffer, struct TCP_Server_Info *server)
+{
+ struct smb2_hdr *hdr = (struct smb2_hdr *)buffer;
+ struct smb2_create_rsp *rsp = (struct smb2_create_rsp *)buffer;
+ struct cifs_tcon *tcon;
+ struct close_cancelled_open *cancelled;
+
+ if (hdr->Command != SMB2_CREATE || hdr->Status != STATUS_SUCCESS)
+ return 0;
+
+ cancelled = kzalloc(sizeof(*cancelled), GFP_KERNEL);
+ if (!cancelled)
+ return -ENOMEM;
+
+ tcon = smb2_find_smb_tcon(server, hdr->SessionId, hdr->TreeId);
+ if (!tcon) {
+ kfree(cancelled);
+ return -ENOENT;
+ }
+
+ cancelled->fid.persistent_fid = rsp->PersistentFileId;
+ cancelled->fid.volatile_fid = rsp->VolatileFileId;
+ cancelled->tcon = tcon;
+ INIT_WORK(&cancelled->work, smb2_cancelled_close_fid);
+ queue_work(cifsiod_wq, &cancelled->work);
+
+ return 0;
+}
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index be34b48..1d125d3 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -909,7 +909,7 @@
rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, NULL);
kfree(utf16_path);
if (rc) {
- cifs_dbg(VFS, "open dir failed\n");
+ cifs_dbg(FYI, "open dir failed rc=%d\n", rc);
return rc;
}
@@ -919,7 +919,7 @@
rc = SMB2_query_directory(xid, tcon, fid->persistent_fid,
fid->volatile_fid, 0, srch_inf);
if (rc) {
- cifs_dbg(VFS, "query directory failed\n");
+ cifs_dbg(FYI, "query directory failed rc=%d\n", rc);
SMB2_close(xid, tcon, fid->persistent_fid, fid->volatile_fid);
}
return rc;
@@ -1511,6 +1511,7 @@
.clear_stats = smb2_clear_stats,
.print_stats = smb2_print_stats,
.is_oplock_break = smb2_is_valid_oplock_break,
+ .handle_cancelled_mid = smb2_handle_cancelled_mid,
.downgrade_oplock = smb2_downgrade_oplock,
.need_neg = smb2_need_neg,
.negotiate = smb2_negotiate,
@@ -1589,6 +1590,7 @@
.clear_stats = smb2_clear_stats,
.print_stats = smb2_print_stats,
.is_oplock_break = smb2_is_valid_oplock_break,
+ .handle_cancelled_mid = smb2_handle_cancelled_mid,
.downgrade_oplock = smb2_downgrade_oplock,
.need_neg = smb2_need_neg,
.negotiate = smb2_negotiate,
@@ -1670,6 +1672,7 @@
.print_stats = smb2_print_stats,
.dump_share_caps = smb2_dump_share_caps,
.is_oplock_break = smb2_is_valid_oplock_break,
+ .handle_cancelled_mid = smb2_handle_cancelled_mid,
.downgrade_oplock = smb2_downgrade_oplock,
.need_neg = smb2_need_neg,
.negotiate = smb2_negotiate,
@@ -1757,6 +1760,7 @@
.print_stats = smb2_print_stats,
.dump_share_caps = smb2_dump_share_caps,
.is_oplock_break = smb2_is_valid_oplock_break,
+ .handle_cancelled_mid = smb2_handle_cancelled_mid,
.downgrade_oplock = smb2_downgrade_oplock,
.need_neg = smb2_need_neg,
.negotiate = smb2_negotiate,
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 2fa754c..6c484dd 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -564,8 +564,12 @@
}
if (rsplen != sizeof(struct validate_negotiate_info_rsp)) {
- cifs_dbg(VFS, "invalid size of protocol negotiate response\n");
- return -EIO;
+ cifs_dbg(VFS, "invalid protocol negotiate response size: %d\n",
+ rsplen);
+
+ /* relax check since Mac returns max bufsize allowed on ioctl */
+ if (rsplen > CIFSMaxBufSize)
+ return -EIO;
}
/* check validate negotiate info response matches what we got earlier */
@@ -932,9 +936,6 @@
else
return -EIO;
- if (tcon && tcon->bad_network_name)
- return -ENOENT;
-
if ((tcon && tcon->seal) &&
((ses->server->capabilities & SMB2_GLOBAL_CAP_ENCRYPTION) == 0)) {
cifs_dbg(VFS, "encryption requested but no server support");
@@ -952,6 +953,10 @@
return -EINVAL;
}
+ /* SMB2 TREE_CONNECT request must be called with TreeId == 0 */
+ if (tcon)
+ tcon->tid = 0;
+
rc = small_smb2_init(SMB2_TREE_CONNECT, tcon, (void **) &req);
if (rc) {
kfree(unc_path);
@@ -1032,8 +1037,6 @@
tcon_error_exit:
if (rsp->hdr.Status == STATUS_BAD_NETWORK_NAME) {
cifs_dbg(VFS, "BAD_NETWORK_NAME: %s\n", tree);
- if (tcon)
- tcon->bad_network_name = true;
}
goto tcon_exit;
}
@@ -1519,8 +1522,12 @@
* than one credit. Windows typically sets this smaller, but for some
* ioctls it may be useful to allow server to send more. No point
* limiting what the server can send as long as fits in one credit
+ * Unfortunately - we can not handle more than CIFS_MAX_MSG_SIZE
+ * (by default, note that it can be overridden to make max larger)
+ * in responses (except for read responses which can be bigger).
+ * We may want to bump this limit up
*/
- req->MaxOutputResponse = cpu_to_le32(0xFF00); /* < 64K uses 1 credit */
+ req->MaxOutputResponse = cpu_to_le32(CIFSMaxBufSize);
if (is_fsctl)
req->Flags = cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL);
@@ -2761,8 +2768,8 @@
kst->f_bsize = le32_to_cpu(pfs_inf->BytesPerSector) *
le32_to_cpu(pfs_inf->SectorsPerAllocationUnit);
kst->f_blocks = le64_to_cpu(pfs_inf->TotalAllocationUnits);
- kst->f_bfree = le64_to_cpu(pfs_inf->ActualAvailableAllocationUnits);
- kst->f_bavail = le64_to_cpu(pfs_inf->CallerAvailableAllocationUnits);
+ kst->f_bfree = kst->f_bavail =
+ le64_to_cpu(pfs_inf->CallerAvailableAllocationUnits);
return;
}
diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
index b8f553b..aacb15b 100644
--- a/fs/cifs/smb2pdu.h
+++ b/fs/cifs/smb2pdu.h
@@ -82,8 +82,8 @@
#define NUMBER_OF_SMB2_COMMANDS 0x0013
-/* BB FIXME - analyze following length BB */
-#define MAX_SMB2_HDR_SIZE 0x78 /* 4 len + 64 hdr + (2*24 wct) + 2 bct + 2 pad */
+/* 4 len + 52 transform hdr + 64 hdr + 56 create rsp */
+#define MAX_SMB2_HDR_SIZE 0x00b0
#define SMB2_PROTO_NUMBER cpu_to_le32(0x424d53fe)
diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
index 0a406ae7..adc5234 100644
--- a/fs/cifs/smb2proto.h
+++ b/fs/cifs/smb2proto.h
@@ -47,6 +47,10 @@
struct smb_rqst *rqst);
extern struct mid_q_entry *smb2_setup_async_request(
struct TCP_Server_Info *server, struct smb_rqst *rqst);
+extern struct cifs_ses *smb2_find_smb_ses(struct TCP_Server_Info *server,
+ __u64 ses_id);
+extern struct cifs_tcon *smb2_find_smb_tcon(struct TCP_Server_Info *server,
+ __u64 ses_id, __u32 tid);
extern int smb2_calc_signature(struct smb_rqst *rqst,
struct TCP_Server_Info *server);
extern int smb3_calc_signature(struct smb_rqst *rqst,
@@ -157,6 +161,9 @@
extern int SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
const u64 persistent_fid, const u64 volatile_fid,
const __u8 oplock_level);
+extern int smb2_handle_cancelled_mid(char *buffer,
+ struct TCP_Server_Info *server);
+void smb2_cancelled_close_fid(struct work_struct *work);
extern int SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_file_id, u64 volatile_file_id,
struct kstatfs *FSData);
diff --git a/fs/cifs/smb2transport.c b/fs/cifs/smb2transport.c
index d4c5b6f..69e3b32 100644
--- a/fs/cifs/smb2transport.c
+++ b/fs/cifs/smb2transport.c
@@ -115,22 +115,68 @@
}
static struct cifs_ses *
-smb2_find_smb_ses(struct smb2_hdr *smb2hdr, struct TCP_Server_Info *server)
+smb2_find_smb_ses_unlocked(struct TCP_Server_Info *server, __u64 ses_id)
{
struct cifs_ses *ses;
- spin_lock(&cifs_tcp_ses_lock);
list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
- if (ses->Suid != smb2hdr->SessionId)
+ if (ses->Suid != ses_id)
continue;
- spin_unlock(&cifs_tcp_ses_lock);
return ses;
}
- spin_unlock(&cifs_tcp_ses_lock);
return NULL;
}
+struct cifs_ses *
+smb2_find_smb_ses(struct TCP_Server_Info *server, __u64 ses_id)
+{
+ struct cifs_ses *ses;
+
+ spin_lock(&cifs_tcp_ses_lock);
+ ses = smb2_find_smb_ses_unlocked(server, ses_id);
+ spin_unlock(&cifs_tcp_ses_lock);
+
+ return ses;
+}
+
+static struct cifs_tcon *
+smb2_find_smb_sess_tcon_unlocked(struct cifs_ses *ses, __u32 tid)
+{
+ struct cifs_tcon *tcon;
+
+ list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
+ if (tcon->tid != tid)
+ continue;
+ ++tcon->tc_count;
+ return tcon;
+ }
+
+ return NULL;
+}
+
+/*
+ * Obtain tcon corresponding to the tid in the given
+ * cifs_ses
+ */
+
+struct cifs_tcon *
+smb2_find_smb_tcon(struct TCP_Server_Info *server, __u64 ses_id, __u32 tid)
+{
+ struct cifs_ses *ses;
+ struct cifs_tcon *tcon;
+
+ spin_lock(&cifs_tcp_ses_lock);
+ ses = smb2_find_smb_ses_unlocked(server, ses_id);
+ if (!ses) {
+ spin_unlock(&cifs_tcp_ses_lock);
+ return NULL;
+ }
+ tcon = smb2_find_smb_sess_tcon_unlocked(ses, tid);
+ spin_unlock(&cifs_tcp_ses_lock);
+
+ return tcon;
+}
int
smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
@@ -143,7 +189,7 @@
struct smb2_hdr *smb2_pdu = (struct smb2_hdr *)iov[0].iov_base;
struct cifs_ses *ses;
- ses = smb2_find_smb_ses(smb2_pdu, server);
+ ses = smb2_find_smb_ses(server, smb2_pdu->SessionId);
if (!ses) {
cifs_dbg(VFS, "%s: Could not find session\n", __func__);
return 0;
@@ -314,7 +360,7 @@
struct smb2_hdr *smb2_pdu = (struct smb2_hdr *)iov[0].iov_base;
struct cifs_ses *ses;
- ses = smb2_find_smb_ses(smb2_pdu, server);
+ ses = smb2_find_smb_ses(server, smb2_pdu->SessionId);
if (!ses) {
cifs_dbg(VFS, "%s: Could not find session\n", __func__);
return 0;
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 87abe8e..54af102 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -786,9 +786,11 @@
rc = wait_for_response(ses->server, midQ);
if (rc != 0) {
+ cifs_dbg(FYI, "Cancelling wait for mid %llu\n", midQ->mid);
send_cancel(ses->server, buf, midQ);
spin_lock(&GlobalMid_Lock);
if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
+ midQ->mid_flags |= MID_WAIT_CANCELLED;
midQ->callback = DeleteMidQEntry;
spin_unlock(&GlobalMid_Lock);
cifs_small_buf_release(buf);
diff --git a/fs/configfs/symlink.c b/fs/configfs/symlink.c
index ec5c832..0525ebc 100644
--- a/fs/configfs/symlink.c
+++ b/fs/configfs/symlink.c
@@ -83,14 +83,13 @@
ret = -ENOMEM;
sl = kmalloc(sizeof(struct configfs_symlink), GFP_KERNEL);
if (sl) {
- sl->sl_target = config_item_get(item);
spin_lock(&configfs_dirent_lock);
if (target_sd->s_type & CONFIGFS_USET_DROPPING) {
spin_unlock(&configfs_dirent_lock);
- config_item_put(item);
kfree(sl);
return -ENOENT;
}
+ sl->sl_target = config_item_get(item);
list_add(&sl->sl_list, &target_sd->s_links);
spin_unlock(&configfs_dirent_lock);
ret = configfs_create_link(sl, parent_item->ci_dentry,
diff --git a/fs/coredump.c b/fs/coredump.c
index fe0a28d..2ce5ef4 100644
--- a/fs/coredump.c
+++ b/fs/coredump.c
@@ -810,3 +810,21 @@
return mod ? dump_skip(cprm, align - mod) : 1;
}
EXPORT_SYMBOL(dump_align);
+
+/*
+ * Ensures that file size is big enough to contain the current file
+ * position. This prevents gdb from complaining about a truncated file
+ * if the last "write" to the file was dump_skip.
+ */
+void dump_truncate(struct coredump_params *cprm)
+{
+ struct file *file = cprm->file;
+ loff_t offset;
+
+ if (file->f_op->llseek && file->f_op->llseek != no_llseek) {
+ offset = file->f_op->llseek(file, 0, SEEK_CUR);
+ if (i_size_read(file->f_mapping->host) < offset)
+ do_truncate(file->f_path.dentry, offset, 0, file);
+ }
+}
+EXPORT_SYMBOL(dump_truncate);
diff --git a/fs/dcache.c b/fs/dcache.c
index bdf37d8..5bf7b4a 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1155,11 +1155,12 @@
LIST_HEAD(dispose);
freed = list_lru_walk(&sb->s_dentry_lru,
- dentry_lru_isolate_shrink, &dispose, UINT_MAX);
+ dentry_lru_isolate_shrink, &dispose, 1024);
this_cpu_sub(nr_dentry_unused, freed);
shrink_dentry_list(&dispose);
- } while (freed > 0);
+ cond_resched();
+ } while (list_lru_count(&sb->s_dentry_lru) > 0);
}
EXPORT_SYMBOL(shrink_dcache_sb);
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 0f1517d..7cdf8ad 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -589,7 +589,7 @@
/*
* Call into the fs to map some more disk blocks. We record the current number
* of available blocks at sdio->blocks_available. These are in units of the
- * fs blocksize, (1 << inode->i_blkbits).
+ * fs blocksize, i_blocksize(inode).
*
* The fs is allowed to map lots of blocks at once. If it wants to do that,
* it uses the passed inode-relative block number as the file offset, as usual.
diff --git a/fs/dlm/user.c b/fs/dlm/user.c
index 173b387..e40c440 100644
--- a/fs/dlm/user.c
+++ b/fs/dlm/user.c
@@ -355,6 +355,10 @@
error = misc_register(&ls->ls_device);
if (error) {
kfree(ls->ls_device.name);
+ /* this has to be set to NULL
+ * to avoid a double-free in dlm_device_deregister
+ */
+ ls->ls_device.name = NULL;
}
fail:
return error;
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index e73aae8..e870c0e 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -519,8 +519,13 @@
wait_queue_head_t *whead;
rcu_read_lock();
- /* If it is cleared by POLLFREE, it should be rcu-safe */
- whead = rcu_dereference(pwq->whead);
+ /*
+ * If it is cleared by POLLFREE, it should be rcu-safe.
+ * If we read NULL we need a barrier paired with
+ * smp_store_release() in ep_poll_callback(), otherwise
+ * we rely on whead->lock.
+ */
+ whead = smp_load_acquire(&pwq->whead);
if (whead)
remove_wait_queue(whead, &pwq->wait);
rcu_read_unlock();
@@ -1004,17 +1009,6 @@
struct epitem *epi = ep_item_from_wait(wait);
struct eventpoll *ep = epi->ep;
- if ((unsigned long)key & POLLFREE) {
- ep_pwq_from_wait(wait)->whead = NULL;
- /*
- * whead = NULL above can race with ep_remove_wait_queue()
- * which can do another remove_wait_queue() after us, so we
- * can't use __remove_wait_queue(). whead->lock is held by
- * the caller.
- */
- list_del_init(&wait->task_list);
- }
-
spin_lock_irqsave(&ep->lock, flags);
/*
@@ -1079,6 +1073,23 @@
if (pwake)
ep_poll_safewake(&ep->poll_wait);
+
+ if ((unsigned long)key & POLLFREE) {
+ /*
+ * If we race with ep_remove_wait_queue() it can miss
+ * ->whead = NULL and do another remove_wait_queue() after
+ * us, so we can't use __remove_wait_queue().
+ */
+ list_del_init(&wait->task_list);
+ /*
+ * ->whead != NULL protects us from the race with ep_free()
+ * or ep_remove(), ep_remove_wait_queue() takes whead->lock
+ * held by the caller. Once we nullify it, nothing protects
+ * ep/epi or even wait.
+ */
+ smp_store_release(&ep_pwq_from_wait(wait)->whead, NULL);
+ }
+
return 1;
}
diff --git a/fs/exec.c b/fs/exec.c
index 8c58183..dd3a594 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -206,7 +206,24 @@
if (write) {
unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start;
- struct rlimit *rlim;
+ unsigned long ptr_size, limit;
+
+ /*
+ * Since the stack will hold pointers to the strings, we
+ * must account for them as well.
+ *
+ * The size calculation is the entire vma while each arg page is
+ * built, so each time we get here it's calculating how far it
+ * is currently (rather than each call being just the newly
+ * added size from the arg page). As a result, we need to
+ * always add the entire size of the pointers, so that on the
+ * last call to get_arg_page() we'll actually have the entire
+ * correct size.
+ */
+ ptr_size = (bprm->argc + bprm->envc) * sizeof(void *);
+ if (ptr_size > ULONG_MAX - size)
+ goto fail;
+ size += ptr_size;
acct_arg_size(bprm, size / PAGE_SIZE);
@@ -218,20 +235,24 @@
return page;
/*
- * Limit to 1/4-th the stack size for the argv+env strings.
+ * Limit to 1/4 of the max stack size or 3/4 of _STK_LIM
+ * (whichever is smaller) for the argv+env strings.
* This ensures that:
* - the remaining binfmt code will not run out of stack space,
* - the program will have a reasonable amount of stack left
* to work from.
*/
- rlim = current->signal->rlim;
- if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur) / 4) {
- put_page(page);
- return NULL;
- }
+ limit = _STK_LIM / 4 * 3;
+ limit = min(limit, rlimit(RLIMIT_STACK) / 4);
+ if (size > limit)
+ goto fail;
}
return page;
+
+fail:
+ put_page(page);
+ return NULL;
}
static void put_arg_page(struct page *page)
diff --git a/fs/ext4/crypto.c b/fs/ext4/crypto.c
index e14b1b8..b9f838a 100644
--- a/fs/ext4/crypto.c
+++ b/fs/ext4/crypto.c
@@ -271,7 +271,7 @@
struct crypto_ablkcipher *tfm = ci->ci_ctfm;
int res = 0;
- req = ablkcipher_request_alloc(tfm, GFP_NOFS);
+ req = ablkcipher_request_alloc(tfm, gfp_flags);
if (!req) {
printk_ratelimited(KERN_ERR
"%s: crypto_request_alloc() failed\n",
diff --git a/fs/ext4/crypto_fname.c b/fs/ext4/crypto_fname.c
index e2645ca..026716b 100644
--- a/fs/ext4/crypto_fname.c
+++ b/fs/ext4/crypto_fname.c
@@ -344,7 +344,7 @@
memcpy(buf+4, &hinfo->minor_hash, 4);
} else
memset(buf, 0, 8);
- memcpy(buf + 8, iname->name + iname->len - 16, 16);
+ memcpy(buf + 8, iname->name + ((iname->len - 17) & ~15), 16);
oname->name[0] = '_';
ret = digest_encode(buf, 24, oname->name+1);
oname->len = ret + 1;
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 8a456f9..61d5bfc 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -4902,6 +4902,8 @@
/* Zero out partial block at the edges of the range */
ret = ext4_zero_partial_blocks(handle, inode, offset, len);
+ if (ret >= 0)
+ ext4_update_inode_fsync_trans(handle, inode, 1);
if (file->f_flags & O_SYNC)
ext4_handle_sync(handle);
@@ -5597,6 +5599,7 @@
ext4_handle_sync(handle);
inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
ext4_mark_inode_dirty(handle, inode);
+ ext4_update_inode_fsync_trans(handle, inode, 1);
out_stop:
ext4_journal_stop(handle);
@@ -5770,6 +5773,8 @@
up_write(&EXT4_I(inode)->i_data_sem);
if (IS_SYNC(inode))
ext4_handle_sync(handle);
+ if (ret >= 0)
+ ext4_update_inode_fsync_trans(handle, inode, 1);
out_stop:
ext4_journal_stop(handle);
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 0d24ebc..45ef997 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -463,47 +463,27 @@
num = min_t(pgoff_t, end - index, PAGEVEC_SIZE);
nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
(pgoff_t)num);
- if (nr_pages == 0) {
- if (whence == SEEK_DATA)
- break;
-
- BUG_ON(whence != SEEK_HOLE);
- /*
- * If this is the first time to go into the loop and
- * offset is not beyond the end offset, it will be a
- * hole at this offset
- */
- if (lastoff == startoff || lastoff < endoff)
- found = 1;
+ if (nr_pages == 0)
break;
- }
-
- /*
- * If this is the first time to go into the loop and
- * offset is smaller than the first page offset, it will be a
- * hole at this offset.
- */
- if (lastoff == startoff && whence == SEEK_HOLE &&
- lastoff < page_offset(pvec.pages[0])) {
- found = 1;
- break;
- }
for (i = 0; i < nr_pages; i++) {
struct page *page = pvec.pages[i];
struct buffer_head *bh, *head;
/*
- * If the current offset is not beyond the end of given
- * range, it will be a hole.
+ * If current offset is smaller than the page offset,
+ * there is a hole at this offset.
*/
- if (lastoff < endoff && whence == SEEK_HOLE &&
- page->index > end) {
+ if (whence == SEEK_HOLE && lastoff < endoff &&
+ lastoff < page_offset(pvec.pages[i])) {
found = 1;
*offset = lastoff;
goto out;
}
+ if (page->index > end)
+ goto out;
+
lock_page(page);
if (unlikely(page->mapping != inode->i_mapping)) {
@@ -520,6 +500,8 @@
lastoff = page_offset(page);
bh = head = page_buffers(page);
do {
+ if (lastoff + bh->b_size <= startoff)
+ goto next;
if (buffer_uptodate(bh) ||
buffer_unwritten(bh)) {
if (whence == SEEK_DATA)
@@ -534,6 +516,7 @@
unlock_page(page);
goto out;
}
+next:
lastoff += bh->b_size;
bh = bh->b_this_page;
} while (bh != head);
@@ -543,20 +526,18 @@
unlock_page(page);
}
- /*
- * The no. of pages is less than our desired, that would be a
- * hole in there.
- */
- if (nr_pages < num && whence == SEEK_HOLE) {
- found = 1;
- *offset = lastoff;
+ /* The no. of pages is less than our desired, we are done. */
+ if (nr_pages < num)
break;
- }
index = pvec.pages[i - 1]->index + 1;
pagevec_release(&pvec);
} while (index <= end);
+ if (whence == SEEK_HOLE && lastoff < endoff) {
+ found = 1;
+ *offset = lastoff;
+ }
out:
pagevec_release(&pvec);
return found;
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index db81acb..280d67f 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -1171,10 +1171,9 @@
set_buffer_uptodate(dir_block);
err = ext4_handle_dirty_dirent_node(handle, inode, dir_block);
if (err)
- goto out;
+ return err;
set_buffer_verified(dir_block);
-out:
- return err;
+ return ext4_mark_inode_dirty(handle, inode);
}
static int ext4_convert_inline_data_nolock(handle_t *handle,
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index eb60fad..1d75b80 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -73,10 +73,9 @@
csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum,
csum_size);
offset += csum_size;
- csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset,
- EXT4_INODE_SIZE(inode->i_sb) -
- offset);
}
+ csum = ext4_chksum(sbi, csum, (__u8 *)raw + offset,
+ EXT4_INODE_SIZE(inode->i_sb) - offset);
}
return csum;
@@ -2065,7 +2064,7 @@
{
struct inode *inode = mpd->inode;
int err;
- ext4_lblk_t blocks = (i_size_read(inode) + (1 << inode->i_blkbits) - 1)
+ ext4_lblk_t blocks = (i_size_read(inode) + i_blocksize(inode) - 1)
>> inode->i_blkbits;
do {
@@ -3863,6 +3862,8 @@
inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
ext4_mark_inode_dirty(handle, inode);
+ if (ret >= 0)
+ ext4_update_inode_fsync_trans(handle, inode, 1);
out_stop:
ext4_journal_stop(handle);
out_dio:
@@ -5238,8 +5239,9 @@
/* No extended attributes present */
if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
- memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0,
- new_extra_isize);
+ memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE +
+ EXT4_I(inode)->i_extra_isize, 0,
+ new_extra_isize - EXT4_I(inode)->i_extra_isize);
EXT4_I(inode)->i_extra_isize = new_extra_isize;
return 0;
}
@@ -5469,6 +5471,11 @@
file_update_time(vma->vm_file);
down_read(&EXT4_I(inode)->i_mmap_sem);
+
+ ret = ext4_convert_inline_data(inode);
+ if (ret)
+ goto out_ret;
+
/* Delalloc case is easy... */
if (test_opt(inode->i_sb, DELALLOC) &&
!ext4_should_journal_data(inode) &&
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index 3a25946..c21826b 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -626,6 +626,9 @@
struct ext4_encryption_policy policy;
int err = 0;
+ if (!ext4_has_feature_encrypt(sb))
+ return -EOPNOTSUPP;
+
if (copy_from_user(&policy,
(struct ext4_encryption_policy __user *)arg,
sizeof(policy))) {
diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
index 7861d80..05048fc 100644
--- a/fs/ext4/move_extent.c
+++ b/fs/ext4/move_extent.c
@@ -187,7 +187,7 @@
if (PageUptodate(page))
return 0;
- blocksize = 1 << inode->i_blkbits;
+ blocksize = i_blocksize(inode);
if (!page_has_buffers(page))
create_empty_buffers(page, blocksize, 0);
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index b936da9..8a196e9 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -1243,9 +1243,9 @@
if (unlikely(!name)) {
if (fname->usr_fname->name[0] == '_') {
int ret;
- if (de->name_len < 16)
+ if (de->name_len <= 32)
return 0;
- ret = memcmp(de->name + de->name_len - 16,
+ ret = memcmp(de->name + ((de->name_len - 17) & ~15),
fname->crypto_buf.name + 8, 16);
return (ret == 0) ? 1 : 0;
}
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index 34038e3..74516ef 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -1926,7 +1926,8 @@
n_desc_blocks = o_desc_blocks +
le16_to_cpu(es->s_reserved_gdt_blocks);
n_group = n_desc_blocks * EXT4_DESC_PER_BLOCK(sb);
- n_blocks_count = n_group * EXT4_BLOCKS_PER_GROUP(sb);
+ n_blocks_count = (ext4_fsblk_t)n_group *
+ EXT4_BLOCKS_PER_GROUP(sb);
n_group--; /* set to last group number */
}
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 9b9a7ab..3105444 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -67,6 +67,7 @@
static void ext4_clear_journal_err(struct super_block *sb,
struct ext4_super_block *es);
static int ext4_sync_fs(struct super_block *sb, int wait);
+static void ext4_umount_end(struct super_block *sb, int flags);
static int ext4_remount(struct super_block *sb, int *flags, char *data);
static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf);
static int ext4_unfreeze(struct super_block *sb);
@@ -1127,6 +1128,7 @@
.freeze_fs = ext4_freeze,
.unfreeze_fs = ext4_unfreeze,
.statfs = ext4_statfs,
+ .umount_end = ext4_umount_end,
.remount_fs = ext4_remount,
.show_options = ext4_show_options,
#ifdef CONFIG_QUOTA
@@ -3665,7 +3667,7 @@
db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) /
EXT4_DESC_PER_BLOCK(sb);
if (ext4_has_feature_meta_bg(sb)) {
- if (le32_to_cpu(es->s_first_meta_bg) >= db_count) {
+ if (le32_to_cpu(es->s_first_meta_bg) > db_count) {
ext4_msg(sb, KERN_WARNING,
"first meta block group too large: %u "
"(group descriptor block count %u)",
@@ -4625,6 +4627,22 @@
#endif
};
+static void ext4_umount_end(struct super_block *sb, int flags)
+{
+ /*
+ * this is called at the end of umount(2). If there is an unclosed
+ * namespace, ext4 won't do put_super() which triggers fsck in the
+ * next boot.
+ */
+ if ((flags & MNT_FORCE) || atomic_read(&sb->s_active) > 1) {
+ ext4_msg(sb, KERN_ERR,
+ "errors=remount-ro for active namespaces on umount %x",
+ flags);
+ clear_opt(sb, ERRORS_PANIC);
+ set_opt(sb, ERRORS_RO);
+ }
+}
+
static int ext4_remount(struct super_block *sb, int *flags, char *data)
{
struct ext4_super_block *es;
diff --git a/fs/ext4/sysfs.c b/fs/ext4/sysfs.c
index 5d09ea58..c2ee23a 100644
--- a/fs/ext4/sysfs.c
+++ b/fs/ext4/sysfs.c
@@ -100,7 +100,7 @@
int ret;
ret = kstrtoull(skip_spaces(buf), 0, &val);
- if (!ret || val >= clusters)
+ if (ret || val >= clusters)
return -EINVAL;
atomic64_set(&sbi->s_resv_clusters, val);
diff --git a/fs/f2fs/acl.c b/fs/f2fs/acl.c
index e9a8d67..83dcf7b 100644
--- a/fs/f2fs/acl.c
+++ b/fs/f2fs/acl.c
@@ -213,7 +213,7 @@
switch (type) {
case ACL_TYPE_ACCESS:
name_index = F2FS_XATTR_INDEX_POSIX_ACL_ACCESS;
- if (acl) {
+ if (acl && !ipage) {
error = posix_acl_update_mode(inode, &inode->i_mode, &acl);
if (error)
return error;
diff --git a/fs/f2fs/crypto_fname.c b/fs/f2fs/crypto_fname.c
index ab377d4..38349ed 100644
--- a/fs/f2fs/crypto_fname.c
+++ b/fs/f2fs/crypto_fname.c
@@ -333,7 +333,7 @@
memset(buf + 4, 0, 4);
} else
memset(buf, 0, 8);
- memcpy(buf + 8, iname->name + iname->len - 16, 16);
+ memcpy(buf + 8, iname->name + ((iname->len - 17) & ~15), 16);
oname->name[0] = '_';
ret = digest_encode(buf, 24, oname->name + 1);
oname->len = ret + 1;
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index 7c1678b..60972a5 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -124,19 +124,29 @@
de = &d->dentry[bit_pos];
- /* encrypted case */
+ if (de->hash_code != namehash)
+ goto not_match;
+
de_name.name = d->filename[bit_pos];
de_name.len = le16_to_cpu(de->name_len);
- /* show encrypted name */
- if (fname->hash) {
- if (de->hash_code == fname->hash)
- goto found;
- } else if (de_name.len == name->len &&
- de->hash_code == namehash &&
- !memcmp(de_name.name, name->name, name->len))
+#ifdef CONFIG_F2FS_FS_ENCRYPTION
+ if (unlikely(!name->name)) {
+ if (fname->usr_fname->name[0] == '_') {
+ if (de_name.len > 32 &&
+ !memcmp(de_name.name + ((de_name.len - 17) & ~15),
+ fname->crypto_buf.name + 8, 16))
+ goto found;
+ goto not_match;
+ }
+ name->name = fname->crypto_buf.name;
+ name->len = fname->crypto_buf.len;
+ }
+#endif
+ if (de_name.len == name->len &&
+ !memcmp(de_name.name, name->name, name->len))
goto found;
-
+not_match:
if (max_slots && max_len > *max_slots)
*max_slots = max_len;
max_len = 0;
@@ -170,7 +180,7 @@
int max_slots;
f2fs_hash_t namehash;
- namehash = f2fs_dentry_hash(&name);
+ namehash = f2fs_dentry_hash(&name, fname);
f2fs_bug_on(F2FS_I_SB(dir), level > MAX_DIR_HASH_DEPTH);
@@ -547,7 +557,7 @@
level = 0;
slots = GET_DENTRY_SLOTS(new_name.len);
- dentry_hash = f2fs_dentry_hash(&new_name);
+ dentry_hash = f2fs_dentry_hash(&new_name, NULL);
current_depth = F2FS_I(dir)->i_current_depth;
if (F2FS_I(dir)->chash == dentry_hash) {
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index a70056a..b0088c3 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -1723,7 +1723,8 @@
/*
* hash.c
*/
-f2fs_hash_t f2fs_dentry_hash(const struct qstr *);
+f2fs_hash_t f2fs_dentry_hash(const struct qstr *name_info,
+ struct f2fs_filename *fname);
/*
* node.c
diff --git a/fs/f2fs/hash.c b/fs/f2fs/hash.c
index 71b7206..b238d2f 100644
--- a/fs/f2fs/hash.c
+++ b/fs/f2fs/hash.c
@@ -70,7 +70,8 @@
*buf++ = pad;
}
-f2fs_hash_t f2fs_dentry_hash(const struct qstr *name_info)
+f2fs_hash_t f2fs_dentry_hash(const struct qstr *name_info,
+ struct f2fs_filename *fname)
{
__u32 hash;
f2fs_hash_t f2fs_hash;
@@ -79,6 +80,10 @@
const unsigned char *name = name_info->name;
size_t len = name_info->len;
+ /* encrypted bigname case */
+ if (fname && !fname->disk_name.name)
+ return cpu_to_le32(fname->hash);
+
if (is_dot_dotdot(name_info))
return 0;
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index dbb2cc4..f35f3eb 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -321,7 +321,7 @@
if (IS_ERR(ipage))
return NULL;
- namehash = f2fs_dentry_hash(&name);
+ namehash = f2fs_dentry_hash(&name, fname);
inline_dentry = inline_data_addr(ipage);
@@ -486,7 +486,7 @@
f2fs_wait_on_page_writeback(ipage, NODE);
- name_hash = f2fs_dentry_hash(name);
+ name_hash = f2fs_dentry_hash(name, NULL);
make_dentry_ptr(NULL, &d, (void *)dentry_blk, 2);
f2fs_update_dentry(ino, mode, &d, name, name_hash, bit_pos);
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index bf383cc..21907f1 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -918,6 +918,79 @@
return result;
}
+static inline bool sanity_check_area_boundary(struct super_block *sb,
+ struct f2fs_super_block *raw_super)
+{
+ u32 segment0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
+ u32 cp_blkaddr = le32_to_cpu(raw_super->cp_blkaddr);
+ u32 sit_blkaddr = le32_to_cpu(raw_super->sit_blkaddr);
+ u32 nat_blkaddr = le32_to_cpu(raw_super->nat_blkaddr);
+ u32 ssa_blkaddr = le32_to_cpu(raw_super->ssa_blkaddr);
+ u32 main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
+ u32 segment_count_ckpt = le32_to_cpu(raw_super->segment_count_ckpt);
+ u32 segment_count_sit = le32_to_cpu(raw_super->segment_count_sit);
+ u32 segment_count_nat = le32_to_cpu(raw_super->segment_count_nat);
+ u32 segment_count_ssa = le32_to_cpu(raw_super->segment_count_ssa);
+ u32 segment_count_main = le32_to_cpu(raw_super->segment_count_main);
+ u32 segment_count = le32_to_cpu(raw_super->segment_count);
+ u32 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
+
+ if (segment0_blkaddr != cp_blkaddr) {
+ f2fs_msg(sb, KERN_INFO,
+ "Mismatch start address, segment0(%u) cp_blkaddr(%u)",
+ segment0_blkaddr, cp_blkaddr);
+ return true;
+ }
+
+ if (cp_blkaddr + (segment_count_ckpt << log_blocks_per_seg) !=
+ sit_blkaddr) {
+ f2fs_msg(sb, KERN_INFO,
+ "Wrong CP boundary, start(%u) end(%u) blocks(%u)",
+ cp_blkaddr, sit_blkaddr,
+ segment_count_ckpt << log_blocks_per_seg);
+ return true;
+ }
+
+ if (sit_blkaddr + (segment_count_sit << log_blocks_per_seg) !=
+ nat_blkaddr) {
+ f2fs_msg(sb, KERN_INFO,
+ "Wrong SIT boundary, start(%u) end(%u) blocks(%u)",
+ sit_blkaddr, nat_blkaddr,
+ segment_count_sit << log_blocks_per_seg);
+ return true;
+ }
+
+ if (nat_blkaddr + (segment_count_nat << log_blocks_per_seg) !=
+ ssa_blkaddr) {
+ f2fs_msg(sb, KERN_INFO,
+ "Wrong NAT boundary, start(%u) end(%u) blocks(%u)",
+ nat_blkaddr, ssa_blkaddr,
+ segment_count_nat << log_blocks_per_seg);
+ return true;
+ }
+
+ if (ssa_blkaddr + (segment_count_ssa << log_blocks_per_seg) !=
+ main_blkaddr) {
+ f2fs_msg(sb, KERN_INFO,
+ "Wrong SSA boundary, start(%u) end(%u) blocks(%u)",
+ ssa_blkaddr, main_blkaddr,
+ segment_count_ssa << log_blocks_per_seg);
+ return true;
+ }
+
+ if (main_blkaddr + (segment_count_main << log_blocks_per_seg) !=
+ segment0_blkaddr + (segment_count << log_blocks_per_seg)) {
+ f2fs_msg(sb, KERN_INFO,
+ "Wrong MAIN_AREA boundary, start(%u) end(%u) blocks(%u)",
+ main_blkaddr,
+ segment0_blkaddr + (segment_count << log_blocks_per_seg),
+ segment_count_main << log_blocks_per_seg);
+ return true;
+ }
+
+ return false;
+}
+
static int sanity_check_raw_super(struct super_block *sb,
struct f2fs_super_block *raw_super)
{
@@ -947,6 +1020,14 @@
return 1;
}
+ /* check log blocks per segment */
+ if (le32_to_cpu(raw_super->log_blocks_per_seg) != 9) {
+ f2fs_msg(sb, KERN_INFO,
+ "Invalid log blocks per segment (%u)\n",
+ le32_to_cpu(raw_super->log_blocks_per_seg));
+ return 1;
+ }
+
/* Currently, support 512/1024/2048/4096 bytes sector size */
if (le32_to_cpu(raw_super->log_sectorsize) >
F2FS_MAX_LOG_SECTOR_SIZE ||
@@ -966,6 +1047,18 @@
return 1;
}
+ /* check reserved ino info */
+ if (le32_to_cpu(raw_super->node_ino) != 1 ||
+ le32_to_cpu(raw_super->meta_ino) != 2 ||
+ le32_to_cpu(raw_super->root_ino) != 3) {
+ f2fs_msg(sb, KERN_INFO,
+ "Invalid Fs Meta Ino: node(%u) meta(%u) root(%u)",
+ le32_to_cpu(raw_super->node_ino),
+ le32_to_cpu(raw_super->meta_ino),
+ le32_to_cpu(raw_super->root_ino));
+ return 1;
+ }
+
if (le32_to_cpu(raw_super->segment_count) > F2FS_MAX_SEGMENT) {
f2fs_msg(sb, KERN_INFO,
"Invalid segment count (%u)",
@@ -973,6 +1066,10 @@
return 1;
}
+ /* check CP/SIT/NAT/SSA/MAIN_AREA area boundary */
+ if (sanity_check_area_boundary(sb, raw_super))
+ return 1;
+
return 0;
}
@@ -994,20 +1091,18 @@
if (unlikely(fsmeta >= total))
return 1;
- main_segs = le32_to_cpu(sbi->raw_super->segment_count_main);
+ main_segs = le32_to_cpu(raw_super->segment_count_main);
blocks_per_seg = sbi->blocks_per_seg;
for (i = 0; i < NR_CURSEG_NODE_TYPE; i++) {
if (le32_to_cpu(ckpt->cur_node_segno[i]) >= main_segs ||
- le16_to_cpu(ckpt->cur_node_blkoff[i]) >= blocks_per_seg) {
+ le16_to_cpu(ckpt->cur_node_blkoff[i]) >= blocks_per_seg)
return 1;
- }
}
for (i = 0; i < NR_CURSEG_DATA_TYPE; i++) {
if (le32_to_cpu(ckpt->cur_data_segno[i]) >= main_segs ||
- le16_to_cpu(ckpt->cur_data_blkoff[i]) >= blocks_per_seg) {
+ le16_to_cpu(ckpt->cur_data_blkoff[i]) >= blocks_per_seg)
return 1;
- }
}
if (unlikely(f2fs_cp_error(sbi))) {
diff --git a/fs/fcntl.c b/fs/fcntl.c
index ee85cd4..6237645 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -740,16 +740,10 @@
* Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY
* is defined as O_NONBLOCK on some platforms and not on others.
*/
- BUILD_BUG_ON(21 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
- O_RDONLY | O_WRONLY | O_RDWR |
- O_CREAT | O_EXCL | O_NOCTTY |
- O_TRUNC | O_APPEND | /* O_NONBLOCK | */
- __O_SYNC | O_DSYNC | FASYNC |
- O_DIRECT | O_LARGEFILE | O_DIRECTORY |
- O_NOFOLLOW | O_NOATIME | O_CLOEXEC |
- __FMODE_EXEC | O_PATH | __O_TMPFILE |
- __FMODE_NONOTIFY
- ));
+ BUILD_BUG_ON(21 - 1 /* for O_RDONLY being 0 */ !=
+ HWEIGHT32(
+ (VALID_OPEN_FLAGS & ~(O_NONBLOCK | O_NDELAY)) |
+ __FMODE_EXEC | __FMODE_NONOTIFY));
fasync_cache = kmem_cache_create("fasync_cache",
sizeof(struct fasync_struct), 0, SLAB_PANIC, NULL);
diff --git a/fs/file_table.c b/fs/file_table.c
index ad17e05..5a274d5 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -261,6 +261,12 @@
static DECLARE_DELAYED_WORK(delayed_fput_work, delayed_fput);
+void flush_delayed_fput_wait(void)
+{
+ delayed_fput(NULL);
+ flush_delayed_work(&delayed_fput_work);
+}
+
void fput(struct file *file)
{
if (atomic_long_dec_and_test(&file->f_count)) {
diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
index 43040721..40d6107 100644
--- a/fs/fscache/cookie.c
+++ b/fs/fscache/cookie.c
@@ -542,6 +542,7 @@
hlist_for_each_entry(object, &cookie->backing_objects, cookie_link) {
if (invalidate)
set_bit(FSCACHE_OBJECT_RETIRED, &object->flags);
+ clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
fscache_raise_event(object, FSCACHE_OBJECT_EV_KILL);
}
} else {
@@ -560,6 +561,10 @@
wait_on_atomic_t(&cookie->n_active, fscache_wait_atomic_t,
TASK_UNINTERRUPTIBLE);
+ /* Make sure any pending writes are cancelled. */
+ if (cookie->def->type != FSCACHE_COOKIE_TYPE_INDEX)
+ fscache_invalidate_writes(cookie);
+
/* Reset the cookie state if it wasn't relinquished */
if (!test_bit(FSCACHE_COOKIE_RELINQUISHED, &cookie->flags)) {
atomic_inc(&cookie->n_active);
diff --git a/fs/fscache/netfs.c b/fs/fscache/netfs.c
index 9b28649..a8aa00be 100644
--- a/fs/fscache/netfs.c
+++ b/fs/fscache/netfs.c
@@ -48,6 +48,7 @@
cookie->flags = 1 << FSCACHE_COOKIE_ENABLED;
spin_lock_init(&cookie->lock);
+ spin_lock_init(&cookie->stores_lock);
INIT_HLIST_HEAD(&cookie->backing_objects);
/* check the netfs type is not already present */
diff --git a/fs/fscache/object.c b/fs/fscache/object.c
index 9e792e3..7a182c8 100644
--- a/fs/fscache/object.c
+++ b/fs/fscache/object.c
@@ -30,6 +30,7 @@
static const struct fscache_state *fscache_object_available(struct fscache_object *, int);
static const struct fscache_state *fscache_parent_ready(struct fscache_object *, int);
static const struct fscache_state *fscache_update_object(struct fscache_object *, int);
+static const struct fscache_state *fscache_object_dead(struct fscache_object *, int);
#define __STATE_NAME(n) fscache_osm_##n
#define STATE(n) (&__STATE_NAME(n))
@@ -91,7 +92,7 @@
static WORK_STATE(KILL_OBJECT, "KILL", fscache_kill_object);
static WORK_STATE(KILL_DEPENDENTS, "KDEP", fscache_kill_dependents);
static WORK_STATE(DROP_OBJECT, "DROP", fscache_drop_object);
-static WORK_STATE(OBJECT_DEAD, "DEAD", (void*)2UL);
+static WORK_STATE(OBJECT_DEAD, "DEAD", fscache_object_dead);
static WAIT_STATE(WAIT_FOR_INIT, "?INI",
TRANSIT_TO(INIT_OBJECT, 1 << FSCACHE_OBJECT_EV_NEW_CHILD));
@@ -229,6 +230,10 @@
event = -1;
if (new_state == NO_TRANSIT) {
_debug("{OBJ%x} %s notrans", object->debug_id, state->name);
+ if (unlikely(state == STATE(OBJECT_DEAD))) {
+ _leave(" [dead]");
+ return;
+ }
fscache_enqueue_object(object);
event_mask = object->oob_event_mask;
goto unmask_events;
@@ -239,7 +244,7 @@
object->state = state = new_state;
if (state->work) {
- if (unlikely(state->work == ((void *)2UL))) {
+ if (unlikely(state == STATE(OBJECT_DEAD))) {
_leave(" [dead]");
return;
}
@@ -645,6 +650,12 @@
fscache_mark_object_dead(object);
object->oob_event_mask = 0;
+ if (test_bit(FSCACHE_OBJECT_RETIRED, &object->flags)) {
+ /* Reject any new read/write ops and abort any that are pending. */
+ clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
+ fscache_cancel_all_ops(object);
+ }
+
if (list_empty(&object->dependents) &&
object->n_ops == 0 &&
object->n_children == 0)
@@ -1077,3 +1088,20 @@
}
}
EXPORT_SYMBOL(fscache_object_mark_killed);
+
+/*
+ * The object is dead. We can get here if an object gets queued by an event
+ * that would lead to its death (such as EV_KILL) when the dispatcher is
+ * already running (and so can be requeued) but hasn't yet cleared the event
+ * mask.
+ */
+static const struct fscache_state *fscache_object_dead(struct fscache_object *object,
+ int event)
+{
+ if (!test_and_set_bit(FSCACHE_OBJECT_RUN_AFTER_DEAD,
+ &object->flags))
+ return NO_TRANSIT;
+
+ WARN(true, "FS-Cache object redispatched after death");
+ return NO_TRANSIT;
+}
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 66b34b1..f0de8fe 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -55,7 +55,7 @@
{
struct fuse_file *ff;
- ff = kmalloc(sizeof(struct fuse_file), GFP_KERNEL);
+ ff = kzalloc(sizeof(struct fuse_file), GFP_KERNEL);
if (unlikely(!ff))
return NULL;
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c
index ad8a5b7..a443c6e 100644
--- a/fs/gfs2/dir.c
+++ b/fs/gfs2/dir.c
@@ -760,7 +760,7 @@
int error;
error = get_leaf_nr(dip, index, &leaf_no);
- if (!error)
+ if (!IS_ERR_VALUE(error))
error = get_leaf(dip, leaf_no, bh_out);
return error;
@@ -976,7 +976,7 @@
index = name->hash >> (32 - dip->i_depth);
error = get_leaf_nr(dip, index, &leaf_no);
- if (error)
+ if (IS_ERR_VALUE(error))
return error;
/* Get the old leaf block */
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 9cd8c92..070901e 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -80,9 +80,9 @@
static struct rhashtable gl_hash_table;
-void gfs2_glock_free(struct gfs2_glock *gl)
+static void gfs2_glock_dealloc(struct rcu_head *rcu)
{
- struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+ struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu);
if (gl->gl_ops->go_flags & GLOF_ASPACE) {
kmem_cache_free(gfs2_glock_aspace_cachep, gl);
@@ -90,6 +90,13 @@
kfree(gl->gl_lksb.sb_lvbptr);
kmem_cache_free(gfs2_glock_cachep, gl);
}
+}
+
+void gfs2_glock_free(struct gfs2_glock *gl)
+{
+ struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+
+ call_rcu(&gl->gl_rcu, gfs2_glock_dealloc);
if (atomic_dec_and_test(&sdp->sd_glock_disposal))
wake_up(&sdp->sd_glock_wait);
}
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index de7b4f9..4a9077e 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -207,7 +207,7 @@
struct gfs2_sbd *ln_sbd;
u64 ln_number;
unsigned int ln_type;
-};
+} __packed __aligned(sizeof(int));
#define lm_name_equal(name1, name2) \
(((name1)->ln_number == (name2)->ln_number) && \
@@ -367,6 +367,7 @@
loff_t end;
} gl_vm;
};
+ struct rcu_head gl_rcu;
struct rhash_head gl_node;
};
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
index 8f9176c..c8d58c5 100644
--- a/fs/jfs/super.c
+++ b/fs/jfs/super.c
@@ -758,7 +758,7 @@
sb->s_blocksize - offset : toread;
tmp_bh.b_state = 0;
- tmp_bh.b_size = 1 << inode->i_blkbits;
+ tmp_bh.b_size = i_blocksize(inode);
err = jfs_get_block(inode, blk, &tmp_bh, 0);
if (err)
return err;
@@ -798,7 +798,7 @@
sb->s_blocksize - offset : towrite;
tmp_bh.b_state = 0;
- tmp_bh.b_size = 1 << inode->i_blkbits;
+ tmp_bh.b_size = i_blocksize(inode);
err = jfs_get_block(inode, blk, &tmp_bh, 1);
if (err)
goto out;
diff --git a/fs/mount.h b/fs/mount.h
index 3dc7dea..37c64bb 100644
--- a/fs/mount.h
+++ b/fs/mount.h
@@ -13,6 +13,8 @@
u64 seq; /* Sequence number to prevent loops */
wait_queue_head_t poll;
u64 event;
+ unsigned int mounts; /* # of mounts in the namespace */
+ unsigned int pending_mounts;
};
struct mnt_pcp {
@@ -55,6 +57,7 @@
struct mnt_namespace *mnt_ns; /* containing namespace */
struct mountpoint *mnt_mp; /* where is it mounted */
struct hlist_node mnt_mp_list; /* list mounts with the same mountpoint */
+ struct list_head mnt_umounting; /* list entry for umount propagation */
#ifdef CONFIG_FSNOTIFY
struct hlist_head mnt_fsnotify_marks;
__u32 mnt_fsnotify_mask;
diff --git a/fs/mpage.c b/fs/mpage.c
index 0fd48fd..f37bb01 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -147,7 +147,7 @@
SetPageUptodate(page);
return;
}
- create_empty_buffers(page, 1 << inode->i_blkbits, 0);
+ create_empty_buffers(page, i_blocksize(inode), 0);
}
head = page_buffers(page);
page_bh = head;
diff --git a/fs/namespace.c b/fs/namespace.c
index c147788..72690fd 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -20,6 +20,7 @@
#include <linux/fs_struct.h> /* get_fs_root et.al. */
#include <linux/fsnotify.h> /* fsnotify_vfsmount_delete */
#include <linux/uaccess.h>
+#include <linux/file.h>
#include <linux/proc_ns.h>
#include <linux/magic.h>
#include <linux/bootmem.h>
@@ -27,6 +28,9 @@
#include "pnode.h"
#include "internal.h"
+/* Maximum number of mounts in a mount namespace */
+unsigned int sysctl_mount_max __read_mostly = 100000;
+
static unsigned int m_hash_mask __read_mostly;
static unsigned int m_hash_shift __read_mostly;
static unsigned int mp_hash_mask __read_mostly;
@@ -224,6 +228,7 @@
mnt->mnt_count = 1;
mnt->mnt_writers = 0;
#endif
+ mnt->mnt.data = NULL;
INIT_HLIST_NODE(&mnt->mnt_hash);
INIT_LIST_HEAD(&mnt->mnt_child);
@@ -234,6 +239,7 @@
INIT_LIST_HEAD(&mnt->mnt_slave_list);
INIT_LIST_HEAD(&mnt->mnt_slave);
INIT_HLIST_NODE(&mnt->mnt_mp_list);
+ INIT_LIST_HEAD(&mnt->mnt_umounting);
#ifdef CONFIG_FSNOTIFY
INIT_HLIST_HEAD(&mnt->mnt_fsnotify_marks);
#endif
@@ -926,6 +932,9 @@
list_splice(&head, n->list.prev);
+ n->mounts += n->pending_mounts;
+ n->pending_mounts = 0;
+
__attach_mnt(mnt, parent);
touch_mnt_namespace(n);
}
@@ -969,7 +978,6 @@
if (!mnt)
return ERR_PTR(-ENOMEM);
- mnt->mnt.data = NULL;
if (type->alloc_mnt_data) {
mnt->mnt.data = type->alloc_mnt_data();
if (!mnt->mnt.data) {
@@ -983,7 +991,6 @@
root = mount_fs(type, flags, name, &mnt->mnt, data);
if (IS_ERR(root)) {
- kfree(mnt->mnt.data);
mnt_free_id(mnt);
free_vfsmnt(mnt);
return ERR_CAST(root);
@@ -1087,7 +1094,6 @@
return mnt;
out_free:
- kfree(mnt->mnt.data);
mnt_free_id(mnt);
free_vfsmnt(mnt);
return ERR_PTR(err);
@@ -1133,6 +1139,12 @@
}
static DECLARE_DELAYED_WORK(delayed_mntput_work, delayed_mntput);
+void flush_delayed_mntput_wait(void)
+{
+ delayed_mntput(NULL);
+ flush_delayed_work(&delayed_mntput_work);
+}
+
static void mntput_no_expire(struct mount *mnt)
{
rcu_read_lock();
@@ -1465,11 +1477,16 @@
propagate_umount(&tmp_list);
while (!list_empty(&tmp_list)) {
+ struct mnt_namespace *ns;
bool disconnect;
p = list_first_entry(&tmp_list, struct mount, mnt_list);
list_del_init(&p->mnt_expire);
list_del_init(&p->mnt_list);
- __touch_mnt_namespace(p->mnt_ns);
+ ns = p->mnt_ns;
+ if (ns) {
+ ns->mounts--;
+ __touch_mnt_namespace(ns);
+ }
p->mnt_ns = NULL;
if (how & UMOUNT_SYNC)
p->mnt.mnt_flags |= MNT_SYNC_UMOUNT;
@@ -1645,6 +1662,7 @@
struct mount *mnt;
int retval;
int lookup_flags = 0;
+ bool user_request = !(current->flags & PF_KTHREAD);
if (flags & ~(MNT_FORCE | MNT_DETACH | MNT_EXPIRE | UMOUNT_NOFOLLOW))
return -EINVAL;
@@ -1670,11 +1688,35 @@
if (flags & MNT_FORCE && !capable(CAP_SYS_ADMIN))
goto dput_and_out;
+ /* flush delayed_fput to put mnt_count */
+ if (user_request)
+ flush_delayed_fput_wait();
+
retval = do_umount(mnt, flags);
dput_and_out:
/* we mustn't call path_put() as that would clear mnt_expiry_mark */
dput(path.dentry);
mntput_no_expire(mnt);
+
+ if (!user_request)
+ goto out;
+
+ if (!retval) {
+ /*
+ * If the last delayed_fput() is called during do_umount()
+ * and makes mnt_count zero, we need to guarantee to register
+ * delayed_mntput by waiting for delayed_fput work again.
+ */
+ flush_delayed_fput_wait();
+
+ /* flush delayed_mntput_work to put sb->s_active */
+ flush_delayed_mntput_wait();
+ }
+ if (!retval || (flags & MNT_FORCE)) {
+ /* filesystem needs to handle unclosed namespaces */
+ if (mnt->mnt.mnt_sb->s_op->umount_end)
+ mnt->mnt.mnt_sb->s_op->umount_end(mnt->mnt.mnt_sb, flags);
+ }
out:
return retval;
}
@@ -1870,6 +1912,28 @@
return 0;
}
+int count_mounts(struct mnt_namespace *ns, struct mount *mnt)
+{
+ unsigned int max = READ_ONCE(sysctl_mount_max);
+ unsigned int mounts = 0, old, pending, sum;
+ struct mount *p;
+
+ for (p = mnt; p; p = next_mnt(p, mnt))
+ mounts++;
+
+ old = ns->mounts;
+ pending = ns->pending_mounts;
+ sum = old + pending;
+ if ((old > sum) ||
+ (pending > sum) ||
+ (max < sum) ||
+ (mounts > (max - sum)))
+ return -ENOSPC;
+
+ ns->pending_mounts = pending + mounts;
+ return 0;
+}
+
/*
* @source_mnt : mount tree to be attached
* @nd : place the mount tree @source_mnt is attached
@@ -1939,6 +2003,7 @@
struct path *parent_path)
{
HLIST_HEAD(tree_list);
+ struct mnt_namespace *ns = dest_mnt->mnt_ns;
struct mountpoint *smp;
struct mount *child, *p;
struct hlist_node *n;
@@ -1951,6 +2016,13 @@
if (IS_ERR(smp))
return PTR_ERR(smp);
+ /* Is there space to add these mounts to the mount namespace? */
+ if (!parent_path) {
+ err = count_mounts(ns, source_mnt);
+ if (err)
+ goto out;
+ }
+
if (IS_MNT_SHARED(dest_mnt)) {
err = invent_group_ids(source_mnt, true);
if (err)
@@ -1990,11 +2062,14 @@
out_cleanup_ids:
while (!hlist_empty(&tree_list)) {
child = hlist_entry(tree_list.first, struct mount, mnt_hash);
+ child->mnt_parent->mnt_ns->pending_mounts = 0;
umount_tree(child, UMOUNT_SYNC);
}
unlock_mount_hash();
cleanup_group_ids(source_mnt, NULL);
out:
+ ns->pending_mounts = 0;
+
read_seqlock_excl(&mount_lock);
put_mountpoint(smp);
read_sequnlock_excl(&mount_lock);
@@ -2830,6 +2905,8 @@
init_waitqueue_head(&new_ns->poll);
new_ns->event = 0;
new_ns->user_ns = get_user_ns(user_ns);
+ new_ns->mounts = 0;
+ new_ns->pending_mounts = 0;
return new_ns;
}
@@ -2879,6 +2956,7 @@
q = new;
while (p) {
q->mnt_ns = new_ns;
+ new_ns->mounts++;
if (new_fs) {
if (&p->mnt == new_fs->root.mnt) {
new_fs->root.mnt = mntget(&q->mnt);
@@ -2917,6 +2995,7 @@
struct mount *mnt = real_mount(m);
mnt->mnt_ns = new_ns;
new_ns->root = mnt;
+ new_ns->mounts++;
list_add(&mnt->mnt_list, &new_ns->list);
} else {
mntput(m);
diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig
index f31fd0d..b1daeaf 100644
--- a/fs/nfs/Kconfig
+++ b/fs/nfs/Kconfig
@@ -121,6 +121,7 @@
config PNFS_BLOCK
tristate
depends on NFS_V4_1 && BLK_DEV_DM
+ depends on 64BIT || LBDAF
default NFS_V4
config PNFS_OBJLAYOUT
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 52ee0b7..348e0a0 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -1135,11 +1135,13 @@
/* Force a full look up iff the parent directory has changed */
if (!nfs_is_exclusive_create(dir, flags) &&
nfs_check_verifier(dir, dentry, flags & LOOKUP_RCU)) {
-
- if (nfs_lookup_verify_inode(inode, flags)) {
+ error = nfs_lookup_verify_inode(inode, flags);
+ if (error) {
if (flags & LOOKUP_RCU)
return -ECHILD;
- goto out_zap_parent;
+ if (error == -ESTALE)
+ goto out_zap_parent;
+ goto out_error;
}
goto out_valid;
}
@@ -1163,8 +1165,10 @@
trace_nfs_lookup_revalidate_enter(dir, dentry, flags);
error = NFS_PROTO(dir)->lookup(dir, &dentry->d_name, fhandle, fattr, label);
trace_nfs_lookup_revalidate_exit(dir, dentry, flags, error);
- if (error)
+ if (error == -ESTALE || error == -ENOENT)
goto out_bad;
+ if (error)
+ goto out_error;
if (nfs_compare_fh(NFS_FH(inode), fhandle))
goto out_bad;
if ((error = nfs_refresh_inode(inode, fattr)) != 0)
@@ -2421,6 +2425,20 @@
}
EXPORT_SYMBOL_GPL(nfs_may_open);
+static int nfs_execute_ok(struct inode *inode, int mask)
+{
+ struct nfs_server *server = NFS_SERVER(inode);
+ int ret;
+
+ if (mask & MAY_NOT_BLOCK)
+ ret = nfs_revalidate_inode_rcu(server, inode);
+ else
+ ret = nfs_revalidate_inode(server, inode);
+ if (ret == 0 && !execute_ok(inode))
+ ret = -EACCES;
+ return ret;
+}
+
int nfs_permission(struct inode *inode, int mask)
{
struct rpc_cred *cred;
@@ -2438,6 +2456,9 @@
case S_IFLNK:
goto out;
case S_IFREG:
+ if ((mask & MAY_OPEN) &&
+ nfs_server_capable(inode, NFS_CAP_ATOMIC_OPEN))
+ return 0;
break;
case S_IFDIR:
/*
@@ -2470,8 +2491,8 @@
res = PTR_ERR(cred);
}
out:
- if (!res && (mask & MAY_EXEC) && !execute_ok(inode))
- res = -EACCES;
+ if (!res && (mask & MAY_EXEC))
+ res = nfs_execute_ok(inode, mask);
dfprintk(VFS, "NFS: permission(%s/%lu), mask=0x%x, res=%d\n",
inode->i_sb->s_id, inode->i_ino, mask, res);
diff --git a/fs/nfs/flexfilelayout/flexfilelayoutdev.c b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
index e125e55..2603d75 100644
--- a/fs/nfs/flexfilelayout/flexfilelayoutdev.c
+++ b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
@@ -30,6 +30,7 @@
{
nfs4_print_deviceid(&mirror_ds->id_node.deviceid);
nfs4_pnfs_ds_put(mirror_ds->ds);
+ kfree(mirror_ds->ds_versions);
kfree_rcu(mirror_ds, id_node.rcu);
}
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index f714b98..668ac19 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -1241,9 +1241,9 @@
return 0;
/* Has the inode gone and changed behind our back? */
if ((fattr->valid & NFS_ATTR_FATTR_FILEID) && nfsi->fileid != fattr->fileid)
- return -EIO;
+ return -ESTALE;
if ((fattr->valid & NFS_ATTR_FATTR_TYPE) && (inode->i_mode & S_IFMT) != (fattr->mode & S_IFMT))
- return -EIO;
+ return -ESTALE;
if ((fattr->valid & NFS_ATTR_FATTR_CHANGE) != 0 &&
inode->i_version != fattr->change_attr)
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 9dea85f..578350f 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -243,7 +243,6 @@
extern const struct nfs_pageio_ops nfs_pgio_rw_ops;
struct nfs_pgio_header *nfs_pgio_header_alloc(const struct nfs_rw_ops *);
void nfs_pgio_header_free(struct nfs_pgio_header *);
-void nfs_pgio_data_destroy(struct nfs_pgio_header *);
int nfs_generic_pgio(struct nfs_pageio_descriptor *, struct nfs_pgio_header *);
int nfs_initiate_pgio(struct rpc_clnt *clnt, struct nfs_pgio_header *hdr,
struct rpc_cred *cred, const struct nfs_rpc_ops *rpc_ops,
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 4e3679b2..8e425f2 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -2188,8 +2188,6 @@
if ((mask & ~cache.mask & (MAY_READ | MAY_EXEC)) == 0)
return 0;
- /* even though OPEN succeeded, access is denied. Close the file */
- nfs4_close_state(state, fmode);
return -EACCES;
}
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 82dc303..e8d1d6c 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1072,6 +1072,7 @@
case -NFS4ERR_BADXDR:
case -NFS4ERR_RESOURCE:
case -NFS4ERR_NOFILEHANDLE:
+ case -NFS4ERR_MOVED:
/* Non-seqid mutating errors */
return;
};
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index 452a011..8ebfdd0 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -528,16 +528,6 @@
}
EXPORT_SYMBOL_GPL(nfs_pgio_header_alloc);
-/*
- * nfs_pgio_header_free - Free a read or write header
- * @hdr: The header to free
- */
-void nfs_pgio_header_free(struct nfs_pgio_header *hdr)
-{
- hdr->rw_ops->rw_free_header(hdr);
-}
-EXPORT_SYMBOL_GPL(nfs_pgio_header_free);
-
/**
* nfs_pgio_data_destroy - make @hdr suitable for reuse
*
@@ -546,14 +536,24 @@
*
* @hdr: A header that has had nfs_generic_pgio called
*/
-void nfs_pgio_data_destroy(struct nfs_pgio_header *hdr)
+static void nfs_pgio_data_destroy(struct nfs_pgio_header *hdr)
{
if (hdr->args.context)
put_nfs_open_context(hdr->args.context);
if (hdr->page_array.pagevec != hdr->page_array.page_array)
kfree(hdr->page_array.pagevec);
}
-EXPORT_SYMBOL_GPL(nfs_pgio_data_destroy);
+
+/*
+ * nfs_pgio_header_free - Free a read or write header
+ * @hdr: The header to free
+ */
+void nfs_pgio_header_free(struct nfs_pgio_header *hdr)
+{
+ nfs_pgio_data_destroy(hdr);
+ hdr->rw_ops->rw_free_header(hdr);
+}
+EXPORT_SYMBOL_GPL(nfs_pgio_header_free);
/**
* nfs_pgio_rpcsetup - Set up arguments for a pageio call
@@ -671,7 +671,6 @@
u32 midx;
set_bit(NFS_IOHDR_REDO, &hdr->flags);
- nfs_pgio_data_destroy(hdr);
hdr->completion_ops->completion(hdr);
/* TODO: Make sure it's right to clean up all mirrors here
* and not just hdr->pgio_mirror_idx */
@@ -689,7 +688,6 @@
static void nfs_pgio_release(void *calldata)
{
struct nfs_pgio_header *hdr = calldata;
- nfs_pgio_data_destroy(hdr);
hdr->completion_ops->completion(hdr);
}
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 3cae072..7af7bed 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -1943,7 +1943,6 @@
nfs_pageio_reset_write_mds(desc);
mirror->pg_recoalesce = 1;
}
- nfs_pgio_data_destroy(hdr);
hdr->release(hdr);
}
@@ -2059,7 +2058,6 @@
nfs_pageio_reset_read_mds(desc);
mirror->pg_recoalesce = 1;
}
- nfs_pgio_data_destroy(hdr);
hdr->release(hdr);
}
diff --git a/fs/nfsd/blocklayout.c b/fs/nfsd/blocklayout.c
index c29d942..0976f8d 100644
--- a/fs/nfsd/blocklayout.c
+++ b/fs/nfsd/blocklayout.c
@@ -50,7 +50,7 @@
{
struct nfsd4_layout_seg *seg = &args->lg_seg;
struct super_block *sb = inode->i_sb;
- u32 block_size = (1 << inode->i_blkbits);
+ u32 block_size = i_blocksize(inode);
struct pnfs_block_extent *bex;
struct iomap iomap;
u32 device_generation = 0;
@@ -151,7 +151,7 @@
int error;
nr_iomaps = nfsd4_block_decode_layoutupdate(lcp->lc_up_layout,
- lcp->lc_up_len, &iomaps, 1 << inode->i_blkbits);
+ lcp->lc_up_len, &iomaps, i_blocksize(inode));
if (nr_iomaps < 0)
return nfserrno(nr_iomaps);
diff --git a/fs/nfsd/nfs3xdr.c b/fs/nfsd/nfs3xdr.c
index 00575d7..7162ab7 100644
--- a/fs/nfsd/nfs3xdr.c
+++ b/fs/nfsd/nfs3xdr.c
@@ -358,6 +358,7 @@
{
unsigned int len, v, hdr, dlen;
u32 max_blocksize = svc_max_payload(rqstp);
+ struct kvec *head = rqstp->rq_arg.head;
p = decode_fh(p, &args->fh);
if (!p)
@@ -367,6 +368,8 @@
args->count = ntohl(*p++);
args->stable = ntohl(*p++);
len = args->len = ntohl(*p++);
+ if ((void *)p > head->iov_base + head->iov_len)
+ return 0;
/*
* The count must equal the amount of data passed.
*/
@@ -377,9 +380,8 @@
* Check to make sure that we got the right number of
* bytes.
*/
- hdr = (void*)p - rqstp->rq_arg.head[0].iov_base;
- dlen = rqstp->rq_arg.head[0].iov_len + rqstp->rq_arg.page_len
- - hdr;
+ hdr = (void*)p - head->iov_base;
+ dlen = head->iov_len + rqstp->rq_arg.page_len - hdr;
/*
* Round the length of the data which was specified up to
* the next multiple of XDR units and then compare that
@@ -396,7 +398,7 @@
len = args->len = max_blocksize;
}
rqstp->rq_vec[0].iov_base = (void*)p;
- rqstp->rq_vec[0].iov_len = rqstp->rq_arg.head[0].iov_len - hdr;
+ rqstp->rq_vec[0].iov_len = head->iov_len - hdr;
v = 0;
while (len > rqstp->rq_vec[v].iov_len) {
len -= rqstp->rq_vec[v].iov_len;
@@ -471,6 +473,8 @@
/* first copy and check from the first page */
old = (char*)p;
vec = &rqstp->rq_arg.head[0];
+ if ((void *)old > vec->iov_base + vec->iov_len)
+ return 0;
avail = vec->iov_len - (old - (char*)vec->iov_base);
while (len && avail && *old) {
*new++ = *old++;
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index 7d5351c..209dbfc 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -1690,6 +1690,12 @@
opdesc->op_get_currentstateid(cstate, &op->u);
op->status = opdesc->op_func(rqstp, cstate, &op->u);
+ /* Only from SEQUENCE */
+ if (cstate->status == nfserr_replay_cache) {
+ dprintk("%s NFS4.1 replay from cache\n", __func__);
+ status = op->status;
+ goto out;
+ }
if (!op->status) {
if (opdesc->op_set_currentstateid)
opdesc->op_set_currentstateid(cstate, &op->u);
@@ -1700,14 +1706,7 @@
if (need_wrongsec_check(rqstp))
op->status = check_nfsd_access(current_fh->fh_export, rqstp);
}
-
encode_op:
- /* Only from SEQUENCE */
- if (cstate->status == nfserr_replay_cache) {
- dprintk("%s NFS4.1 replay from cache\n", __func__);
- status = op->status;
- goto out;
- }
if (op->status == nfserr_replay_me) {
op->replay = &cstate->replay_owner->so_replay;
nfsd4_encode_replay(&resp->xdr, op);
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 1293520..544672b 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -129,7 +129,7 @@
argp->p = page_address(argp->pagelist[0]);
argp->pagelist++;
if (argp->pagelen < PAGE_SIZE) {
- argp->end = argp->p + (argp->pagelen>>2);
+ argp->end = argp->p + XDR_QUADLEN(argp->pagelen);
argp->pagelen = 0;
} else {
argp->end = argp->p + (PAGE_SIZE>>2);
@@ -1246,9 +1246,7 @@
argp->pagelen -= pages * PAGE_SIZE;
len -= pages * PAGE_SIZE;
- argp->p = (__be32 *)page_address(argp->pagelist[0]);
- argp->pagelist++;
- argp->end = argp->p + XDR_QUADLEN(PAGE_SIZE);
+ next_decode_page(argp);
}
argp->p += XDR_QUADLEN(len);
@@ -2753,9 +2751,16 @@
}
#endif /* CONFIG_NFSD_PNFS */
if (bmval2 & FATTR4_WORD2_SUPPATTR_EXCLCREAT) {
- status = nfsd4_encode_bitmap(xdr, NFSD_SUPPATTR_EXCLCREAT_WORD0,
- NFSD_SUPPATTR_EXCLCREAT_WORD1,
- NFSD_SUPPATTR_EXCLCREAT_WORD2);
+ u32 supp[3];
+
+ supp[0] = nfsd_suppattrs0(minorversion);
+ supp[1] = nfsd_suppattrs1(minorversion);
+ supp[2] = nfsd_suppattrs2(minorversion);
+ supp[0] &= NFSD_SUPPATTR_EXCLCREAT_WORD0;
+ supp[1] &= NFSD_SUPPATTR_EXCLCREAT_WORD1;
+ supp[2] &= NFSD_SUPPATTR_EXCLCREAT_WORD2;
+
+ status = nfsd4_encode_bitmap(xdr, supp[0], supp[1], supp[2]);
if (status)
goto out;
}
@@ -4041,8 +4046,7 @@
struct nfsd4_getdeviceinfo *gdev)
{
struct xdr_stream *xdr = &resp->xdr;
- const struct nfsd4_layout_ops *ops =
- nfsd4_layout_ops[gdev->gd_layout_type];
+ const struct nfsd4_layout_ops *ops;
u32 starting_len = xdr->buf->len, needed_len;
__be32 *p;
@@ -4059,6 +4063,7 @@
/* If maxcount is 0 then just update notifications */
if (gdev->gd_maxcount != 0) {
+ ops = nfsd4_layout_ops[gdev->gd_layout_type];
nfserr = ops->encode_getdeviceinfo(xdr, gdev);
if (nfserr) {
/*
@@ -4111,8 +4116,7 @@
struct nfsd4_layoutget *lgp)
{
struct xdr_stream *xdr = &resp->xdr;
- const struct nfsd4_layout_ops *ops =
- nfsd4_layout_ops[lgp->lg_layout_type];
+ const struct nfsd4_layout_ops *ops;
__be32 *p;
dprintk("%s: err %d\n", __func__, nfserr);
@@ -4135,6 +4139,7 @@
*p++ = cpu_to_be32(lgp->lg_seg.iomode);
*p++ = cpu_to_be32(lgp->lg_layout_type);
+ ops = nfsd4_layout_ops[lgp->lg_layout_type];
nfserr = ops->encode_layoutget(xdr, lgp);
out:
kfree(lgp->lg_content);
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index ad4e237..5be1fa6 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -656,6 +656,37 @@
return nfserr;
}
+/*
+ * A write procedure can have a large argument, and a read procedure can
+ * have a large reply, but no NFSv2 or NFSv3 procedure has argument and
+ * reply that can both be larger than a page. The xdr code has taken
+ * advantage of this assumption to be a sloppy about bounds checking in
+ * some cases. Pending a rewrite of the NFSv2/v3 xdr code to fix that
+ * problem, we enforce these assumptions here:
+ */
+static bool nfs_request_too_big(struct svc_rqst *rqstp,
+ struct svc_procedure *proc)
+{
+ /*
+ * The ACL code has more careful bounds-checking and is not
+ * susceptible to this problem:
+ */
+ if (rqstp->rq_prog != NFS_PROGRAM)
+ return false;
+ /*
+ * Ditto NFSv4 (which can in theory have argument and reply both
+ * more than a page):
+ */
+ if (rqstp->rq_vers >= 4)
+ return false;
+ /* The reply will be small, we're OK: */
+ if (proc->pc_xdrressize > 0 &&
+ proc->pc_xdrressize < XDR_QUADLEN(PAGE_SIZE))
+ return false;
+
+ return rqstp->rq_arg.len > PAGE_SIZE;
+}
+
int
nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp)
{
@@ -668,6 +699,11 @@
rqstp->rq_vers, rqstp->rq_proc);
proc = rqstp->rq_procinfo;
+ if (nfs_request_too_big(rqstp, proc)) {
+ dprintk("nfsd: NFSv%d argument too large\n", rqstp->rq_vers);
+ *statp = rpc_garbage_args;
+ return 1;
+ }
/*
* Give the xdr decoder a chance to change this if it wants
* (necessary in the NFSv4.0 compound case)
diff --git a/fs/nfsd/nfsxdr.c b/fs/nfsd/nfsxdr.c
index 79d964a..bf91320 100644
--- a/fs/nfsd/nfsxdr.c
+++ b/fs/nfsd/nfsxdr.c
@@ -280,6 +280,7 @@
struct nfsd_writeargs *args)
{
unsigned int len, hdr, dlen;
+ struct kvec *head = rqstp->rq_arg.head;
int v;
p = decode_fh(p, &args->fh);
@@ -300,9 +301,10 @@
* Check to make sure that we got the right number of
* bytes.
*/
- hdr = (void*)p - rqstp->rq_arg.head[0].iov_base;
- dlen = rqstp->rq_arg.head[0].iov_len + rqstp->rq_arg.page_len
- - hdr;
+ hdr = (void*)p - head->iov_base;
+ if (hdr > head->iov_len)
+ return 0;
+ dlen = head->iov_len + rqstp->rq_arg.page_len - hdr;
/*
* Round the length of the data which was specified up to
@@ -316,7 +318,7 @@
return 0;
rqstp->rq_vec[0].iov_base = (void*)p;
- rqstp->rq_vec[0].iov_len = rqstp->rq_arg.head[0].iov_len - hdr;
+ rqstp->rq_vec[0].iov_len = head->iov_len - hdr;
v = 0;
while (len > rqstp->rq_vec[v].iov_len) {
len -= rqstp->rq_vec[v].iov_len;
diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c
index a35ae35..cd39b57 100644
--- a/fs/nilfs2/btnode.c
+++ b/fs/nilfs2/btnode.c
@@ -55,7 +55,7 @@
brelse(bh);
BUG();
}
- memset(bh->b_data, 0, 1 << inode->i_blkbits);
+ memset(bh->b_data, 0, i_blocksize(inode));
bh->b_bdev = inode->i_sb->s_bdev;
bh->b_blocknr = blocknr;
set_buffer_mapped(bh);
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
index ac2f649..00877ef 100644
--- a/fs/nilfs2/inode.c
+++ b/fs/nilfs2/inode.c
@@ -55,7 +55,7 @@
{
struct nilfs_root *root = NILFS_I(inode)->i_root;
- inode_add_bytes(inode, (1 << inode->i_blkbits) * n);
+ inode_add_bytes(inode, i_blocksize(inode) * n);
if (root)
atomic64_add(n, &root->blocks_count);
}
@@ -64,7 +64,7 @@
{
struct nilfs_root *root = NILFS_I(inode)->i_root;
- inode_sub_bytes(inode, (1 << inode->i_blkbits) * n);
+ inode_sub_bytes(inode, i_blocksize(inode) * n);
if (root)
atomic64_sub(n, &root->blocks_count);
}
diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c
index 1125f40..612a245 100644
--- a/fs/nilfs2/mdt.c
+++ b/fs/nilfs2/mdt.c
@@ -60,7 +60,7 @@
set_buffer_mapped(bh);
kaddr = kmap_atomic(bh->b_page);
- memset(kaddr + bh_offset(bh), 0, 1 << inode->i_blkbits);
+ memset(kaddr + bh_offset(bh), 0, i_blocksize(inode));
if (init_block)
init_block(inode, bh, kaddr);
flush_dcache_page(bh->b_page);
@@ -503,7 +503,7 @@
struct nilfs_mdt_info *mi = NILFS_MDT(inode);
mi->mi_entry_size = entry_size;
- mi->mi_entries_per_block = (1 << inode->i_blkbits) / entry_size;
+ mi->mi_entries_per_block = i_blocksize(inode) / entry_size;
mi->mi_first_entry_offset = DIV_ROUND_UP(header_size, entry_size);
}
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 3b65ada..2f27c93 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -719,7 +719,7 @@
lock_page(page);
if (!page_has_buffers(page))
- create_empty_buffers(page, 1 << inode->i_blkbits, 0);
+ create_empty_buffers(page, i_blocksize(inode), 0);
unlock_page(page);
bh = head = page_buffers(page);
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index e6795c7..e4184bd 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -1103,7 +1103,7 @@
int ret = 0;
struct buffer_head *head, *bh, *wait[2], **wait_bh = wait;
unsigned int block_end, block_start;
- unsigned int bsize = 1 << inode->i_blkbits;
+ unsigned int bsize = i_blocksize(inode);
if (!page_has_buffers(page))
create_empty_buffers(page, bsize, 0);
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index 709fbbd..acebc35 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -2070,13 +2070,13 @@
spin_unlock(&o2hb_live_lock);
}
-static ssize_t o2hb_heartbeat_group_threshold_show(struct config_item *item,
+static ssize_t o2hb_heartbeat_group_dead_threshold_show(struct config_item *item,
char *page)
{
return sprintf(page, "%u\n", o2hb_dead_threshold);
}
-static ssize_t o2hb_heartbeat_group_threshold_store(struct config_item *item,
+static ssize_t o2hb_heartbeat_group_dead_threshold_store(struct config_item *item,
const char *page, size_t count)
{
unsigned long tmp;
@@ -2125,11 +2125,11 @@
}
-CONFIGFS_ATTR(o2hb_heartbeat_group_, threshold);
+CONFIGFS_ATTR(o2hb_heartbeat_group_, dead_threshold);
CONFIGFS_ATTR(o2hb_heartbeat_group_, mode);
static struct configfs_attribute *o2hb_heartbeat_group_attrs[] = {
- &o2hb_heartbeat_group_attr_threshold,
+ &o2hb_heartbeat_group_attr_dead_threshold,
&o2hb_heartbeat_group_attr_mode,
NULL,
};
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 56dd395..1d73872 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -808,7 +808,7 @@
/* We know that zero_from is block aligned */
for (block_start = zero_from; block_start < zero_to;
block_start = block_end) {
- block_end = block_start + (1 << inode->i_blkbits);
+ block_end = block_start + i_blocksize(inode);
/*
* block_start is block-aligned. Bump it by one to force
diff --git a/fs/open.c b/fs/open.c
index e70cca1..1fd96c5 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -898,6 +898,12 @@
int lookup_flags = 0;
int acc_mode;
+ /*
+ * Clear out all open flags we don't know about so that we don't report
+ * them in fcntl(F_GETFD) or similar interfaces.
+ */
+ flags &= VALID_OPEN_FLAGS;
+
if (flags & (O_CREAT | __O_TMPFILE))
op->mode = (mode & S_IALLUGO) | S_IFREG;
else
diff --git a/fs/pnode.c b/fs/pnode.c
index 00ecb43..ddb846f8 100644
--- a/fs/pnode.c
+++ b/fs/pnode.c
@@ -24,6 +24,11 @@
return list_entry(p->mnt_slave_list.next, struct mount, mnt_slave);
}
+static inline struct mount *last_slave(struct mount *p)
+{
+ return list_entry(p->mnt_slave_list.prev, struct mount, mnt_slave);
+}
+
static inline struct mount *next_slave(struct mount *p)
{
return list_entry(p->mnt_slave.next, struct mount, mnt_slave);
@@ -164,6 +169,19 @@
}
}
+static struct mount *skip_propagation_subtree(struct mount *m,
+ struct mount *origin)
+{
+ /*
+ * Advance m such that propagation_next will not return
+ * the slaves of m.
+ */
+ if (!IS_MNT_NEW(m) && !list_empty(&m->mnt_slave_list))
+ m = last_slave(m);
+
+ return m;
+}
+
static struct mount *next_group(struct mount *m, struct mount *origin)
{
while (1) {
@@ -259,7 +277,7 @@
read_sequnlock_excl(&mount_lock);
}
hlist_add_head(&child->mnt_hash, list);
- return 0;
+ return count_mounts(m->mnt_ns, child);
}
/*
@@ -415,68 +433,107 @@
}
}
-/*
- * Mark all mounts that the MNT_LOCKED logic will allow to be unmounted.
- */
-static void mark_umount_candidates(struct mount *mnt)
+static void umount_one(struct mount *mnt, struct list_head *to_umount)
{
- struct mount *parent = mnt->mnt_parent;
- struct mount *m;
-
- BUG_ON(parent == mnt);
-
- for (m = propagation_next(parent, parent); m;
- m = propagation_next(m, parent)) {
- struct mount *child = __lookup_mnt(&m->mnt,
- mnt->mnt_mountpoint);
- if (!child || (child->mnt.mnt_flags & MNT_UMOUNT))
- continue;
- if (!IS_MNT_LOCKED(child) || IS_MNT_MARKED(m)) {
- SET_MNT_MARK(child);
- }
- }
+ CLEAR_MNT_MARK(mnt);
+ mnt->mnt.mnt_flags |= MNT_UMOUNT;
+ list_del_init(&mnt->mnt_child);
+ list_del_init(&mnt->mnt_umounting);
+ list_move_tail(&mnt->mnt_list, to_umount);
}
/*
* NOTE: unmounting 'mnt' naturally propagates to all other mounts its
* parent propagates to.
*/
-static void __propagate_umount(struct mount *mnt)
+static bool __propagate_umount(struct mount *mnt,
+ struct list_head *to_umount,
+ struct list_head *to_restore)
{
- struct mount *parent = mnt->mnt_parent;
- struct mount *m;
+ bool progress = false;
+ struct mount *child;
- BUG_ON(parent == mnt);
+ /*
+ * The state of the parent won't change if this mount is
+ * already unmounted or marked as without children.
+ */
+ if (mnt->mnt.mnt_flags & (MNT_UMOUNT | MNT_MARKED))
+ goto out;
- for (m = propagation_next(parent, parent); m;
- m = propagation_next(m, parent)) {
- struct mount *topper;
- struct mount *child = __lookup_mnt(&m->mnt,
- mnt->mnt_mountpoint);
- /*
- * umount the child only if the child has no children
- * and the child is marked safe to unmount.
- */
- if (!child || !IS_MNT_MARKED(child))
+ /* Verify topper is the only grandchild that has not been
+ * speculatively unmounted.
+ */
+ list_for_each_entry(child, &mnt->mnt_mounts, mnt_child) {
+ if (child->mnt_mountpoint == mnt->mnt.mnt_root)
continue;
- CLEAR_MNT_MARK(child);
+ if (!list_empty(&child->mnt_umounting) && IS_MNT_MARKED(child))
+ continue;
+ /* Found a mounted child */
+ goto children;
+ }
- /* If there is exactly one mount covering all of child
- * replace child with that mount.
- */
- topper = find_topper(child);
- if (topper)
- mnt_change_mountpoint(child->mnt_parent, child->mnt_mp,
- topper);
+ /* Mark mounts that can be unmounted if not locked */
+ SET_MNT_MARK(mnt);
+ progress = true;
- if (list_empty(&child->mnt_mounts)) {
- list_del_init(&child->mnt_child);
- child->mnt.mnt_flags |= MNT_UMOUNT;
- list_move_tail(&child->mnt_list, &mnt->mnt_list);
+ /* If a mount is without children and not locked umount it. */
+ if (!IS_MNT_LOCKED(mnt)) {
+ umount_one(mnt, to_umount);
+ } else {
+children:
+ list_move_tail(&mnt->mnt_umounting, to_restore);
+ }
+out:
+ return progress;
+}
+
+static void umount_list(struct list_head *to_umount,
+ struct list_head *to_restore)
+{
+ struct mount *mnt, *child, *tmp;
+ list_for_each_entry(mnt, to_umount, mnt_list) {
+ list_for_each_entry_safe(child, tmp, &mnt->mnt_mounts, mnt_child) {
+ /* topper? */
+ if (child->mnt_mountpoint == mnt->mnt.mnt_root)
+ list_move_tail(&child->mnt_umounting, to_restore);
+ else
+ umount_one(child, to_umount);
}
}
}
+static void restore_mounts(struct list_head *to_restore)
+{
+ /* Restore mounts to a clean working state */
+ while (!list_empty(to_restore)) {
+ struct mount *mnt, *parent;
+ struct mountpoint *mp;
+
+ mnt = list_first_entry(to_restore, struct mount, mnt_umounting);
+ CLEAR_MNT_MARK(mnt);
+ list_del_init(&mnt->mnt_umounting);
+
+ /* Should this mount be reparented? */
+ mp = mnt->mnt_mp;
+ parent = mnt->mnt_parent;
+ while (parent->mnt.mnt_flags & MNT_UMOUNT) {
+ mp = parent->mnt_mp;
+ parent = parent->mnt_parent;
+ }
+ if (parent != mnt->mnt_parent)
+ mnt_change_mountpoint(parent, mp, mnt);
+ }
+}
+
+static void cleanup_umount_visitations(struct list_head *visited)
+{
+ while (!list_empty(visited)) {
+ struct mount *mnt =
+ list_first_entry(visited, struct mount, mnt_umounting);
+ list_del_init(&mnt->mnt_umounting);
+ }
+}
+
/*
* collect all mounts that receive propagation from the mount in @list,
* and return these additional mounts in the same list.
@@ -487,12 +544,69 @@
int propagate_umount(struct list_head *list)
{
struct mount *mnt;
+ LIST_HEAD(to_restore);
+ LIST_HEAD(to_umount);
+ LIST_HEAD(visited);
- list_for_each_entry_reverse(mnt, list, mnt_list)
- mark_umount_candidates(mnt);
+ /* Find candidates for unmounting */
+ list_for_each_entry_reverse(mnt, list, mnt_list) {
+ struct mount *parent = mnt->mnt_parent;
+ struct mount *m;
- list_for_each_entry(mnt, list, mnt_list)
- __propagate_umount(mnt);
+ /*
+ * If this mount has already been visited it is known that its
+ * entire peer group and all of their slaves in the propagation
+ * tree for the mountpoint has already been visited and there is
+ * no need to visit them again.
+ */
+ if (!list_empty(&mnt->mnt_umounting))
+ continue;
+
+ list_add_tail(&mnt->mnt_umounting, &visited);
+ for (m = propagation_next(parent, parent); m;
+ m = propagation_next(m, parent)) {
+ struct mount *child = __lookup_mnt(&m->mnt,
+ mnt->mnt_mountpoint);
+ if (!child)
+ continue;
+
+ if (!list_empty(&child->mnt_umounting)) {
+ /*
+ * If the child has already been visited it is
+ * known that its entire peer group and all of
+ * their slaves in the propagation tree for the
+ * mountpoint has already been visited and there
+ * is no need to visit this subtree again.
+ */
+ m = skip_propagation_subtree(m, parent);
+ continue;
+ } else if (child->mnt.mnt_flags & MNT_UMOUNT) {
+ /*
+ * We have come across a partially unmounted
+ * mount in list that has not been visited yet.
+ * Remember it has been visited and continue
+ * about our merry way.
+ */
+ list_add_tail(&child->mnt_umounting, &visited);
+ continue;
+ }
+
+ /* Check the child and parents while progress is made */
+ while (__propagate_umount(child,
+ &to_umount, &to_restore)) {
+ /* Is the parent a umount candidate? */
+ child = child->mnt_parent;
+ if (list_empty(&child->mnt_umounting))
+ break;
+ }
+ }
+ }
+
+ umount_list(&to_umount, &to_restore);
+ restore_mounts(&to_restore);
+ cleanup_umount_visitations(&visited);
+ list_splice_tail(&to_umount, list);
+
return 0;
}
diff --git a/fs/pnode.h b/fs/pnode.h
index f41fc9a..a9a6576 100644
--- a/fs/pnode.h
+++ b/fs/pnode.h
@@ -55,4 +55,5 @@
struct mount *copy_tree(struct mount *, struct dentry *, int);
bool is_path_reachable(struct mount *, struct dentry *,
const struct path *root);
+int count_mounts(struct mnt_namespace *ns, struct mount *mnt);
#endif /* _LINUX_PNODE_H */
diff --git a/fs/proc/base.c b/fs/proc/base.c
index c45c756..cc0c952 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -87,6 +87,7 @@
#include <linux/slab.h>
#include <linux/flex_array.h>
#include <linux/posix-timers.h>
+#include <linux/cpufreq.h>
#ifdef CONFIG_HARDWALL
#include <asm/hardwall.h>
#endif
@@ -2860,6 +2861,7 @@
#ifdef CONFIG_PROC_PAGE_MONITOR
REG("clear_refs", S_IWUSR, proc_clear_refs_operations),
REG("smaps", S_IRUGO, proc_pid_smaps_operations),
+ REG("smaps_rollup", S_IRUGO, proc_pid_smaps_rollup_operations),
REG("pagemap", S_IRUSR, proc_pagemap_operations),
#endif
#ifdef CONFIG_SECURITY
@@ -2912,6 +2914,9 @@
REG("timers", S_IRUGO, proc_timers_operations),
#endif
REG("timerslack_ns", S_IRUGO|S_IWUGO, proc_pid_set_timerslack_ns_operations),
+#ifdef CONFIG_CPU_FREQ_STAT
+ ONE("time_in_state", 0444, proc_time_in_state_show),
+#endif
};
static int proc_tgid_base_readdir(struct file *file, struct dir_context *ctx)
@@ -3153,6 +3158,8 @@
iter.tgid += 1, iter = next_tgid(ns, iter)) {
char name[PROC_NUMBUF];
int len;
+
+ cond_resched();
if (!has_pid_permissions(ns, iter.task, 2))
continue;
@@ -3249,6 +3256,7 @@
#ifdef CONFIG_PROC_PAGE_MONITOR
REG("clear_refs", S_IWUSR, proc_clear_refs_operations),
REG("smaps", S_IRUGO, proc_tid_smaps_operations),
+ REG("smaps_rollup", S_IRUGO, proc_pid_smaps_rollup_operations),
REG("pagemap", S_IRUSR, proc_pagemap_operations),
#endif
#ifdef CONFIG_SECURITY
@@ -3294,6 +3302,9 @@
REG("projid_map", S_IRUGO|S_IWUSR, proc_projid_map_operations),
REG("setgroups", S_IRUGO|S_IWUSR, proc_setgroups_operations),
#endif
+#ifdef CONFIG_CPU_FREQ_STAT
+ ONE("time_in_state", 0444, proc_time_in_state_show),
+#endif
};
static int proc_tid_base_readdir(struct file *file, struct dir_context *ctx)
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index ff3ffc7..3773335 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -469,6 +469,7 @@
ent->data = NULL;
ent->proc_fops = NULL;
ent->proc_iops = NULL;
+ parent->nlink++;
if (proc_register(parent, ent) < 0) {
kfree(ent);
parent->nlink--;
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index ef2b015..3448a194 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -276,10 +276,12 @@
/*
* task_[no]mmu.c
*/
+struct mem_size_stats;
struct proc_maps_private {
struct inode *inode;
struct task_struct *task;
struct mm_struct *mm;
+ struct mem_size_stats *rollup;
#ifdef CONFIG_MMU
struct vm_area_struct *tail_vma;
#endif
@@ -295,6 +297,7 @@
extern const struct file_operations proc_pid_numa_maps_operations;
extern const struct file_operations proc_tid_numa_maps_operations;
extern const struct file_operations proc_pid_smaps_operations;
+extern const struct file_operations proc_pid_smaps_rollup_operations;
extern const struct file_operations proc_tid_smaps_operations;
extern const struct file_operations proc_clear_refs_operations;
extern const struct file_operations proc_pagemap_operations;
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index a9ebf5b..6df5b28 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -290,6 +290,7 @@
if (priv->mm)
mmdrop(priv->mm);
+ kfree(priv->rollup);
return seq_release_private(inode, file);
}
@@ -325,6 +326,23 @@
return stack;
}
+static void show_vma_header_prefix(struct seq_file *m,
+ unsigned long start, unsigned long end,
+ vm_flags_t flags, unsigned long long pgoff,
+ dev_t dev, unsigned long ino)
+{
+ seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
+ seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
+ start,
+ end,
+ flags & VM_READ ? 'r' : '-',
+ flags & VM_WRITE ? 'w' : '-',
+ flags & VM_EXEC ? 'x' : '-',
+ flags & VM_MAYSHARE ? 's' : 'p',
+ pgoff,
+ MAJOR(dev), MINOR(dev), ino);
+}
+
static void
show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
{
@@ -348,17 +366,7 @@
/* We don't show the stack guard page in /proc/maps */
start = vma->vm_start;
end = vma->vm_end;
-
- seq_setwidth(m, 25 + sizeof(void *) * 6 - 1);
- seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu ",
- start,
- end,
- flags & VM_READ ? 'r' : '-',
- flags & VM_WRITE ? 'w' : '-',
- flags & VM_EXEC ? 'x' : '-',
- flags & VM_MAYSHARE ? 's' : 'p',
- pgoff,
- MAJOR(dev), MINOR(dev), ino);
+ show_vma_header_prefix(m, start, end, flags, pgoff, dev, ino);
/*
* Print the dentry name for named mappings, and a
@@ -483,6 +491,7 @@
#ifdef CONFIG_PROC_PAGE_MONITOR
struct mem_size_stats {
+ bool first;
unsigned long resident;
unsigned long shared_clean;
unsigned long shared_dirty;
@@ -494,7 +503,9 @@
unsigned long swap;
unsigned long shared_hugetlb;
unsigned long private_hugetlb;
+ unsigned long first_vma_start;
u64 pss;
+ u64 pss_locked;
u64 swap_pss;
};
@@ -704,69 +715,103 @@
static int show_smap(struct seq_file *m, void *v, int is_pid)
{
+ struct proc_maps_private *priv = m->private;
struct vm_area_struct *vma = v;
- struct mem_size_stats mss;
+ struct mem_size_stats mss_stack;
+ struct mem_size_stats *mss;
struct mm_walk smaps_walk = {
.pmd_entry = smaps_pte_range,
#ifdef CONFIG_HUGETLB_PAGE
.hugetlb_entry = smaps_hugetlb_range,
#endif
.mm = vma->vm_mm,
- .private = &mss,
};
+ int ret = 0;
+ bool rollup_mode;
+ bool last_vma;
- memset(&mss, 0, sizeof mss);
+ if (priv->rollup) {
+ rollup_mode = true;
+ mss = priv->rollup;
+ if (mss->first) {
+ mss->first_vma_start = vma->vm_start;
+ mss->first = false;
+ }
+ last_vma = !m_next_vma(priv, vma);
+ } else {
+ rollup_mode = false;
+ memset(&mss_stack, 0, sizeof mss_stack);
+ mss = &mss_stack;
+ }
+
+ smaps_walk.private = mss;
/* mmap_sem is held in m_start */
walk_page_vma(vma, &smaps_walk);
+ if (vma->vm_flags & VM_LOCKED)
+ mss->pss_locked += mss->pss;
- show_map_vma(m, vma, is_pid);
+ if (!rollup_mode) {
+ show_map_vma(m, vma, is_pid);
+ } else if (last_vma) {
+ show_vma_header_prefix(
+ m, mss->first_vma_start, vma->vm_end, 0, 0, 0, 0);
+ seq_pad(m, ' ');
+ seq_puts(m, "[rollup]\n");
+ } else {
+ ret = SEQ_SKIP;
+ }
- if (vma_get_anon_name(vma)) {
+ if (!rollup_mode && vma_get_anon_name(vma)) {
seq_puts(m, "Name: ");
seq_print_vma_name(m, vma);
seq_putc(m, '\n');
}
- seq_printf(m,
- "Size: %8lu kB\n"
- "Rss: %8lu kB\n"
- "Pss: %8lu kB\n"
- "Shared_Clean: %8lu kB\n"
- "Shared_Dirty: %8lu kB\n"
- "Private_Clean: %8lu kB\n"
- "Private_Dirty: %8lu kB\n"
- "Referenced: %8lu kB\n"
- "Anonymous: %8lu kB\n"
- "AnonHugePages: %8lu kB\n"
- "Shared_Hugetlb: %8lu kB\n"
- "Private_Hugetlb: %7lu kB\n"
- "Swap: %8lu kB\n"
- "SwapPss: %8lu kB\n"
- "KernelPageSize: %8lu kB\n"
- "MMUPageSize: %8lu kB\n"
- "Locked: %8lu kB\n",
- (vma->vm_end - vma->vm_start) >> 10,
- mss.resident >> 10,
- (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
- mss.shared_clean >> 10,
- mss.shared_dirty >> 10,
- mss.private_clean >> 10,
- mss.private_dirty >> 10,
- mss.referenced >> 10,
- mss.anonymous >> 10,
- mss.anonymous_thp >> 10,
- mss.shared_hugetlb >> 10,
- mss.private_hugetlb >> 10,
- mss.swap >> 10,
- (unsigned long)(mss.swap_pss >> (10 + PSS_SHIFT)),
- vma_kernel_pagesize(vma) >> 10,
- vma_mmu_pagesize(vma) >> 10,
- (vma->vm_flags & VM_LOCKED) ?
- (unsigned long)(mss.pss >> (10 + PSS_SHIFT)) : 0);
+ if (!rollup_mode)
+ seq_printf(m,
+ "Size: %8lu kB\n"
+ "KernelPageSize: %8lu kB\n"
+ "MMUPageSize: %8lu kB\n",
+ (vma->vm_end - vma->vm_start) >> 10,
+ vma_kernel_pagesize(vma) >> 10,
+ vma_mmu_pagesize(vma) >> 10);
- show_smap_vma_flags(m, vma);
+
+ if (!rollup_mode || last_vma)
+ seq_printf(m,
+ "Rss: %8lu kB\n"
+ "Pss: %8lu kB\n"
+ "Shared_Clean: %8lu kB\n"
+ "Shared_Dirty: %8lu kB\n"
+ "Private_Clean: %8lu kB\n"
+ "Private_Dirty: %8lu kB\n"
+ "Referenced: %8lu kB\n"
+ "Anonymous: %8lu kB\n"
+ "AnonHugePages: %8lu kB\n"
+ "Shared_Hugetlb: %8lu kB\n"
+ "Private_Hugetlb: %7lu kB\n"
+ "Swap: %8lu kB\n"
+ "SwapPss: %8lu kB\n"
+ "Locked: %8lu kB\n",
+ mss->resident >> 10,
+ (unsigned long)(mss->pss >> (10 + PSS_SHIFT)),
+ mss->shared_clean >> 10,
+ mss->shared_dirty >> 10,
+ mss->private_clean >> 10,
+ mss->private_dirty >> 10,
+ mss->referenced >> 10,
+ mss->anonymous >> 10,
+ mss->anonymous_thp >> 10,
+ mss->shared_hugetlb >> 10,
+ mss->private_hugetlb >> 10,
+ mss->swap >> 10,
+ (unsigned long)(mss->swap_pss >> (10 + PSS_SHIFT)),
+ (unsigned long)(mss->pss_locked >> (10 + PSS_SHIFT)));
+
+ if (!rollup_mode)
+ show_smap_vma_flags(m, vma);
m_cache_vma(m, vma);
- return 0;
+ return ret;
}
static int show_pid_smap(struct seq_file *m, void *v)
@@ -798,6 +843,24 @@
return do_maps_open(inode, file, &proc_pid_smaps_op);
}
+static int pid_smaps_rollup_open(struct inode *inode, struct file *file)
+{
+ struct seq_file *seq;
+ struct proc_maps_private *priv;
+ int ret = do_maps_open(inode, file, &proc_pid_smaps_op);
+ if (ret < 0)
+ return ret;
+ seq = file->private_data;
+ priv = seq->private;
+ priv->rollup = kzalloc(sizeof (*priv->rollup), GFP_KERNEL);
+ if (!priv->rollup) {
+ proc_map_release(inode, file);
+ return -ENOMEM;
+ }
+ priv->rollup->first = true;
+ return 0;
+}
+
static int tid_smaps_open(struct inode *inode, struct file *file)
{
return do_maps_open(inode, file, &proc_tid_smaps_op);
@@ -810,6 +873,13 @@
.release = proc_map_release,
};
+const struct file_operations proc_pid_smaps_rollup_operations = {
+ .open = pid_smaps_rollup_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = proc_map_release,
+};
+
const struct file_operations proc_tid_smaps_operations = {
.open = tid_smaps_open,
.read = seq_read,
@@ -863,7 +933,14 @@
static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
unsigned long addr, pmd_t *pmdp)
{
- pmd_t pmd = pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);
+ pmd_t pmd = *pmdp;
+
+ /* See comment in change_huge_pmd() */
+ pmdp_invalidate(vma, addr, pmdp);
+ if (pmd_dirty(*pmdp))
+ pmd = pmd_mkdirty(pmd);
+ if (pmd_young(*pmdp))
+ pmd = pmd_mkyoung(pmd);
pmd = pmd_wrprotect(pmd);
pmd = pmd_clear_soft_dirty(pmd);
diff --git a/fs/pstore/inode.c b/fs/pstore/inode.c
index a8cc988..f7d73d8 100644
--- a/fs/pstore/inode.c
+++ b/fs/pstore/inode.c
@@ -36,7 +36,7 @@
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
-#include <linux/syslog.h>
+#include <linux/vmalloc.h>
#include <linux/htc_debug_tools.h>
#include "internal.h"
@@ -122,18 +122,6 @@
.show = pstore_ftrace_seq_show,
};
-static int pstore_check_syslog_permissions(struct pstore_private *ps)
-{
- switch (ps->type) {
- case PSTORE_TYPE_DMESG:
- case PSTORE_TYPE_CONSOLE:
- return check_syslog_permissions(SYSLOG_ACTION_READ_ALL,
- SYSLOG_FROM_READER);
- default:
- return 0;
- }
-}
-
static ssize_t pstore_file_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
@@ -156,10 +144,6 @@
int err;
const struct seq_operations *sops = NULL;
- err = pstore_check_syslog_permissions(ps);
- if (err)
- return err;
-
if (ps->type == PSTORE_TYPE_FTRACE)
sops = &pstore_ftrace_seq_ops;
@@ -196,11 +180,6 @@
static int pstore_unlink(struct inode *dir, struct dentry *dentry)
{
struct pstore_private *p = d_inode(dentry)->i_private;
- int err;
-
- err = pstore_check_syslog_permissions(p);
- if (err)
- return err;
if (p->psi->erase)
p->psi->erase(p->type, p->id, p->count,
@@ -221,7 +200,7 @@
spin_lock_irqsave(&allpstore_lock, flags);
list_del(&p->list);
spin_unlock_irqrestore(&allpstore_lock, flags);
- kfree(p);
+ vfree(p);
}
}
@@ -333,7 +312,7 @@
goto fail;
inode->i_mode = S_IFREG | 0444;
inode->i_fop = &pstore_file_operations;
- private = kmalloc(sizeof *private + size, GFP_KERNEL);
+ private = vmalloc(sizeof *private + size);
if (!private)
goto fail_alloc;
private->type = type;
@@ -410,7 +389,7 @@
fail_lockedalloc:
mutex_unlock(&d_inode(root)->i_mutex);
- kfree(private);
+ vfree(private);
fail_alloc:
iput(inode);
@@ -437,7 +416,7 @@
inode = pstore_get_inode(sb);
if (inode) {
- inode->i_mode = S_IFDIR | 0755;
+ inode->i_mode = S_IFDIR | 0750;
inode->i_op = &pstore_dir_inode_operations;
inode->i_fop = &simple_dir_operations;
inc_nlink(inode);
diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
index 6cd155b..4b93766 100644
--- a/fs/pstore/platform.c
+++ b/fs/pstore/platform.c
@@ -36,6 +36,7 @@
#include <linux/hardirq.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>
+#include <linux/vmalloc.h>
#include <linux/htc_debug_tools.h>
#include "internal.h"
@@ -581,7 +582,7 @@
big_oops_buf_sz);
if (unzipped_len > 0) {
- kfree(buf);
+ vfree(buf);
buf = big_oops_buf;
size = unzipped_len;
compressed = false;
@@ -596,7 +597,7 @@
compressed, (size_t)size, time, psi);
if (unzipped_len < 0) {
/* Free buffer other than big oops */
- kfree(buf);
+ vfree(buf);
buf = NULL;
} else
unzipped_len = -1;
diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
index c3cf780..1c745f2 100644
--- a/fs/pstore/ram.c
+++ b/fs/pstore/ram.c
@@ -384,7 +384,7 @@
/* ECC correction notice */
ecc_notice_size = persistent_ram_ecc_string(prz, NULL, 0);
- *buf = kmalloc(size + ecc_notice_size + 1, GFP_KERNEL);
+ *buf = vmalloc(size + ecc_notice_size + 1);
if (*buf == NULL)
return -ENOMEM;
@@ -666,7 +666,7 @@
cxt->przs[i] = persistent_ram_new(*paddr, *alt_paddr,
cxt->record_size, 0,
&cxt->ecc_info,
- cxt->memtype);
+ cxt->memtype, 0);
if (IS_ERR(cxt->przs[i])) {
err = PTR_ERR(cxt->przs[i]);
dev_err(dev, "failed to request mem region (0x%zx@0x%llx): %d\n",
@@ -707,7 +707,7 @@
}
*prz = persistent_ram_new(*paddr, *alt_paddr, sz, sig, &cxt->ecc_info,
- cxt->memtype);
+ cxt->memtype, 0);
if (IS_ERR(*prz)) {
int err = PTR_ERR(*prz);
diff --git a/fs/pstore/ram_core.c b/fs/pstore/ram_core.c
index 129008b..3a68b4c 100644
--- a/fs/pstore/ram_core.c
+++ b/fs/pstore/ram_core.c
@@ -50,16 +50,15 @@
return atomic_read(&buffer->start);
}
-static DEFINE_RAW_SPINLOCK(buffer_lock);
-
/* increase and wrap the start pointer, returning the old value */
static size_t buffer_start_add(struct persistent_ram_zone *prz, size_t a)
{
int old;
int new;
- unsigned long flags;
+ unsigned long flags = 0;
- raw_spin_lock_irqsave(&buffer_lock, flags);
+ if (!(prz->flags & PRZ_FLAG_NO_LOCK))
+ raw_spin_lock_irqsave(&prz->buffer_lock, flags);
old = atomic_read(&prz->buffer->start);
new = old + a;
@@ -67,7 +66,8 @@
new -= prz->buffer_size;
atomic_set(&prz->buffer->start, new);
- raw_spin_unlock_irqrestore(&buffer_lock, flags);
+ if (!(prz->flags & PRZ_FLAG_NO_LOCK))
+ raw_spin_unlock_irqrestore(&prz->buffer_lock, flags);
return old;
}
@@ -77,9 +77,10 @@
{
size_t old;
size_t new;
- unsigned long flags;
+ unsigned long flags = 0;
- raw_spin_lock_irqsave(&buffer_lock, flags);
+ if (!(prz->flags & PRZ_FLAG_NO_LOCK))
+ raw_spin_lock_irqsave(&prz->buffer_lock, flags);
old = atomic_read(&prz->buffer->size);
if (old == prz->buffer_size)
@@ -91,7 +92,8 @@
atomic_set(&prz->buffer->size, new);
exit:
- raw_spin_unlock_irqrestore(&buffer_lock, flags);
+ if (!(prz->flags & PRZ_FLAG_NO_LOCK))
+ raw_spin_unlock_irqrestore(&prz->buffer_lock, flags);
}
static void notrace persistent_ram_encode_rs8(struct persistent_ram_zone *prz,
@@ -305,7 +307,7 @@
persistent_ram_free_old(prz);
if (!prz->old_log) {
persistent_ram_ecc_old(prz, use_alt);
- prz->old_log = kmalloc(size, GFP_KERNEL);
+ prz->old_log = vmalloc(size);
prz->old_log_alloc_size = size;
}
if (!prz->old_log) {
@@ -392,7 +394,7 @@
void persistent_ram_free_old(struct persistent_ram_zone *prz)
{
- kfree(prz->old_log);
+ vfree(prz->old_log);
prz->old_log = NULL;
prz->old_log_size = 0;
prz->old_log_alloc_size = 0;
@@ -550,7 +552,8 @@
buffer->sig);
}
- buffer->sig = sig;
+ /* Rewind missing or invalid memory area. */
+ prz->buffer->sig = sig;
persistent_ram_zap(prz, 0);
return 0;
@@ -587,7 +590,7 @@
struct persistent_ram_zone *persistent_ram_new(phys_addr_t start,
phys_addr_t alt_start, size_t size,
u32 sig, struct persistent_ram_ecc_info *ecc_info,
- unsigned int memtype)
+ unsigned int memtype, u32 flags)
{
struct persistent_ram_zone *prz;
int ret = -ENOMEM;
@@ -598,6 +601,10 @@
goto err;
}
+ /* Initialize general buffer state. */
+ raw_spin_lock_init(&prz->buffer_lock);
+ prz->flags = flags;
+
ret = persistent_ram_buffer_map(start, size, prz, memtype);
if (ret)
goto err;
diff --git a/fs/reiserfs/file.c b/fs/reiserfs/file.c
index 8f5ccdf..3818730 100644
--- a/fs/reiserfs/file.c
+++ b/fs/reiserfs/file.c
@@ -189,7 +189,7 @@
int ret = 0;
th.t_trans_id = 0;
- blocksize = 1 << inode->i_blkbits;
+ blocksize = i_blocksize(inode);
if (logit) {
reiserfs_write_lock(s);
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index 3d8e7e6..60ba350 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -524,7 +524,7 @@
* referenced in convert_tail_for_hole() that may be called from
* reiserfs_get_block()
*/
- bh_result->b_size = (1 << inode->i_blkbits);
+ bh_result->b_size = i_blocksize(inode);
ret = reiserfs_get_block(inode, iblock, bh_result,
create | GET_BLOCK_NO_DANGLE);
diff --git a/fs/romfs/super.c b/fs/romfs/super.c
index 268733c..5f4f188 100644
--- a/fs/romfs/super.c
+++ b/fs/romfs/super.c
@@ -74,6 +74,7 @@
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/uaccess.h>
+#include <linux/major.h>
#include "internal.h"
static struct kmem_cache *romfs_inode_cachep;
@@ -415,7 +416,22 @@
static int romfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct super_block *sb = dentry->d_sb;
- u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
+ u64 id = 0;
+
+ /* When calling huge_encode_dev(),
+ * use sb->s_bdev->bd_dev when,
+ * - CONFIG_ROMFS_ON_BLOCK defined
+ * use sb->s_dev when,
+ * - CONFIG_ROMFS_ON_BLOCK undefined and
+ * - CONFIG_ROMFS_ON_MTD defined
+ * leave id as 0 when,
+ * - CONFIG_ROMFS_ON_BLOCK undefined and
+ * - CONFIG_ROMFS_ON_MTD undefined
+ */
+ if (sb->s_bdev)
+ id = huge_encode_dev(sb->s_bdev->bd_dev);
+ else if (sb->s_dev)
+ id = huge_encode_dev(sb->s_dev);
buf->f_type = ROMFS_MAGIC;
buf->f_namelen = ROMFS_MAXFN;
@@ -488,6 +504,11 @@
sb->s_flags |= MS_RDONLY | MS_NOATIME;
sb->s_op = &romfs_super_ops;
+#ifdef CONFIG_ROMFS_ON_MTD
+ /* Use same dev ID from the underlying mtdblock device */
+ if (sb->s_mtd)
+ sb->s_dev = MKDEV(MTD_BLOCK_MAJOR, sb->s_mtd->index);
+#endif
/* read the image superblock and check it */
rsb = kmalloc(512, GFP_KERNEL);
if (!rsb)
diff --git a/fs/sdcardfs/derived_perm.c b/fs/sdcardfs/derived_perm.c
index 85a60fb..e0b74dc 100644
--- a/fs/sdcardfs/derived_perm.c
+++ b/fs/sdcardfs/derived_perm.c
@@ -176,6 +176,9 @@
gid_t gid = sbi->options.fs_low_gid;
struct iattr newattrs;
+ if (!sbi->options.gid_derivation)
+ return;
+
info = SDCARDFS_I(d_inode(dentry));
info_d = info->data;
perm = info_d->perm;
diff --git a/fs/sdcardfs/inode.c b/fs/sdcardfs/inode.c
index 103dc45..a799716 100644
--- a/fs/sdcardfs/inode.c
+++ b/fs/sdcardfs/inode.c
@@ -34,10 +34,15 @@
if (!cred)
return NULL;
- if (data->under_obb)
- uid = AID_MEDIA_OBB;
- else
- uid = multiuser_get_uid(data->userid, sbi->options.fs_low_uid);
+ if (sbi->options.gid_derivation) {
+ if (data->under_obb)
+ uid = AID_MEDIA_OBB;
+ else
+ uid = multiuser_get_uid(data->userid,
+ sbi->options.fs_low_uid);
+ } else {
+ uid = sbi->options.fs_low_uid;
+ }
cred->fsuid = make_kuid(&init_user_ns, uid);
cred->fsgid = make_kgid(&init_user_ns, sbi->options.fs_low_gid);
diff --git a/fs/sdcardfs/main.c b/fs/sdcardfs/main.c
index 8b189ee..e990f23 100644
--- a/fs/sdcardfs/main.c
+++ b/fs/sdcardfs/main.c
@@ -32,6 +32,7 @@
Opt_multiuser,
Opt_userid,
Opt_reserved_mb,
+ Opt_gid_derivation,
Opt_err,
};
@@ -43,6 +44,7 @@
{Opt_mask, "mask=%u"},
{Opt_userid, "userid=%d"},
{Opt_multiuser, "multiuser"},
+ {Opt_gid_derivation, "derive_gid"},
{Opt_reserved_mb, "reserved_mb=%u"},
{Opt_err, NULL}
};
@@ -64,6 +66,8 @@
vfsopts->gid = 0;
/* by default, 0MB is reserved */
opts->reserved_mb = 0;
+ /* by default, gid derivation is off */
+ opts->gid_derivation = false;
*debug = 0;
@@ -115,6 +119,9 @@
return 0;
opts->reserved_mb = option;
break;
+ case Opt_gid_derivation:
+ opts->gid_derivation = true;
+ break;
/* unknown option */
default:
if (!silent)
diff --git a/fs/sdcardfs/sdcardfs.h b/fs/sdcardfs/sdcardfs.h
index 31d37d7..56ff135 100644
--- a/fs/sdcardfs/sdcardfs.h
+++ b/fs/sdcardfs/sdcardfs.h
@@ -219,6 +219,7 @@
gid_t fs_low_gid;
userid_t fs_user_id;
bool multiuser;
+ bool gid_derivation;
unsigned int reserved_mb;
};
diff --git a/fs/sdcardfs/super.c b/fs/sdcardfs/super.c
index 7f4539b..b89947d 100644
--- a/fs/sdcardfs/super.c
+++ b/fs/sdcardfs/super.c
@@ -302,6 +302,8 @@
seq_printf(m, ",mask=%u", vfsopts->mask);
if (opts->fs_user_id)
seq_printf(m, ",userid=%u", opts->fs_user_id);
+ if (opts->gid_derivation)
+ seq_puts(m, ",derive_gid");
if (opts->reserved_mb != 0)
seq_printf(m, ",reserved=%uMB", opts->reserved_mb);
diff --git a/fs/seq_file.c b/fs/seq_file.c
index d672e2f..6dc4296 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -72,9 +72,10 @@
mutex_init(&p->lock);
p->op = op;
-#ifdef CONFIG_USER_NS
- p->user_ns = file->f_cred->user_ns;
-#endif
+
+ // No refcounting: the lifetime of 'p' is constrained
+ // to the lifetime of the file.
+ p->file = file;
/*
* Wrappers around seq_open(e.g. swaps_open) need to be
diff --git a/fs/squashfs/lz4_wrapper.c b/fs/squashfs/lz4_wrapper.c
index c31e2bc..95da653 100644
--- a/fs/squashfs/lz4_wrapper.c
+++ b/fs/squashfs/lz4_wrapper.c
@@ -97,7 +97,6 @@
struct squashfs_lz4 *stream = strm;
void *buff = stream->input, *data;
int avail, i, bytes = length, res;
- size_t dest_len = output->length;
for (i = 0; i < b; i++) {
avail = min(bytes, msblk->devblksize - offset);
@@ -108,27 +107,28 @@
put_bh(bh[i]);
}
- res = lz4_decompress_unknownoutputsize(stream->input, length,
- stream->output, &dest_len);
- if (res)
+ res = LZ4_decompress_safe(stream->input, stream->output,
+ length, output->length);
+
+ if (res < 0)
return -EIO;
- bytes = dest_len;
+ bytes = res;
data = squashfs_first_page(output);
buff = stream->output;
while (data) {
- if (bytes <= PAGE_CACHE_SIZE) {
+ if (bytes <= PAGE_SIZE) {
memcpy(data, buff, bytes);
break;
}
- memcpy(data, buff, PAGE_CACHE_SIZE);
- buff += PAGE_CACHE_SIZE;
- bytes -= PAGE_CACHE_SIZE;
+ memcpy(data, buff, PAGE_SIZE);
+ buff += PAGE_SIZE;
+ bytes -= PAGE_SIZE;
data = squashfs_next_page(output);
}
squashfs_finish_page(output);
- return dest_len;
+ return res;
}
const struct squashfs_decompressor squashfs_lz4_comp_ops = {
diff --git a/fs/stat.c b/fs/stat.c
index d4a61d8..004dd77 100644
--- a/fs/stat.c
+++ b/fs/stat.c
@@ -31,7 +31,7 @@
stat->atime = inode->i_atime;
stat->mtime = inode->i_mtime;
stat->ctime = inode->i_ctime;
- stat->blksize = (1 << inode->i_blkbits);
+ stat->blksize = i_blocksize(inode);
stat->blocks = inode->i_blocks;
}
@@ -454,6 +454,7 @@
inode->i_bytes -= 512;
}
}
+EXPORT_SYMBOL(__inode_add_bytes);
void inode_add_bytes(struct inode *inode, loff_t bytes)
{
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
index b803213..39c75a86 100644
--- a/fs/sysfs/file.c
+++ b/fs/sysfs/file.c
@@ -108,7 +108,7 @@
{
const struct sysfs_ops *ops = sysfs_file_ops(of->kn);
struct kobject *kobj = of->kn->parent->priv;
- size_t len;
+ ssize_t len;
/*
* If buf != of->prealloc_buf, we don't know how
@@ -117,13 +117,15 @@
if (WARN_ON_ONCE(buf != of->prealloc_buf))
return 0;
len = ops->show(kobj, of->kn->priv, buf);
+ if (len < 0)
+ return len;
if (pos) {
if (len <= pos)
return 0;
len -= pos;
memmove(buf, buf + pos, len);
}
- return min(count, len);
+ return min_t(ssize_t, count, len);
}
/* kernfs write callback for regular sysfs files */
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index 566df9b..0e659d9 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -1206,7 +1206,7 @@
{
int err;
struct udf_inode_info *iinfo;
- int bsize = 1 << inode->i_blkbits;
+ int bsize = i_blocksize(inode);
if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
S_ISLNK(inode->i_mode)))
@@ -1235,8 +1235,8 @@
return err;
}
set_size:
- truncate_setsize(inode, newsize);
up_write(&iinfo->i_data_sem);
+ truncate_setsize(inode, newsize);
} else {
if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
down_write(&iinfo->i_data_sem);
@@ -1253,9 +1253,9 @@
udf_get_block);
if (err)
return err;
+ truncate_setsize(inode, newsize);
down_write(&iinfo->i_data_sem);
udf_clear_extent_cache(inode);
- truncate_setsize(inode, newsize);
udf_truncate_extents(inode);
up_write(&iinfo->i_data_sem);
}
diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c
index dc5fae6..637e17cb 100644
--- a/fs/ufs/balloc.c
+++ b/fs/ufs/balloc.c
@@ -81,7 +81,8 @@
ufs_error (sb, "ufs_free_fragments",
"bit already cleared for fragment %u", i);
}
-
+
+ inode_sub_bytes(inode, count << uspi->s_fshift);
fs32_add(sb, &ucg->cg_cs.cs_nffree, count);
uspi->cs_total.cs_nffree += count;
fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, count);
@@ -183,6 +184,7 @@
ufs_error(sb, "ufs_free_blocks", "freeing free fragment");
}
ubh_setblock(UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
+ inode_sub_bytes(inode, uspi->s_fpb << uspi->s_fshift);
if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
ufs_clusteracct (sb, ucpi, blkno, 1);
@@ -494,6 +496,20 @@
return 0;
}
+static bool try_add_frags(struct inode *inode, unsigned frags)
+{
+ unsigned size = frags * i_blocksize(inode);
+ spin_lock(&inode->i_lock);
+ __inode_add_bytes(inode, size);
+ if (unlikely((u32)inode->i_blocks != inode->i_blocks)) {
+ __inode_sub_bytes(inode, size);
+ spin_unlock(&inode->i_lock);
+ return false;
+ }
+ spin_unlock(&inode->i_lock);
+ return true;
+}
+
static u64 ufs_add_fragments(struct inode *inode, u64 fragment,
unsigned oldcount, unsigned newcount)
{
@@ -530,6 +546,9 @@
for (i = oldcount; i < newcount; i++)
if (ubh_isclr (UCPI_UBH(ucpi), ucpi->c_freeoff, fragno + i))
return 0;
+
+ if (!try_add_frags(inode, count))
+ return 0;
/*
* Block can be extended
*/
@@ -647,6 +666,7 @@
ubh_setbit (UCPI_UBH(ucpi), ucpi->c_freeoff, goal + i);
i = uspi->s_fpb - count;
+ inode_sub_bytes(inode, i << uspi->s_fshift);
fs32_add(sb, &ucg->cg_cs.cs_nffree, i);
uspi->cs_total.cs_nffree += i;
fs32_add(sb, &UFS_SB(sb)->fs_cs(cgno).cs_nffree, i);
@@ -657,6 +677,8 @@
result = ufs_bitmap_search (sb, ucpi, goal, allocsize);
if (result == INVBLOCK)
return 0;
+ if (!try_add_frags(inode, count))
+ return 0;
for (i = 0; i < count; i++)
ubh_clrbit (UCPI_UBH(ucpi), ucpi->c_freeoff, result + i);
@@ -716,6 +738,8 @@
return INVBLOCK;
ucpi->c_rotor = result;
gotit:
+ if (!try_add_frags(inode, uspi->s_fpb))
+ return 0;
blkno = ufs_fragstoblks(result);
ubh_clrblock (UCPI_UBH(ucpi), ucpi->c_freeoff, blkno);
if ((UFS_SB(sb)->s_flags & UFS_CG_MASK) == UFS_CG_44BSD)
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c
index a064cf44..1f69bb9b 100644
--- a/fs/ufs/inode.c
+++ b/fs/ufs/inode.c
@@ -235,7 +235,8 @@
p = ufs_get_direct_data_ptr(uspi, ufsi, block);
tmp = ufs_new_fragments(inode, p, lastfrag, ufs_data_ptr_to_cpu(sb, p),
- new_size, err, locked_page);
+ new_size - (lastfrag & uspi->s_fpbmask), err,
+ locked_page);
return tmp != 0;
}
@@ -284,7 +285,7 @@
goal += uspi->s_fpb;
}
tmp = ufs_new_fragments(inode, p, ufs_blknum(new_fragment),
- goal, uspi->s_fpb, err, locked_page);
+ goal, nfrags, err, locked_page);
if (!tmp) {
*err = -ENOSPC;
@@ -402,7 +403,9 @@
if (!create) {
phys64 = ufs_frag_map(inode, offsets, depth);
- goto out;
+ if (phys64)
+ map_bh(bh_result, sb, phys64 + frag);
+ return 0;
}
/* This code entered only while writing ....? */
diff --git a/fs/ufs/super.c b/fs/ufs/super.c
index f6390ee..10f3644 100644
--- a/fs/ufs/super.c
+++ b/fs/ufs/super.c
@@ -746,6 +746,23 @@
return;
}
+static u64 ufs_max_bytes(struct super_block *sb)
+{
+ struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
+ int bits = uspi->s_apbshift;
+ u64 res;
+
+ if (bits > 21)
+ res = ~0ULL;
+ else
+ res = UFS_NDADDR + (1LL << bits) + (1LL << (2*bits)) +
+ (1LL << (3*bits));
+
+ if (res >= (MAX_LFS_FILESIZE >> uspi->s_bshift))
+ return MAX_LFS_FILESIZE;
+ return res << uspi->s_bshift;
+}
+
static int ufs_fill_super(struct super_block *sb, void *data, int silent)
{
struct ufs_sb_info * sbi;
@@ -1212,6 +1229,7 @@
"fast symlink size (%u)\n", uspi->s_maxsymlinklen);
uspi->s_maxsymlinklen = maxsymlen;
}
+ sb->s_maxbytes = ufs_max_bytes(sb);
sb->s_max_links = UFS_LINK_MAX;
inode = ufs_iget(sb, UFS_ROOTINO);
diff --git a/fs/ufs/util.h b/fs/ufs/util.h
index 9541759..3f9463f 100644
--- a/fs/ufs/util.h
+++ b/fs/ufs/util.h
@@ -473,15 +473,19 @@
static inline int _ubh_isblockset_(struct ufs_sb_private_info * uspi,
struct ufs_buffer_head * ubh, unsigned begin, unsigned block)
{
+ u8 mask;
switch (uspi->s_fpb) {
case 8:
return (*ubh_get_addr (ubh, begin + block) == 0xff);
case 4:
- return (*ubh_get_addr (ubh, begin + (block >> 1)) == (0x0f << ((block & 0x01) << 2)));
+ mask = 0x0f << ((block & 0x01) << 2);
+ return (*ubh_get_addr (ubh, begin + (block >> 1)) & mask) == mask;
case 2:
- return (*ubh_get_addr (ubh, begin + (block >> 2)) == (0x03 << ((block & 0x03) << 1)));
+ mask = 0x03 << ((block & 0x03) << 1);
+ return (*ubh_get_addr (ubh, begin + (block >> 2)) & mask) == mask;
case 1:
- return (*ubh_get_addr (ubh, begin + (block >> 3)) == (0x01 << (block & 0x07)));
+ mask = 0x01 << (block & 0x07);
+ return (*ubh_get_addr (ubh, begin + (block >> 3)) & mask) == mask;
}
return 0;
}
diff --git a/fs/xattr.c b/fs/xattr.c
index 9b932b9..f0da9d2 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -442,7 +442,7 @@
size = XATTR_SIZE_MAX;
kvalue = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
if (!kvalue) {
- vvalue = vmalloc(size);
+ vvalue = vzalloc(size);
if (!vvalue)
return -ENOMEM;
kvalue = vvalue;
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 119c242..75884ae 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -2179,8 +2179,10 @@
}
temp = xfs_bmap_worst_indlen(bma->ip, temp);
temp2 = xfs_bmap_worst_indlen(bma->ip, temp2);
- diff = (int)(temp + temp2 - startblockval(PREV.br_startblock) -
- (bma->cur ? bma->cur->bc_private.b.allocated : 0));
+ diff = (int)(temp + temp2 -
+ (startblockval(PREV.br_startblock) -
+ (bma->cur ?
+ bma->cur->bc_private.b.allocated : 0)));
if (diff > 0) {
error = xfs_mod_fdblocks(bma->ip->i_mount,
-((int64_t)diff), false);
@@ -2232,7 +2234,6 @@
temp = da_new;
if (bma->cur)
temp += bma->cur->bc_private.b.allocated;
- ASSERT(temp <= da_old);
if (temp < da_old)
xfs_mod_fdblocks(bma->ip->i_mount,
(int64_t)(da_old - temp), false);
diff --git a/fs/xfs/libxfs/xfs_btree.c b/fs/xfs/libxfs/xfs_btree.c
index af1bbee..28bc5e7 100644
--- a/fs/xfs/libxfs/xfs_btree.c
+++ b/fs/xfs/libxfs/xfs_btree.c
@@ -4064,7 +4064,7 @@
xfs_btree_readahead_ptr(cur, ptr, 1);
/* save for the next iteration of the loop */
- lptr = *ptr;
+ xfs_btree_copy_ptrs(cur, &lptr, ptr, 1);
}
/* for each buffer in the level */
diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c
index 1aabfda..7183b7e 100644
--- a/fs/xfs/libxfs/xfs_inode_buf.c
+++ b/fs/xfs/libxfs/xfs_inode_buf.c
@@ -299,6 +299,14 @@
if (dip->di_magic != cpu_to_be16(XFS_DINODE_MAGIC))
return false;
+ /* don't allow invalid i_size */
+ if (be64_to_cpu(dip->di_size) & (1ULL << 63))
+ return false;
+
+ /* No zero-length symlinks. */
+ if (S_ISLNK(be16_to_cpu(dip->di_mode)) && dip->di_size == 0)
+ return false;
+
/* only version 3 or greater inodes are extensively verified here */
if (dip->di_version < 3)
return true;
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 29e7e5dd..a9063ac 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -288,7 +288,7 @@
{
struct xfs_inode *ip = XFS_I(inode);
struct xfs_mount *mp = ip->i_mount;
- ssize_t count = 1 << inode->i_blkbits;
+ ssize_t count = i_blocksize(inode);
xfs_fileoff_t offset_fsb, end_fsb;
int error = 0;
int bmapi_flags = XFS_BMAPI_ENTIRE;
@@ -921,7 +921,7 @@
break;
}
next_buffer:
- offset += 1 << inode->i_blkbits;
+ offset += i_blocksize(inode);
} while ((bh = bh->b_this_page) != head);
@@ -1363,7 +1363,7 @@
offset + mapping_size >= i_size_read(inode)) {
/* limit mapping to block that spans EOF */
mapping_size = roundup_64(i_size_read(inode) - offset,
- 1 << inode->i_blkbits);
+ i_blocksize(inode));
}
if (mapping_size > LONG_MAX)
mapping_size = LONG_MAX;
@@ -1395,7 +1395,7 @@
return -EIO;
offset = (xfs_off_t)iblock << inode->i_blkbits;
- ASSERT(bh_result->b_size >= (1 << inode->i_blkbits));
+ ASSERT(bh_result->b_size >= i_blocksize(inode));
size = bh_result->b_size;
if (!create && direct && offset >= i_size_read(inode))
@@ -1426,6 +1426,26 @@
if (error)
goto out_unlock;
+ /*
+ * The only time we can ever safely find delalloc blocks on direct I/O
+ * is a dio write to post-eof speculative preallocation. All other
+ * scenarios are indicative of a problem or misuse (such as mixing
+ * direct and mapped I/O).
+ *
+ * The file may be unmapped by the time we get here so we cannot
+ * reliably fail the I/O based on mapping. Instead, fail the I/O if this
+ * is a read or a write within eof. Otherwise, carry on but warn as a
+ * precuation if the file happens to be mapped.
+ */
+ if (direct && imap.br_startblock == DELAYSTARTBLOCK) {
+ if (!create || offset < i_size_read(VFS_I(ip))) {
+ WARN_ON_ONCE(1);
+ error = -EIO;
+ goto out_unlock;
+ }
+ WARN_ON_ONCE(mapping_mapped(VFS_I(ip)->i_mapping));
+ }
+
/* for DAX, we convert unwritten extents directly */
if (create &&
(!nimaps ||
@@ -1525,7 +1545,6 @@
set_buffer_new(bh_result);
if (imap.br_startblock == DELAYSTARTBLOCK) {
- BUG_ON(direct);
if (create) {
set_buffer_uptodate(bh_result);
set_buffer_mapped(bh_result);
@@ -1968,7 +1987,7 @@
if (offset < end_offset)
set_buffer_dirty(bh);
bh = bh->b_this_page;
- offset += 1 << inode->i_blkbits;
+ offset += i_blocksize(inode);
} while (bh != head);
}
/*
diff --git a/fs/xfs/xfs_attr.h b/fs/xfs/xfs_attr.h
index dd48245..2343312 100644
--- a/fs/xfs/xfs_attr.h
+++ b/fs/xfs/xfs_attr.h
@@ -112,6 +112,7 @@
*========================================================================*/
+/* Return 0 on success, or -errno; other state communicated via *context */
typedef int (*put_listent_func_t)(struct xfs_attr_list_context *, int,
unsigned char *, int, int, unsigned char *);
diff --git a/fs/xfs/xfs_attr_list.c b/fs/xfs/xfs_attr_list.c
index 4fa1482..c8be331 100644
--- a/fs/xfs/xfs_attr_list.c
+++ b/fs/xfs/xfs_attr_list.c
@@ -108,16 +108,14 @@
(int)sfe->namelen,
(int)sfe->valuelen,
&sfe->nameval[sfe->namelen]);
-
+ if (error)
+ return error;
/*
* Either search callback finished early or
* didn't fit it all in the buffer after all.
*/
if (context->seen_enough)
break;
-
- if (error)
- return error;
sfe = XFS_ATTR_SF_NEXTENTRY(sfe);
}
trace_xfs_attr_list_sf_all(context);
@@ -581,7 +579,7 @@
trace_xfs_attr_list_full(context);
alist->al_more = 1;
context->seen_enough = 1;
- return 1;
+ return 0;
}
aep = (attrlist_ent_t *)&context->alist[context->firstu];
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index dbae6490..863e1bf 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -682,7 +682,7 @@
* extents.
*/
if (map[i].br_startblock == DELAYSTARTBLOCK &&
- map[i].br_startoff <= XFS_B_TO_FSB(mp, XFS_ISIZE(ip)))
+ map[i].br_startoff < XFS_B_TO_FSB(mp, XFS_ISIZE(ip)))
ASSERT((iflags & BMV_IF_DELALLOC) != 0);
if (map[i].br_startblock == HOLESTARTBLOCK &&
@@ -1713,6 +1713,7 @@
xfs_trans_t *tp;
xfs_bstat_t *sbp = &sxp->sx_stat;
xfs_ifork_t *tempifp, *ifp, *tifp;
+ xfs_extnum_t nextents;
int src_log_flags, target_log_flags;
int error = 0;
int aforkblks = 0;
@@ -1899,7 +1900,8 @@
* pointer. Otherwise it's already NULL or
* pointing to the extent.
*/
- if (ip->i_d.di_nextents <= XFS_INLINE_EXTS) {
+ nextents = ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
+ if (nextents <= XFS_INLINE_EXTS) {
ifp->if_u1.if_extents =
ifp->if_u2.if_inline_ext;
}
@@ -1918,7 +1920,8 @@
* pointer. Otherwise it's already NULL or
* pointing to the extent.
*/
- if (tip->i_d.di_nextents <= XFS_INLINE_EXTS) {
+ nextents = tip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
+ if (nextents <= XFS_INLINE_EXTS) {
tifp->if_u1.if_extents =
tifp->if_u2.if_inline_ext;
}
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index eb1b8c8..dcb7096 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -375,6 +375,7 @@
out_free_pages:
for (i = 0; i < bp->b_page_count; i++)
__free_page(bp->b_pages[i]);
+ bp->b_flags &= ~_XBF_PAGES;
return error;
}
@@ -978,6 +979,8 @@
xfs_buf_unlock(
struct xfs_buf *bp)
{
+ ASSERT(xfs_buf_islocked(bp));
+
XB_CLEAR_OWNER(bp);
up(&bp->b_sema);
@@ -1712,6 +1715,28 @@
}
/*
+ * Cancel a delayed write list.
+ *
+ * Remove each buffer from the list, clear the delwri queue flag and drop the
+ * associated buffer reference.
+ */
+void
+xfs_buf_delwri_cancel(
+ struct list_head *list)
+{
+ struct xfs_buf *bp;
+
+ while (!list_empty(list)) {
+ bp = list_first_entry(list, struct xfs_buf, b_list);
+
+ xfs_buf_lock(bp);
+ bp->b_flags &= ~_XBF_DELWRI_Q;
+ list_del_init(&bp->b_list);
+ xfs_buf_relse(bp);
+ }
+}
+
+/*
* Add a buffer to the delayed write list.
*
* This queues a buffer for writeout if it hasn't already been. Note that
diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h
index c75721a..149bbd4 100644
--- a/fs/xfs/xfs_buf.h
+++ b/fs/xfs/xfs_buf.h
@@ -304,6 +304,7 @@
extern void *xfs_buf_offset(struct xfs_buf *, size_t);
/* Delayed Write Buffer Routines */
+extern void xfs_buf_delwri_cancel(struct list_head *);
extern bool xfs_buf_delwri_queue(struct xfs_buf *, struct list_head *);
extern int xfs_buf_delwri_submit(struct list_head *);
extern int xfs_buf_delwri_submit_nowait(struct list_head *);
diff --git a/fs/xfs/xfs_dir2_readdir.c b/fs/xfs/xfs_dir2_readdir.c
index 642d55d..2fbf643f 100644
--- a/fs/xfs/xfs_dir2_readdir.c
+++ b/fs/xfs/xfs_dir2_readdir.c
@@ -406,6 +406,7 @@
/*
* Do we need more readahead?
+ * Each loop tries to process 1 full dir blk; last may be partial.
*/
blk_start_plug(&plug);
for (mip->ra_index = mip->ra_offset = i = 0;
@@ -416,7 +417,8 @@
* Read-ahead a contiguous directory block.
*/
if (i > mip->ra_current &&
- map[mip->ra_index].br_blockcount >= geo->fsbcount) {
+ (map[mip->ra_index].br_blockcount - mip->ra_offset) >=
+ geo->fsbcount) {
xfs_dir3_data_readahead(dp,
map[mip->ra_index].br_startoff + mip->ra_offset,
XFS_FSB_TO_DADDR(dp->i_mount,
@@ -437,14 +439,19 @@
}
/*
- * Advance offset through the mapping table.
+ * Advance offset through the mapping table, processing a full
+ * dir block even if it is fragmented into several extents.
+ * But stop if we have consumed all valid mappings, even if
+ * it's not yet a full directory block.
*/
- for (j = 0; j < geo->fsbcount; j += length ) {
+ for (j = 0;
+ j < geo->fsbcount && mip->ra_index < mip->map_valid;
+ j += length ) {
/*
* The rest of this extent but not more than a dir
* block.
*/
- length = min_t(int, geo->fsbcount,
+ length = min_t(int, geo->fsbcount - j,
map[mip->ra_index].br_blockcount -
mip->ra_offset);
mip->ra_offset += length;
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index f5392ab..3dd4730 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -947,7 +947,7 @@
if (error)
goto out_unlock;
} else if (mode & FALLOC_FL_COLLAPSE_RANGE) {
- unsigned blksize_mask = (1 << inode->i_blkbits) - 1;
+ unsigned int blksize_mask = i_blocksize(inode) - 1;
if (offset & blksize_mask || len & blksize_mask) {
error = -EINVAL;
@@ -969,7 +969,7 @@
if (error)
goto out_unlock;
} else if (mode & FALLOC_FL_INSERT_RANGE) {
- unsigned blksize_mask = (1 << inode->i_blkbits) - 1;
+ unsigned int blksize_mask = i_blocksize(inode) - 1;
new_size = i_size_read(inode) + len;
if (offset & blksize_mask || len & blksize_mask) {
@@ -1208,7 +1208,7 @@
unsigned nr_pages;
unsigned int i;
- want = min_t(pgoff_t, end - index, PAGEVEC_SIZE);
+ want = min_t(pgoff_t, end - index, PAGEVEC_SIZE - 1) + 1;
nr_pages = pagevec_lookup(&pvec, inode->i_mapping, index,
want);
/*
@@ -1235,17 +1235,6 @@
break;
}
- /*
- * At lease we found one page. If this is the first time we
- * step into the loop, and if the first page index offset is
- * greater than the given search offset, a hole was found.
- */
- if (type == HOLE_OFF && lastoff == startoff &&
- lastoff < page_offset(pvec.pages[0])) {
- found = true;
- break;
- }
-
for (i = 0; i < nr_pages; i++) {
struct page *page = pvec.pages[i];
loff_t b_offset;
@@ -1257,18 +1246,18 @@
* file mapping. However, page->index will not change
* because we have a reference on the page.
*
- * Searching done if the page index is out of range.
- * If the current offset is not reaches the end of
- * the specified search range, there should be a hole
- * between them.
+ * If current page offset is beyond where we've ended,
+ * we've found a hole.
*/
- if (page->index > end) {
- if (type == HOLE_OFF && lastoff < endoff) {
- *offset = lastoff;
- found = true;
- }
+ if (type == HOLE_OFF && lastoff < endoff &&
+ lastoff < page_offset(pvec.pages[i])) {
+ found = true;
+ *offset = lastoff;
goto out;
}
+ /* Searching done if the page index is out of range. */
+ if (page->index > end)
+ goto out;
lock_page(page);
/*
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index d7a490f..adbc1f5 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -210,14 +210,17 @@
error = inode_init_always(mp->m_super, inode);
if (error) {
+ bool wake;
/*
* Re-initializing the inode failed, and we are in deep
* trouble. Try to re-add it to the reclaim list.
*/
rcu_read_lock();
spin_lock(&ip->i_flags_lock);
-
+ wake = !!__xfs_iflags_test(ip, XFS_INEW);
ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
+ if (wake)
+ wake_up_bit(&ip->i_flags, __XFS_INEW_BIT);
ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
trace_xfs_iget_reclaim_fail(ip);
goto out_error;
@@ -363,6 +366,22 @@
return error;
}
+static void
+xfs_inew_wait(
+ struct xfs_inode *ip)
+{
+ wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_INEW_BIT);
+ DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_INEW_BIT);
+
+ do {
+ prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
+ if (!xfs_iflags_test(ip, XFS_INEW))
+ break;
+ schedule();
+ } while (true);
+ finish_wait(wq, &wait.wait);
+}
+
/*
* Look up an inode by number in the given file system.
* The inode is looked up in the cache held in each AG.
@@ -467,9 +486,11 @@
STATIC int
xfs_inode_ag_walk_grab(
- struct xfs_inode *ip)
+ struct xfs_inode *ip,
+ int flags)
{
struct inode *inode = VFS_I(ip);
+ bool newinos = !!(flags & XFS_AGITER_INEW_WAIT);
ASSERT(rcu_read_lock_held());
@@ -487,7 +508,8 @@
goto out_unlock_noent;
/* avoid new or reclaimable inodes. Leave for reclaim code to flush */
- if (__xfs_iflags_test(ip, XFS_INEW | XFS_IRECLAIMABLE | XFS_IRECLAIM))
+ if ((!newinos && __xfs_iflags_test(ip, XFS_INEW)) ||
+ __xfs_iflags_test(ip, XFS_IRECLAIMABLE | XFS_IRECLAIM))
goto out_unlock_noent;
spin_unlock(&ip->i_flags_lock);
@@ -515,7 +537,8 @@
void *args),
int flags,
void *args,
- int tag)
+ int tag,
+ int iter_flags)
{
uint32_t first_index;
int last_error = 0;
@@ -557,7 +580,7 @@
for (i = 0; i < nr_found; i++) {
struct xfs_inode *ip = batch[i];
- if (done || xfs_inode_ag_walk_grab(ip))
+ if (done || xfs_inode_ag_walk_grab(ip, iter_flags))
batch[i] = NULL;
/*
@@ -585,6 +608,9 @@
for (i = 0; i < nr_found; i++) {
if (!batch[i])
continue;
+ if ((iter_flags & XFS_AGITER_INEW_WAIT) &&
+ xfs_iflags_test(batch[i], XFS_INEW))
+ xfs_inew_wait(batch[i]);
error = execute(batch[i], flags, args);
IRELE(batch[i]);
if (error == -EAGAIN) {
@@ -637,12 +663,13 @@
}
int
-xfs_inode_ag_iterator(
+xfs_inode_ag_iterator_flags(
struct xfs_mount *mp,
int (*execute)(struct xfs_inode *ip, int flags,
void *args),
int flags,
- void *args)
+ void *args,
+ int iter_flags)
{
struct xfs_perag *pag;
int error = 0;
@@ -652,7 +679,8 @@
ag = 0;
while ((pag = xfs_perag_get(mp, ag))) {
ag = pag->pag_agno + 1;
- error = xfs_inode_ag_walk(mp, pag, execute, flags, args, -1);
+ error = xfs_inode_ag_walk(mp, pag, execute, flags, args, -1,
+ iter_flags);
xfs_perag_put(pag);
if (error) {
last_error = error;
@@ -664,6 +692,17 @@
}
int
+xfs_inode_ag_iterator(
+ struct xfs_mount *mp,
+ int (*execute)(struct xfs_inode *ip, int flags,
+ void *args),
+ int flags,
+ void *args)
+{
+ return xfs_inode_ag_iterator_flags(mp, execute, flags, args, 0);
+}
+
+int
xfs_inode_ag_iterator_tag(
struct xfs_mount *mp,
int (*execute)(struct xfs_inode *ip, int flags,
@@ -680,7 +719,8 @@
ag = 0;
while ((pag = xfs_perag_get_tag(mp, ag, tag))) {
ag = pag->pag_agno + 1;
- error = xfs_inode_ag_walk(mp, pag, execute, flags, args, tag);
+ error = xfs_inode_ag_walk(mp, pag, execute, flags, args, tag,
+ 0);
xfs_perag_put(pag);
if (error) {
last_error = error;
diff --git a/fs/xfs/xfs_icache.h b/fs/xfs/xfs_icache.h
index 62f1f91..147a792 100644
--- a/fs/xfs/xfs_icache.h
+++ b/fs/xfs/xfs_icache.h
@@ -48,6 +48,11 @@
#define XFS_IGET_UNTRUSTED 0x2
#define XFS_IGET_DONTCACHE 0x4
+/*
+ * flags for AG inode iterator
+ */
+#define XFS_AGITER_INEW_WAIT 0x1 /* wait on new inodes */
+
int xfs_iget(struct xfs_mount *mp, struct xfs_trans *tp, xfs_ino_t ino,
uint flags, uint lock_flags, xfs_inode_t **ipp);
@@ -72,6 +77,9 @@
int xfs_inode_ag_iterator(struct xfs_mount *mp,
int (*execute)(struct xfs_inode *ip, int flags, void *args),
int flags, void *args);
+int xfs_inode_ag_iterator_flags(struct xfs_mount *mp,
+ int (*execute)(struct xfs_inode *ip, int flags, void *args),
+ int flags, void *args, int iter_flags);
int xfs_inode_ag_iterator_tag(struct xfs_mount *mp,
int (*execute)(struct xfs_inode *ip, int flags, void *args),
int flags, void *args, int tag);
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index ca9e119..ae1a498 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -208,7 +208,8 @@
#define XFS_IRECLAIM (1 << 0) /* started reclaiming this inode */
#define XFS_ISTALE (1 << 1) /* inode has been staled */
#define XFS_IRECLAIMABLE (1 << 2) /* inode can be reclaimed */
-#define XFS_INEW (1 << 3) /* inode has just been allocated */
+#define __XFS_INEW_BIT 3 /* inode has just been allocated */
+#define XFS_INEW (1 << __XFS_INEW_BIT)
#define XFS_ITRUNCATED (1 << 5) /* truncated down so flush-on-close */
#define XFS_IDIRTY_RELEASE (1 << 6) /* dirty release already seen */
#define __XFS_IFLOCK_BIT 7 /* inode is being flushed right now */
@@ -453,6 +454,7 @@
xfs_iflags_clear(ip, XFS_INEW);
barrier();
unlock_new_inode(VFS_I(ip));
+ wake_up_bit(&ip->i_flags, __XFS_INEW_BIT);
}
static inline void xfs_setup_existing_inode(struct xfs_inode *ip)
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index d42738d..e4a4f82 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -403,6 +403,7 @@
{
int error = -ENOMEM;
attrlist_cursor_kern_t *cursor;
+ struct xfs_fsop_attrlist_handlereq __user *p = arg;
xfs_fsop_attrlist_handlereq_t al_hreq;
struct dentry *dentry;
char *kbuf;
@@ -435,6 +436,11 @@
if (error)
goto out_kfree;
+ if (copy_to_user(&p->pos, cursor, sizeof(attrlist_cursor_kern_t))) {
+ error = -EFAULT;
+ goto out_kfree;
+ }
+
if (copy_to_user(al_hreq.buffer, kbuf, al_hreq.buflen))
error = -EFAULT;
@@ -1379,10 +1385,11 @@
unsigned int cmd,
void __user *arg)
{
- struct getbmapx bmx;
+ struct getbmapx bmx = { 0 };
int error;
- if (copy_from_user(&bmx, arg, sizeof(struct getbmapx)))
+ /* struct getbmap is a strict subset of struct getbmapx. */
+ if (copy_from_user(&bmx, arg, offsetof(struct getbmapx, bmv_iflags)))
return -EFAULT;
if (bmx.bmv_count < 2)
diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h
index ec0e239..201aae0 100644
--- a/fs/xfs/xfs_linux.h
+++ b/fs/xfs/xfs_linux.h
@@ -369,7 +369,14 @@
#endif /* DEBUG */
#ifdef CONFIG_XFS_RT
-#define XFS_IS_REALTIME_INODE(ip) ((ip)->i_d.di_flags & XFS_DIFLAG_REALTIME)
+
+/*
+ * make sure we ignore the inode flag if the filesystem doesn't have a
+ * configured realtime device.
+ */
+#define XFS_IS_REALTIME_INODE(ip) \
+ (((ip)->i_d.di_flags & XFS_DIFLAG_REALTIME) && \
+ (ip)->i_mount->m_rtdev_targp)
#else
#define XFS_IS_REALTIME_INODE(ip) (0)
#endif
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index 532ab79d..572b64a 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -1355,12 +1355,7 @@
mp->m_qflags |= flags;
error_return:
- while (!list_empty(&buffer_list)) {
- struct xfs_buf *bp =
- list_first_entry(&buffer_list, struct xfs_buf, b_list);
- list_del_init(&bp->b_list);
- xfs_buf_relse(bp);
- }
+ xfs_buf_delwri_cancel(&buffer_list);
if (error) {
xfs_warn(mp,
diff --git a/fs/xfs/xfs_qm_syscalls.c b/fs/xfs/xfs_qm_syscalls.c
index 3640c6e..4d33444 100644
--- a/fs/xfs/xfs_qm_syscalls.c
+++ b/fs/xfs/xfs_qm_syscalls.c
@@ -764,5 +764,6 @@
uint flags)
{
ASSERT(mp->m_quotainfo);
- xfs_inode_ag_iterator(mp, xfs_dqrele_inode, flags, NULL);
+ xfs_inode_ag_iterator_flags(mp, xfs_dqrele_inode, flags, NULL,
+ XFS_AGITER_INEW_WAIT);
}
diff --git a/fs/xfs/xfs_xattr.c b/fs/xfs/xfs_xattr.c
index 839b35c..9beaf19 100644
--- a/fs/xfs/xfs_xattr.c
+++ b/fs/xfs/xfs_xattr.c
@@ -180,7 +180,8 @@
arraytop = context->count + prefix_len + namelen + 1;
if (arraytop > context->firstu) {
context->count = -1; /* insufficient space */
- return 1;
+ context->seen_enough = 1;
+ return 0;
}
offset = (char *)context->alist + context->count;
strncpy(offset, xfs_xattr_prefix(flags), prefix_len);
@@ -222,12 +223,15 @@
}
ssize_t
-xfs_vn_listxattr(struct dentry *dentry, char *data, size_t size)
+xfs_vn_listxattr(
+ struct dentry *dentry,
+ char *data,
+ size_t size)
{
struct xfs_attr_list_context context;
struct attrlist_cursor_kern cursor = { 0 };
- struct inode *inode = d_inode(dentry);
- int error;
+ struct inode *inode = d_inode(dentry);
+ int error;
/*
* First read the regular on-disk attributes.
@@ -245,7 +249,9 @@
else
context.put_listent = xfs_xattr_put_listent_sizes;
- xfs_attr_list_int(&context);
+ error = xfs_attr_list_int(&context);
+ if (error)
+ return error;
if (context.count < 0)
return -ERANGE;
diff --git a/include/asm-generic/topology.h b/include/asm-generic/topology.h
index fc824e2..5d2add1 100644
--- a/include/asm-generic/topology.h
+++ b/include/asm-generic/topology.h
@@ -48,7 +48,11 @@
#define parent_node(node) ((void)(node),0)
#endif
#ifndef cpumask_of_node
-#define cpumask_of_node(node) ((void)node, cpu_online_mask)
+ #ifdef CONFIG_NEED_MULTIPLE_NODES
+ #define cpumask_of_node(node) ((node) == 0 ? cpu_online_mask : cpu_none_mask)
+ #else
+ #define cpumask_of_node(node) ((void)node, cpu_online_mask)
+ #endif
#endif
#ifndef pcibus_to_node
#define pcibus_to_node(bus) ((void)(bus), -1)
diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h
index 476d99d..611b3d3 100644
--- a/include/crypto/internal/hash.h
+++ b/include/crypto/internal/hash.h
@@ -181,6 +181,16 @@
return crypto_alloc_instance2(name, alg, ahash_instance_headroom());
}
+static inline void ahash_request_complete(struct ahash_request *req, int err)
+{
+ req->base.complete(&req->base, err);
+}
+
+static inline u32 ahash_request_flags(struct ahash_request *req)
+{
+ return req->base.flags;
+}
+
static inline struct crypto_ahash *crypto_spawn_ahash(
struct crypto_ahash_spawn *spawn)
{
diff --git a/include/drm/ttm/ttm_object.h b/include/drm/ttm/ttm_object.h
index ed953f9..1487011 100644
--- a/include/drm/ttm/ttm_object.h
+++ b/include/drm/ttm/ttm_object.h
@@ -229,6 +229,8 @@
* @ref_type: The type of reference.
* @existed: Upon completion, indicates that an identical reference object
* already existed, and the refcount was upped on that object instead.
+ * @require_existed: Fail with -EPERM if an identical ref object didn't
+ * already exist.
*
* Checks that the base object is shareable and adds a ref object to it.
*
@@ -243,7 +245,8 @@
*/
extern int ttm_ref_object_add(struct ttm_object_file *tfile,
struct ttm_base_object *base,
- enum ttm_ref_type ref_type, bool *existed);
+ enum ttm_ref_type ref_type, bool *existed,
+ bool require_existed);
extern bool ttm_ref_object_exists(struct ttm_object_file *tfile,
struct ttm_base_object *base);
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index cb91b44..210ccc4 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -340,6 +340,26 @@
}
/**
+ * css_is_dying - test whether the specified css is dying
+ * @css: target css
+ *
+ * Test whether @css is in the process of offlining or already offline. In
+ * most cases, ->css_online() and ->css_offline() callbacks should be
+ * enough; however, the actual offline operations are RCU delayed and this
+ * test returns %true also when @css is scheduled to be offlined.
+ *
+ * This is useful, for example, when the use case requires synchronous
+ * behavior with respect to cgroup removal. cgroup removal schedules css
+ * offlining but the css can seem alive while the operation is being
+ * delayed. If the delay affects user visible semantics, this test can be
+ * used to resolve the situation.
+ */
+static inline bool css_is_dying(struct cgroup_subsys_state *css)
+{
+ return !(css->flags & CSS_NO_REF) && percpu_ref_is_dying(&css->refcnt);
+}
+
+/**
* css_put - put a css reference
* @css: target css
*
@@ -528,6 +548,25 @@
pr_cont_kernfs_path(cgrp->kn);
}
+static inline void cgroup_init_kthreadd(void)
+{
+ /*
+ * kthreadd is inherited by all kthreads, keep it in the root so
+ * that the new kthreads are guaranteed to stay in the root until
+ * initialization is finished.
+ */
+ current->no_cgroup_migration = 1;
+}
+
+static inline void cgroup_kthread_ready(void)
+{
+ /*
+ * This kthread finished initialization. The creator should have
+ * set PF_NO_SETAFFINITY if this kthread should stay in the root.
+ */
+ current->no_cgroup_migration = 0;
+}
+
#else /* !CONFIG_CGROUPS */
struct cgroup_subsys_state;
@@ -551,6 +590,8 @@
static inline int cgroup_init_early(void) { return 0; }
static inline int cgroup_init(void) { return 0; }
+static inline void cgroup_init_kthreadd(void) {}
+static inline void cgroup_kthread_ready(void) {}
#endif /* !CONFIG_CGROUPS */
diff --git a/include/linux/coredump.h b/include/linux/coredump.h
index d016a12..28ffa94 100644
--- a/include/linux/coredump.h
+++ b/include/linux/coredump.h
@@ -14,6 +14,7 @@
extern int dump_skip(struct coredump_params *cprm, size_t nr);
extern int dump_emit(struct coredump_params *cprm, const void *addr, int nr);
extern int dump_align(struct coredump_params *cprm, int align);
+extern void dump_truncate(struct coredump_params *cprm);
#ifdef CONFIG_COREDUMP
extern void do_coredump(const siginfo_t *siginfo);
#else
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 645420a..7a34ba1 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -662,6 +662,9 @@
int cpufreq_boost_enabled(void);
int cpufreq_enable_boost_support(void);
bool policy_has_boost_freq(struct cpufreq_policy *policy);
+void acct_update_power(struct task_struct *p, cputime_t cputime);
+void cpufreq_task_stats_init(struct task_struct *p);
+void cpufreq_task_stats_exit(struct task_struct *p);
#else
static inline int cpufreq_boost_trigger_state(int state)
{
@@ -705,3 +708,24 @@
unsigned long cpufreq_scale_freq_capacity(struct sched_domain *sd, int cpu);
unsigned long cpufreq_scale_max_freq_capacity(int cpu);
#endif /* _LINUX_CPUFREQ_H */
+
+/*********************************************************************
+ * CPUFREQ STATS *
+ *********************************************************************/
+
+#ifdef CONFIG_CPU_FREQ_STAT
+
+void acct_update_power(struct task_struct *p, cputime_t cputime);
+void cpufreq_task_stats_init(struct task_struct *p);
+void cpufreq_task_stats_exit(struct task_struct *p);
+void cpufreq_task_stats_remove_uids(uid_t uid_start, uid_t uid_end);
+int proc_time_in_state_show(struct seq_file *m, struct pid_namespace *ns,
+ struct pid *pid, struct task_struct *p);
+#else
+static inline void acct_update_power(struct task_struct *p,
+ cputime_t cputime) {}
+static inline void cpufreq_task_stats_init(struct task_struct *p) {}
+static inline void cpufreq_task_stats_exit(struct task_struct *p) {}
+static inline void cpufreq_task_stats_remove_uids(uid_t uid_start,
+ uid_t uid_end) {}
+#endif
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 85a868c..8397dc2 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -16,6 +16,7 @@
#ifdef CONFIG_CPUSETS
+extern struct static_key cpusets_pre_enable_key;
extern struct static_key cpusets_enabled_key;
static inline bool cpusets_enabled(void)
{
@@ -30,12 +31,14 @@
static inline void cpuset_inc(void)
{
+ static_key_slow_inc(&cpusets_pre_enable_key);
static_key_slow_inc(&cpusets_enabled_key);
}
static inline void cpuset_dec(void)
{
static_key_slow_dec(&cpusets_enabled_key);
+ static_key_slow_dec(&cpusets_pre_enable_key);
}
extern int cpuset_init(void);
@@ -104,7 +107,7 @@
*/
static inline unsigned int read_mems_allowed_begin(void)
{
- if (!cpusets_enabled())
+ if (!static_key_false(&cpusets_pre_enable_key))
return 0;
return read_seqcount_begin(¤t->mems_allowed_seq);
@@ -118,7 +121,7 @@
*/
static inline bool read_mems_allowed_retry(unsigned int seq)
{
- if (!cpusets_enabled())
+ if (!static_key_false(&cpusets_enabled_key))
return false;
return read_seqcount_retry(¤t->mems_allowed_seq, seq);
diff --git a/include/linux/device.h b/include/linux/device.h
index 4b4e2d5..30c52d7 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -368,6 +368,7 @@
* @suspend: Used to put the device to sleep mode, usually to a low power
* state.
* @resume: Used to bring the device from the sleep mode.
+ * @shutdown: Called at shut-down time to quiesce the device.
* @ns_type: Callbacks so sysfs can detemine namespaces.
* @namespace: Namespace of the device belongs to this class.
* @pm: The default device power management operations of this class.
@@ -396,6 +397,7 @@
int (*suspend)(struct device *dev, pm_message_t state);
int (*resume)(struct device *dev);
+ int (*shutdown)(struct device *dev);
const struct kobj_ns_type_operations *ns_type;
const void *(*namespace)(struct device *dev);
diff --git a/include/linux/fcntl.h b/include/linux/fcntl.h
index 76ce329..1b48d9c 100644
--- a/include/linux/fcntl.h
+++ b/include/linux/fcntl.h
@@ -3,6 +3,12 @@
#include <uapi/linux/fcntl.h>
+/* list of all valid flags for the open/openat flags argument: */
+#define VALID_OPEN_FLAGS \
+ (O_RDONLY | O_WRONLY | O_RDWR | O_CREAT | O_EXCL | O_NOCTTY | O_TRUNC | \
+ O_APPEND | O_NDELAY | O_NONBLOCK | O_NDELAY | __O_SYNC | O_DSYNC | \
+ FASYNC | O_DIRECT | O_LARGEFILE | O_DIRECTORY | O_NOFOLLOW | \
+ O_NOATIME | O_CLOEXEC | O_PATH | __O_TMPFILE)
#ifndef force_o_largefile
#define force_o_largefile() (BITS_PER_LONG != 32)
diff --git a/include/linux/file.h b/include/linux/file.h
index f87d308..ae0f3bb 100644
--- a/include/linux/file.h
+++ b/include/linux/file.h
@@ -71,6 +71,7 @@
extern void fd_install(unsigned int fd, struct file *file);
extern void flush_delayed_fput(void);
+extern void flush_delayed_fput_wait(void);
extern void __fput_sync(struct file *);
#endif /* __LINUX_FILE_H */
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 28c8f70..caae0a8 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -689,6 +689,11 @@
void *i_private; /* fs or device private pointer */
};
+static inline unsigned int i_blocksize(const struct inode *node)
+{
+ return (1 << node->i_blkbits);
+}
+
static inline int inode_unhashed(struct inode *inode)
{
return hlist_unhashed(&inode->i_hash);
@@ -1756,6 +1761,7 @@
void *(*clone_mnt_data) (void *);
void (*copy_mnt_data) (void *, void *);
void (*umount_begin) (struct super_block *);
+ void (*umount_end) (struct super_block *, int);
int (*show_options)(struct seq_file *, struct dentry *);
int (*show_options2)(struct vfsmount *,struct seq_file *, struct dentry *);
diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
index 604e152..eb19bf2 100644
--- a/include/linux/fscache-cache.h
+++ b/include/linux/fscache-cache.h
@@ -360,6 +360,7 @@
#define FSCACHE_OBJECT_IS_AVAILABLE 5 /* T if object has become active */
#define FSCACHE_OBJECT_RETIRED 6 /* T if object was retired on relinquishment */
#define FSCACHE_OBJECT_KILLED_BY_CACHE 7 /* T if object was killed by the cache */
+#define FSCACHE_OBJECT_RUN_AFTER_DEAD 8 /* T if object has been dispatched after death */
struct list_head cache_link; /* link in cache->object_list */
struct hlist_node cookie_link; /* link in cookie->backing_objects */
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 847cc1d..5012fcd 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -742,11 +742,9 @@
#if defined(CONFIG_BLK_DEV_INTEGRITY)
extern void blk_integrity_add(struct gendisk *);
extern void blk_integrity_del(struct gendisk *);
-extern void blk_integrity_revalidate(struct gendisk *);
#else /* CONFIG_BLK_DEV_INTEGRITY */
static inline void blk_integrity_add(struct gendisk *disk) { }
static inline void blk_integrity_del(struct gendisk *disk) { }
-static inline void blk_integrity_revalidate(struct gendisk *disk) { }
#endif /* CONFIG_BLK_DEV_INTEGRITY */
#else /* CONFIG_BLOCK */
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 67ce5bd..19db03d 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -616,15 +616,16 @@
static inline netdev_features_t vlan_features_check(const struct sk_buff *skb,
netdev_features_t features)
{
- if (skb_vlan_tagged_multi(skb))
- features = netdev_intersect_features(features,
- NETIF_F_SG |
- NETIF_F_HIGHDMA |
- NETIF_F_FRAGLIST |
- NETIF_F_GEN_CSUM |
- NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_STAG_TX);
-
+ if (skb_vlan_tagged_multi(skb)) {
+ /* In the case of multi-tagged packets, use a direct mask
+ * instead of using netdev_intersect_features(), to make
+ * sure that only devices supporting NETIF_F_HW_CSUM will
+ * have checksum offloading support.
+ */
+ features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_HW_CSUM |
+ NETIF_F_FRAGLIST | NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_STAG_TX;
+ }
return features;
}
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 1c1ff7e..021b1e9 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -15,6 +15,8 @@
#include <net/net_namespace.h>
#include <linux/sched/rt.h>
+#include <asm/thread_info.h>
+
#ifdef CONFIG_SMP
# define INIT_PUSHABLE_TASKS(tsk) \
.pushable_tasks = PLIST_NODE_INIT(tsk.pushable_tasks, MAX_PRIO),
@@ -183,14 +185,21 @@
# define INIT_KASAN(tsk)
#endif
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+# define INIT_TASK_TI(tsk) .thread_info = INIT_THREAD_INFO(tsk),
+#else
+# define INIT_TASK_TI(tsk)
+#endif
+
/*
* INIT_TASK is used to set up the first task table, touch at
* your own risk!. Base=0, limit=0x1fffff (=2MB)
*/
#define INIT_TASK(tsk) \
{ \
+ INIT_TASK_TI(tsk) \
.state = 0, \
- .stack = &init_thread_info, \
+ .stack = init_stack, \
.usage = ATOMIC_INIT(2), \
.flags = PF_KTHREAD, \
.prio = MAX_PRIO-20, \
diff --git a/include/linux/kdb.h b/include/linux/kdb.h
index a19bcf9..410deca 100644
--- a/include/linux/kdb.h
+++ b/include/linux/kdb.h
@@ -177,7 +177,7 @@
static inline
int kdb_process_cpu(const struct task_struct *p)
{
- unsigned int cpu = task_thread_info(p)->cpu;
+ unsigned int cpu = task_cpu(p);
if (cpu > num_possible_cpus())
cpu = 0;
return cpu;
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 8f68490..e233925 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -330,7 +330,9 @@
int write, void __user *buffer,
size_t *length, loff_t *ppos);
#endif
-
+extern void wait_for_kprobe_optimizer(void);
+#else
+static inline void wait_for_kprobe_optimizer(void) { }
#endif /* CONFIG_OPTPROBES */
#ifdef CONFIG_KPROBES_ON_FTRACE
extern void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index c923350..d7ce4e3 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -182,8 +182,8 @@
int len, void *val);
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
int len, struct kvm_io_device *dev);
-int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
- struct kvm_io_device *dev);
+void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
+ struct kvm_io_device *dev);
#ifdef CONFIG_KVM_ASYNC_PF
struct kvm_async_pf {
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index 782d4e8..4bc4b1b 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -310,6 +310,7 @@
{
struct ppa_addr l;
+ l.ppa = 0;
/*
* (r.ppa << X offset) & X len bitmask. X eq. blk, pg, etc.
*/
diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h
index 2a6b994..743b34f5 100644
--- a/include/linux/list_lru.h
+++ b/include/linux/list_lru.h
@@ -44,6 +44,7 @@
/* for cgroup aware lrus points to per cgroup lists, otherwise NULL */
struct list_lru_memcg *memcg_lrus;
#endif
+ long nr_items;
} ____cacheline_aligned_in_smp;
struct list_lru {
diff --git a/include/linux/llist.h b/include/linux/llist.h
index c8091a3..ac67961 100644
--- a/include/linux/llist.h
+++ b/include/linux/llist.h
@@ -88,6 +88,23 @@
container_of(ptr, type, member)
/**
+ * member_address_is_nonnull - check whether the member address is not NULL
+ * @ptr: the object pointer (struct type * that contains the llist_node)
+ * @member: the name of the llist_node within the struct.
+ *
+ * This macro is conceptually the same as
+ * &ptr->member != NULL
+ * but it works around the fact that compilers can decide that taking a member
+ * address is never a NULL pointer.
+ *
+ * Real objects that start at a high address and have a member at NULL are
+ * unlikely to exist, but such pointers may be returned e.g. by the
+ * container_of() macro.
+ */
+#define member_address_is_nonnull(ptr, member) \
+ ((uintptr_t)(ptr) + offsetof(typeof(*(ptr)), member) != 0)
+
+/**
* llist_for_each - iterate over some deleted entries of a lock-less list
* @pos: the &struct llist_node to use as a loop cursor
* @node: the first entry of deleted list entries
@@ -121,7 +138,7 @@
*/
#define llist_for_each_entry(pos, node, member) \
for ((pos) = llist_entry((node), typeof(*(pos)), member); \
- (pos) != llist_entry(NULL, typeof(*(pos)), member); \
+ member_address_is_nonnull(pos, member); \
(pos) = llist_entry((pos)->member.next, typeof(*(pos)), member))
/**
@@ -143,7 +160,7 @@
*/
#define llist_for_each_entry_safe(pos, n, node, member) \
for (pos = llist_entry((node), typeof(*pos), member); \
- (pos) != llist_entry(NULL, typeof(*(pos)), member) && \
+ member_address_is_nonnull(pos, member) && \
(n = llist_entry(pos->member.next, typeof(*n), member), true); \
pos = n)
diff --git a/include/linux/log2.h b/include/linux/log2.h
index fd7ff3d..c373295f 100644
--- a/include/linux/log2.h
+++ b/include/linux/log2.h
@@ -16,12 +16,6 @@
#include <linux/bitops.h>
/*
- * deal with unrepresentable constant logarithms
- */
-extern __attribute__((const, noreturn))
-int ____ilog2_NaN(void);
-
-/*
* non-constant log of base 2 calculators
* - the arch may override these in asm/bitops.h if they can be implemented
* more efficiently than using fls() and fls64()
@@ -85,7 +79,7 @@
#define ilog2(n) \
( \
__builtin_constant_p(n) ? ( \
- (n) < 1 ? ____ilog2_NaN() : \
+ (n) < 2 ? 0 : \
(n) & (1ULL << 63) ? 63 : \
(n) & (1ULL << 62) ? 62 : \
(n) & (1ULL << 61) ? 61 : \
@@ -148,10 +142,7 @@
(n) & (1ULL << 4) ? 4 : \
(n) & (1ULL << 3) ? 3 : \
(n) & (1ULL << 2) ? 2 : \
- (n) & (1ULL << 1) ? 1 : \
- (n) & (1ULL << 0) ? 0 : \
- ____ilog2_NaN() \
- ) : \
+ 1 ) : \
(sizeof(n) <= 4) ? \
__ilog2_u32(n) : \
__ilog2_u64(n) \
@@ -203,6 +194,17 @@
* ... and so on.
*/
-#define order_base_2(n) ilog2(roundup_pow_of_two(n))
+static inline __attribute_const__
+int __order_base_2(unsigned long n)
+{
+ return n > 1 ? ilog2(n - 1) + 1 : 0;
+}
+#define order_base_2(n) \
+( \
+ __builtin_constant_p(n) ? ( \
+ ((n) == 0 || (n) == 1) ? 0 : \
+ ilog2((n) - 1) + 1) : \
+ __order_base_2(n) \
+)
#endif /* _LINUX_LOG2_H */
diff --git a/include/linux/lz4.h b/include/linux/lz4.h
index 4356686..1b0f8ca 100644
--- a/include/linux/lz4.h
+++ b/include/linux/lz4.h
@@ -1,87 +1,717 @@
-#ifndef __LZ4_H__
-#define __LZ4_H__
-/*
- * LZ4 Kernel Interface
+/* LZ4 Kernel Interface
*
* Copyright (C) 2013, LG Electronics, Kyungsik Lee <kyungsik.lee@lge.com>
+ * Copyright (C) 2016, Sven Schmidt <4sschmid@informatik.uni-hamburg.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
+ *
+ * This file is based on the original header file
+ * for LZ4 - Fast LZ compression algorithm.
+ *
+ * LZ4 - Fast LZ compression algorithm
+ * Copyright (C) 2011-2016, Yann Collet.
+ * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * You can contact the author at :
+ * - LZ4 homepage : http://www.lz4.org
+ * - LZ4 source repository : https://github.com/lz4/lz4
*/
-#define LZ4_MEM_COMPRESS (4096 * sizeof(unsigned char *))
-#define LZ4HC_MEM_COMPRESS (65538 * sizeof(unsigned char *))
+
+#ifndef __LZ4_H__
+#define __LZ4_H__
+
+#include <linux/types.h>
+#include <linux/string.h> /* memset, memcpy */
+
+/*-************************************************************************
+ * CONSTANTS
+ **************************************************************************/
+/*
+ * LZ4_MEMORY_USAGE :
+ * Memory usage formula : N->2^N Bytes
+ * (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
+ * Increasing memory usage improves compression ratio
+ * Reduced memory usage can improve speed, due to cache effect
+ * Default value is 14, for 16KB, which nicely fits into Intel x86 L1 cache
+ */
+#define LZ4_MEMORY_USAGE 14
+
+#define LZ4_MAX_INPUT_SIZE 0x7E000000 /* 2 113 929 216 bytes */
+#define LZ4_COMPRESSBOUND(isize) (\
+ (unsigned int)(isize) > (unsigned int)LZ4_MAX_INPUT_SIZE \
+ ? 0 \
+ : (isize) + ((isize)/255) + 16)
+
+#define LZ4_ACCELERATION_DEFAULT 1
+#define LZ4_HASHLOG (LZ4_MEMORY_USAGE-2)
+#define LZ4_HASHTABLESIZE (1 << LZ4_MEMORY_USAGE)
+#define LZ4_HASH_SIZE_U32 (1 << LZ4_HASHLOG)
+
+#define LZ4HC_MIN_CLEVEL 3
+#define LZ4HC_DEFAULT_CLEVEL 9
+#define LZ4HC_MAX_CLEVEL 16
+
+#define LZ4HC_DICTIONARY_LOGSIZE 16
+#define LZ4HC_MAXD (1<<LZ4HC_DICTIONARY_LOGSIZE)
+#define LZ4HC_MAXD_MASK (LZ4HC_MAXD - 1)
+#define LZ4HC_HASH_LOG (LZ4HC_DICTIONARY_LOGSIZE - 1)
+#define LZ4HC_HASHTABLESIZE (1 << LZ4HC_HASH_LOG)
+#define LZ4HC_HASH_MASK (LZ4HC_HASHTABLESIZE - 1)
+
+/*-************************************************************************
+ * STREAMING CONSTANTS AND STRUCTURES
+ **************************************************************************/
+#define LZ4_STREAMSIZE_U64 ((1 << (LZ4_MEMORY_USAGE - 3)) + 4)
+#define LZ4_STREAMSIZE (LZ4_STREAMSIZE_U64 * sizeof(unsigned long long))
+
+#define LZ4_STREAMHCSIZE 262192
+#define LZ4_STREAMHCSIZE_SIZET (262192 / sizeof(size_t))
+
+#define LZ4_STREAMDECODESIZE_U64 4
+#define LZ4_STREAMDECODESIZE (LZ4_STREAMDECODESIZE_U64 * \
+ sizeof(unsigned long long))
/*
- * lz4_compressbound()
- * Provides the maximum size that LZ4 may output in a "worst case" scenario
- * (input data not compressible)
+ * LZ4_stream_t - information structure to track an LZ4 stream.
*/
-static inline size_t lz4_compressbound(size_t isize)
+typedef struct {
+ uint32_t hashTable[LZ4_HASH_SIZE_U32];
+ uint32_t currentOffset;
+ uint32_t initCheck;
+ const uint8_t *dictionary;
+ uint8_t *bufferStart;
+ uint32_t dictSize;
+} LZ4_stream_t_internal;
+typedef union {
+ unsigned long long table[LZ4_STREAMSIZE_U64];
+ LZ4_stream_t_internal internal_donotuse;
+} LZ4_stream_t;
+
+/*
+ * LZ4_streamHC_t - information structure to track an LZ4HC stream.
+ */
+typedef struct {
+ unsigned int hashTable[LZ4HC_HASHTABLESIZE];
+ unsigned short chainTable[LZ4HC_MAXD];
+ /* next block to continue on current prefix */
+ const unsigned char *end;
+ /* All index relative to this position */
+ const unsigned char *base;
+ /* alternate base for extDict */
+ const unsigned char *dictBase;
+ /* below that point, need extDict */
+ unsigned int dictLimit;
+ /* below that point, no more dict */
+ unsigned int lowLimit;
+ /* index from which to continue dict update */
+ unsigned int nextToUpdate;
+ unsigned int compressionLevel;
+} LZ4HC_CCtx_internal;
+typedef union {
+ size_t table[LZ4_STREAMHCSIZE_SIZET];
+ LZ4HC_CCtx_internal internal_donotuse;
+} LZ4_streamHC_t;
+
+/*
+ * LZ4_streamDecode_t - information structure to track an
+ * LZ4 stream during decompression.
+ *
+ * init this structure using LZ4_setStreamDecode (or memset()) before first use
+ */
+typedef struct {
+ const uint8_t *externalDict;
+ size_t extDictSize;
+ const uint8_t *prefixEnd;
+ size_t prefixSize;
+} LZ4_streamDecode_t_internal;
+typedef union {
+ unsigned long long table[LZ4_STREAMDECODESIZE_U64];
+ LZ4_streamDecode_t_internal internal_donotuse;
+} LZ4_streamDecode_t;
+
+/*-************************************************************************
+ * SIZE OF STATE
+ **************************************************************************/
+#define LZ4_MEM_COMPRESS LZ4_STREAMSIZE
+#define LZ4HC_MEM_COMPRESS LZ4_STREAMHCSIZE
+
+/*-************************************************************************
+ * Compression Functions
+ **************************************************************************/
+
+/**
+ * LZ4_compressBound() - Max. output size in worst case scenarios
+ * @isize: Size of the input data
+ *
+ * Return: Max. size LZ4 may output in a "worst case" scenario
+ * (data not compressible)
+ */
+static inline int LZ4_compressBound(size_t isize)
{
- return isize + (isize / 255) + 16;
+ return LZ4_COMPRESSBOUND(isize);
}
-/*
- * lz4_compress()
- * src : source address of the original data
- * src_len : size of the original data
- * dst : output buffer address of the compressed data
- * This requires 'dst' of size LZ4_COMPRESSBOUND.
- * dst_len : is the output size, which is returned after compress done
- * workmem : address of the working memory.
- * This requires 'workmem' of size LZ4_MEM_COMPRESS.
- * return : Success if return 0
- * Error if return (< 0)
- * note : Destination buffer and workmem must be already allocated with
- * the defined size.
+/**
+ * lz4_compressbound() - For backwards compatibility; see LZ4_compressBound
+ * @isize: Size of the input data
+ *
+ * Return: Max. size LZ4 may output in a "worst case" scenario
+ * (data not compressible)
*/
-int lz4_compress(const unsigned char *src, size_t src_len,
- unsigned char *dst, size_t *dst_len, void *wrkmem);
+static inline int lz4_compressbound(size_t isize)
+{
+ return LZ4_COMPRESSBOUND(isize);
+}
- /*
- * lz4hc_compress()
- * src : source address of the original data
- * src_len : size of the original data
- * dst : output buffer address of the compressed data
- * This requires 'dst' of size LZ4_COMPRESSBOUND.
- * dst_len : is the output size, which is returned after compress done
- * workmem : address of the working memory.
- * This requires 'workmem' of size LZ4HC_MEM_COMPRESS.
- * return : Success if return 0
- * Error if return (< 0)
- * note : Destination buffer and workmem must be already allocated with
- * the defined size.
- */
-int lz4hc_compress(const unsigned char *src, size_t src_len,
- unsigned char *dst, size_t *dst_len, void *wrkmem);
-
-/*
- * lz4_decompress()
- * src : source address of the compressed data
- * src_len : is the input size, whcih is returned after decompress done
- * dest : output buffer address of the decompressed data
- * actual_dest_len: is the size of uncompressed data, supposing it's known
- * return : Success if return 0
- * Error if return (< 0)
- * note : Destination buffer must be already allocated.
- * slightly faster than lz4_decompress_unknownoutputsize()
+/**
+ * LZ4_compress_default() - Compress data from source to dest
+ * @source: source address of the original data
+ * @dest: output buffer address of the compressed data
+ * @inputSize: size of the input data. Max supported value is LZ4_MAX_INPUT_SIZE
+ * @maxOutputSize: full or partial size of buffer 'dest'
+ * which must be already allocated
+ * @wrkmem: address of the working memory.
+ * This requires 'workmem' of LZ4_MEM_COMPRESS.
+ *
+ * Compresses 'sourceSize' bytes from buffer 'source'
+ * into already allocated 'dest' buffer of size 'maxOutputSize'.
+ * Compression is guaranteed to succeed if
+ * 'maxOutputSize' >= LZ4_compressBound(inputSize).
+ * It also runs faster, so it's a recommended setting.
+ * If the function cannot compress 'source' into a more limited 'dest' budget,
+ * compression stops *immediately*, and the function result is zero.
+ * As a consequence, 'dest' content is not valid.
+ *
+ * Return: Number of bytes written into buffer 'dest'
+ * (necessarily <= maxOutputSize) or 0 if compression fails
*/
-int lz4_decompress(const unsigned char *src, size_t *src_len,
- unsigned char *dest, size_t actual_dest_len);
+int LZ4_compress_default(const char *source, char *dest, int inputSize,
+ int maxOutputSize, void *wrkmem);
+
+/**
+ * LZ4_compress_fast() - As LZ4_compress_default providing an acceleration param
+ * @source: source address of the original data
+ * @dest: output buffer address of the compressed data
+ * @inputSize: size of the input data. Max supported value is LZ4_MAX_INPUT_SIZE
+ * @maxOutputSize: full or partial size of buffer 'dest'
+ * which must be already allocated
+ * @acceleration: acceleration factor
+ * @wrkmem: address of the working memory.
+ * This requires 'workmem' of LZ4_MEM_COMPRESS.
+ *
+ * Same as LZ4_compress_default(), but allows to select an "acceleration"
+ * factor. The larger the acceleration value, the faster the algorithm,
+ * but also the lesser the compression. It's a trade-off. It can be fine tuned,
+ * with each successive value providing roughly +~3% to speed.
+ * An acceleration value of "1" is the same as regular LZ4_compress_default()
+ * Values <= 0 will be replaced by LZ4_ACCELERATION_DEFAULT, which is 1.
+ *
+ * Return: Number of bytes written into buffer 'dest'
+ * (necessarily <= maxOutputSize) or 0 if compression fails
+ */
+int LZ4_compress_fast(const char *source, char *dest, int inputSize,
+ int maxOutputSize, int acceleration, void *wrkmem);
+
+/**
+ * LZ4_compress_destSize() - Compress as much data as possible
+ * from source to dest
+ * @source: source address of the original data
+ * @dest: output buffer address of the compressed data
+ * @sourceSizePtr: will be modified to indicate how many bytes where read
+ * from 'source' to fill 'dest'. New value is necessarily <= old value.
+ * @targetDestSize: Size of buffer 'dest' which must be already allocated
+ * @wrkmem: address of the working memory.
+ * This requires 'workmem' of LZ4_MEM_COMPRESS.
+ *
+ * Reverse the logic, by compressing as much data as possible
+ * from 'source' buffer into already allocated buffer 'dest'
+ * of size 'targetDestSize'.
+ * This function either compresses the entire 'source' content into 'dest'
+ * if it's large enough, or fill 'dest' buffer completely with as much data as
+ * possible from 'source'.
+ *
+ * Return: Number of bytes written into 'dest' (necessarily <= targetDestSize)
+ * or 0 if compression fails
+ */
+int LZ4_compress_destSize(const char *source, char *dest, int *sourceSizePtr,
+ int targetDestSize, void *wrkmem);
/*
- * lz4_decompress_unknownoutputsize()
- * src : source address of the compressed data
- * src_len : is the input size, therefore the compressed size
- * dest : output buffer address of the decompressed data
- * dest_len: is the max size of the destination buffer, which is
- * returned with actual size of decompressed data after
- * decompress done
- * return : Success if return 0
- * Error if return (< 0)
- * note : Destination buffer must be already allocated.
+ * lz4_compress() - For backward compatibility, see LZ4_compress_default
+ * @src: source address of the original data
+ * @src_len: size of the original data
+ * @dst: output buffer address of the compressed data. This requires 'dst'
+ * of size LZ4_COMPRESSBOUND
+ * @dst_len: is the output size, which is returned after compress done
+ * @workmem: address of the working memory.
+ *
+ * Return: Success if return 0, Error if return < 0
+ */
+int lz4_compress(const unsigned char *src, size_t src_len, unsigned char *dst,
+ size_t *dst_len, void *wrkmem);
+
+/*-************************************************************************
+ * Decompression Functions
+ **************************************************************************/
+
+/**
+ * LZ4_decompress_fast() - Decompresses data from 'source' into 'dest'
+ * @source: source address of the compressed data
+ * @dest: output buffer address of the uncompressed data
+ * which must be already allocated with 'originalSize' bytes
+ * @originalSize: is the original and therefore uncompressed size
+ *
+ * Decompresses data from 'source' into 'dest'.
+ * This function fully respect memory boundaries for properly formed
+ * compressed data.
+ * It is a bit faster than LZ4_decompress_safe().
+ * However, it does not provide any protection against intentionally
+ * modified data stream (malicious input).
+ * Use this function in trusted environment only
+ * (data to decode comes from a trusted source).
+ *
+ * Return: number of bytes read from the source buffer
+ * or a negative result if decompression fails.
+ */
+int LZ4_decompress_fast(const char *source, char *dest, int originalSize);
+
+/**
+ * LZ4_decompress_safe() - Decompression protected against buffer overflow
+ * @source: source address of the compressed data
+ * @dest: output buffer address of the uncompressed data
+ * which must be already allocated
+ * @compressedSize: is the precise full size of the compressed block
+ * @maxDecompressedSize: is the size of 'dest' buffer
+ *
+ * Decompresses data from 'source' into 'dest'.
+ * If the source stream is detected malformed, the function will
+ * stop decoding and return a negative result.
+ * This function is protected against buffer overflow exploits,
+ * including malicious data packets. It never writes outside output buffer,
+ * nor reads outside input buffer.
+ *
+ * Return: number of bytes decompressed into destination buffer
+ * (necessarily <= maxDecompressedSize)
+ * or a negative result in case of error
+ */
+int LZ4_decompress_safe(const char *source, char *dest, int compressedSize,
+ int maxDecompressedSize);
+
+/**
+ * LZ4_decompress_safe_partial() - Decompress a block of size 'compressedSize'
+ * at position 'source' into buffer 'dest'
+ * @source: source address of the compressed data
+ * @dest: output buffer address of the decompressed data which must be
+ * already allocated
+ * @compressedSize: is the precise full size of the compressed block.
+ * @targetOutputSize: the decompression operation will try
+ * to stop as soon as 'targetOutputSize' has been reached
+ * @maxDecompressedSize: is the size of destination buffer
+ *
+ * This function decompresses a compressed block of size 'compressedSize'
+ * at position 'source' into destination buffer 'dest'
+ * of size 'maxDecompressedSize'.
+ * The function tries to stop decompressing operation as soon as
+ * 'targetOutputSize' has been reached, reducing decompression time.
+ * This function never writes outside of output buffer,
+ * and never reads outside of input buffer.
+ * It is therefore protected against malicious data packets.
+ *
+ * Return: the number of bytes decoded in the destination buffer
+ * (necessarily <= maxDecompressedSize)
+ * or a negative result in case of error
+ *
+ */
+int LZ4_decompress_safe_partial(const char *source, char *dest,
+ int compressedSize, int targetOutputSize, int maxDecompressedSize);
+
+/*
+ * lz4_decompress_unknownoutputsize() - For backwards compatibility,
+ * see LZ4_decompress_safe
+ * @src: source address of the compressed data
+ * @src_len: is the input size, therefore the compressed size
+ * @dest: output buffer address of the decompressed data
+ * which must be already allocated
+ * @dest_len: is the max size of the destination buffer, which is
+ * returned with actual size of decompressed data after decompress done
+ *
+ * Return: Success if return 0, Error if return (< 0)
*/
int lz4_decompress_unknownoutputsize(const unsigned char *src, size_t src_len,
- unsigned char *dest, size_t *dest_len);
+ unsigned char *dest, size_t *dest_len);
+
+/**
+ * lz4_decompress() - For backwards compatibility, see LZ4_decompress_fast
+ * @src: source address of the compressed data
+ * @src_len: is the input size, which is returned after decompress done
+ * @dest: output buffer address of the decompressed data,
+ * which must be already allocated
+ * @actual_dest_len: is the size of uncompressed data, supposing it's known
+ *
+ * Return: Success if return 0, Error if return (< 0)
+ */
+int lz4_decompress(const unsigned char *src, size_t *src_len,
+ unsigned char *dest, size_t actual_dest_len);
+
+/*-************************************************************************
+ * LZ4 HC Compression
+ **************************************************************************/
+
+/**
+ * LZ4_compress_HC() - Compress data from `src` into `dst`, using HC algorithm
+ * @src: source address of the original data
+ * @dst: output buffer address of the compressed data
+ * @srcSize: size of the input data. Max supported value is LZ4_MAX_INPUT_SIZE
+ * @dstCapacity: full or partial size of buffer 'dst',
+ * which must be already allocated
+ * @compressionLevel: Recommended values are between 4 and 9, although any
+ * value between 1 and LZ4HC_MAX_CLEVEL will work.
+ * Values >LZ4HC_MAX_CLEVEL behave the same as 16.
+ * @wrkmem: address of the working memory.
+ * This requires 'wrkmem' of size LZ4HC_MEM_COMPRESS.
+ *
+ * Compress data from 'src' into 'dst', using the more powerful
+ * but slower "HC" algorithm. Compression is guaranteed to succeed if
+ * `dstCapacity >= LZ4_compressBound(srcSize)
+ *
+ * Return : the number of bytes written into 'dst' or 0 if compression fails.
+ */
+int LZ4_compress_HC(const char *src, char *dst, int srcSize, int dstCapacity,
+ int compressionLevel, void *wrkmem);
+
+/**
+ * lz4hc_compress() - For backwards compatibility, see LZ4_compress_HC
+ * @src: source address of the original data
+ * @src_len: size of the original data
+ * @dst: output buffer address of the compressed data. This requires 'dst'
+ * of size LZ4_COMPRESSBOUND.
+ * @dst_len: is the output size, which is returned after compress done
+ * @wrkmem: address of the working memory.
+ * This requires 'workmem' of size LZ4HC_MEM_COMPRESS.
+ *
+ * Return : Success if return 0, Error if return (< 0)
+ */
+int lz4hc_compress(const unsigned char *src, size_t src_len, unsigned char *dst,
+ size_t *dst_len, void *wrkmem);
+
+/**
+ * LZ4_resetStreamHC() - Init an allocated 'LZ4_streamHC_t' structure
+ * @streamHCPtr: pointer to the 'LZ4_streamHC_t' structure
+ * @compressionLevel: Recommended values are between 4 and 9, although any
+ * value between 1 and LZ4HC_MAX_CLEVEL will work.
+ * Values >LZ4HC_MAX_CLEVEL behave the same as 16.
+ *
+ * An LZ4_streamHC_t structure can be allocated once
+ * and re-used multiple times.
+ * Use this function to init an allocated `LZ4_streamHC_t` structure
+ * and start a new compression.
+ */
+void LZ4_resetStreamHC(LZ4_streamHC_t *streamHCPtr, int compressionLevel);
+
+/**
+ * LZ4_loadDictHC() - Load a static dictionary into LZ4_streamHC
+ * @streamHCPtr: pointer to the LZ4HC_stream_t
+ * @dictionary: dictionary to load
+ * @dictSize: size of dictionary
+ *
+ * Use this function to load a static dictionary into LZ4HC_stream.
+ * Any previous data will be forgotten, only 'dictionary'
+ * will remain in memory.
+ * Loading a size of 0 is allowed.
+ *
+ * Return : dictionary size, in bytes (necessarily <= 64 KB)
+ */
+int LZ4_loadDictHC(LZ4_streamHC_t *streamHCPtr, const char *dictionary,
+ int dictSize);
+
+/**
+ * LZ4_compress_HC_continue() - Compress 'src' using data from previously
+ * compressed blocks as a dictionary using the HC algorithm
+ * @streamHCPtr: Pointer to the previous 'LZ4_streamHC_t' structure
+ * @src: source address of the original data
+ * @dst: output buffer address of the compressed data,
+ * which must be already allocated
+ * @srcSize: size of the input data. Max supported value is LZ4_MAX_INPUT_SIZE
+ * @maxDstSize: full or partial size of buffer 'dest'
+ * which must be already allocated
+ *
+ * These functions compress data in successive blocks of any size, using
+ * previous blocks as dictionary. One key assumption is that previous
+ * blocks (up to 64 KB) remain read-accessible while
+ * compressing next blocks. There is an exception for ring buffers,
+ * which can be smaller than 64 KB.
+ * Ring buffers scenario is automatically detected and handled by
+ * LZ4_compress_HC_continue().
+ * Before starting compression, state must be properly initialized,
+ * using LZ4_resetStreamHC().
+ * A first "fictional block" can then be designated as
+ * initial dictionary, using LZ4_loadDictHC() (Optional).
+ * Then, use LZ4_compress_HC_continue()
+ * to compress each successive block. Previous memory blocks
+ * (including initial dictionary when present) must remain accessible
+ * and unmodified during compression.
+ * 'dst' buffer should be sized to handle worst case scenarios, using
+ * LZ4_compressBound(), to ensure operation success.
+ * If, for any reason, previous data blocks can't be preserved unmodified
+ * in memory during next compression block,
+ * you must save it to a safer memory space, using LZ4_saveDictHC().
+ * Return value of LZ4_saveDictHC() is the size of dictionary
+ * effectively saved into 'safeBuffer'.
+ *
+ * Return: Number of bytes written into buffer 'dst' or 0 if compression fails
+ */
+int LZ4_compress_HC_continue(LZ4_streamHC_t *streamHCPtr, const char *src,
+ char *dst, int srcSize, int maxDstSize);
+
+/**
+ * LZ4_saveDictHC() - Save static dictionary from LZ4HC_stream
+ * @streamHCPtr: pointer to the 'LZ4HC_stream_t' structure
+ * @safeBuffer: buffer to save dictionary to, must be already allocated
+ * @maxDictSize: size of 'safeBuffer'
+ *
+ * If previously compressed data block is not guaranteed
+ * to remain available at its memory location,
+ * save it into a safer place (char *safeBuffer).
+ * Note : you don't need to call LZ4_loadDictHC() afterwards,
+ * dictionary is immediately usable, you can therefore call
+ * LZ4_compress_HC_continue().
+ *
+ * Return : saved dictionary size in bytes (necessarily <= maxDictSize),
+ * or 0 if error.
+ */
+int LZ4_saveDictHC(LZ4_streamHC_t *streamHCPtr, char *safeBuffer,
+ int maxDictSize);
+
+/*-*********************************************
+ * Streaming Compression Functions
+ ***********************************************/
+
+/**
+ * LZ4_resetStream() - Init an allocated 'LZ4_stream_t' structure
+ * @LZ4_stream: pointer to the 'LZ4_stream_t' structure
+ *
+ * An LZ4_stream_t structure can be allocated once
+ * and re-used multiple times.
+ * Use this function to init an allocated `LZ4_stream_t` structure
+ * and start a new compression.
+ */
+void LZ4_resetStream(LZ4_stream_t *LZ4_stream);
+
+/**
+ * LZ4_loadDict() - Load a static dictionary into LZ4_stream
+ * @streamPtr: pointer to the LZ4_stream_t
+ * @dictionary: dictionary to load
+ * @dictSize: size of dictionary
+ *
+ * Use this function to load a static dictionary into LZ4_stream.
+ * Any previous data will be forgotten, only 'dictionary'
+ * will remain in memory.
+ * Loading a size of 0 is allowed.
+ *
+ * Return : dictionary size, in bytes (necessarily <= 64 KB)
+ */
+int LZ4_loadDict(LZ4_stream_t *streamPtr, const char *dictionary,
+ int dictSize);
+
+/**
+ * LZ4_saveDict() - Save static dictionary from LZ4_stream
+ * @streamPtr: pointer to the 'LZ4_stream_t' structure
+ * @safeBuffer: buffer to save dictionary to, must be already allocated
+ * @dictSize: size of 'safeBuffer'
+ *
+ * If previously compressed data block is not guaranteed
+ * to remain available at its memory location,
+ * save it into a safer place (char *safeBuffer).
+ * Note : you don't need to call LZ4_loadDict() afterwards,
+ * dictionary is immediately usable, you can therefore call
+ * LZ4_compress_fast_continue().
+ *
+ * Return : saved dictionary size in bytes (necessarily <= dictSize),
+ * or 0 if error.
+ */
+int LZ4_saveDict(LZ4_stream_t *streamPtr, char *safeBuffer, int dictSize);
+
+/**
+ * LZ4_compress_fast_continue() - Compress 'src' using data from previously
+ * compressed blocks as a dictionary
+ * @streamPtr: Pointer to the previous 'LZ4_stream_t' structure
+ * @src: source address of the original data
+ * @dst: output buffer address of the compressed data,
+ * which must be already allocated
+ * @srcSize: size of the input data. Max supported value is LZ4_MAX_INPUT_SIZE
+ * @maxDstSize: full or partial size of buffer 'dest'
+ * which must be already allocated
+ * @acceleration: acceleration factor
+ *
+ * Compress buffer content 'src', using data from previously compressed blocks
+ * as dictionary to improve compression ratio.
+ * Important : Previous data blocks are assumed to still
+ * be present and unmodified !
+ * If maxDstSize >= LZ4_compressBound(srcSize),
+ * compression is guaranteed to succeed, and runs faster.
+ *
+ * Return: Number of bytes written into buffer 'dst' or 0 if compression fails
+ */
+int LZ4_compress_fast_continue(LZ4_stream_t *streamPtr, const char *src,
+ char *dst, int srcSize, int maxDstSize, int acceleration);
+
+/**
+ * LZ4_setStreamDecode() - Instruct where to find dictionary
+ * @LZ4_streamDecode: the 'LZ4_streamDecode_t' structure
+ * @dictionary: dictionary to use
+ * @dictSize: size of dictionary
+ *
+ * Use this function to instruct where to find the dictionary.
+ * Setting a size of 0 is allowed (same effect as reset).
+ *
+ * Return: 1 if OK, 0 if error
+ */
+int LZ4_setStreamDecode(LZ4_streamDecode_t *LZ4_streamDecode,
+ const char *dictionary, int dictSize);
+
+/**
+ * LZ4_decompress_safe_continue() - Decompress blocks in streaming mode
+ * @LZ4_streamDecode: the 'LZ4_streamDecode_t' structure
+ * @source: source address of the compressed data
+ * @dest: output buffer address of the uncompressed data
+ * which must be already allocated
+ * @compressedSize: is the precise full size of the compressed block
+ * @maxDecompressedSize: is the size of 'dest' buffer
+ *
+ * This decoding function allows decompression of multiple blocks
+ * in "streaming" mode.
+ * Previously decoded blocks *must* remain available at the memory position
+ * where they were decoded (up to 64 KB)
+ * In the case of a ring buffers, decoding buffer must be either :
+ * - Exactly same size as encoding buffer, with same update rule
+ * (block boundaries at same positions) In which case,
+ * the decoding & encoding ring buffer can have any size,
+ * including very small ones ( < 64 KB).
+ * - Larger than encoding buffer, by a minimum of maxBlockSize more bytes.
+ * maxBlockSize is implementation dependent.
+ * It's the maximum size you intend to compress into a single block.
+ * In which case, encoding and decoding buffers do not need
+ * to be synchronized, and encoding ring buffer can have any size,
+ * including small ones ( < 64 KB).
+ * - _At least_ 64 KB + 8 bytes + maxBlockSize.
+ * In which case, encoding and decoding buffers do not need to be
+ * synchronized, and encoding ring buffer can have any size,
+ * including larger than decoding buffer.
+ * Whenever these conditions are not possible, save the last 64KB of decoded
+ * data into a safe buffer, and indicate where it is saved
+ * using LZ4_setStreamDecode()
+ *
+ * Return: number of bytes decompressed into destination buffer
+ * (necessarily <= maxDecompressedSize)
+ * or a negative result in case of error
+ */
+int LZ4_decompress_safe_continue(LZ4_streamDecode_t *LZ4_streamDecode,
+ const char *source, char *dest, int compressedSize,
+ int maxDecompressedSize);
+
+/**
+ * LZ4_decompress_fast_continue() - Decompress blocks in streaming mode
+ * @LZ4_streamDecode: the 'LZ4_streamDecode_t' structure
+ * @source: source address of the compressed data
+ * @dest: output buffer address of the uncompressed data
+ * which must be already allocated with 'originalSize' bytes
+ * @originalSize: is the original and therefore uncompressed size
+ *
+ * This decoding function allows decompression of multiple blocks
+ * in "streaming" mode.
+ * Previously decoded blocks *must* remain available at the memory position
+ * where they were decoded (up to 64 KB)
+ * In the case of a ring buffers, decoding buffer must be either :
+ * - Exactly same size as encoding buffer, with same update rule
+ * (block boundaries at same positions) In which case,
+ * the decoding & encoding ring buffer can have any size,
+ * including very small ones ( < 64 KB).
+ * - Larger than encoding buffer, by a minimum of maxBlockSize more bytes.
+ * maxBlockSize is implementation dependent.
+ * It's the maximum size you intend to compress into a single block.
+ * In which case, encoding and decoding buffers do not need
+ * to be synchronized, and encoding ring buffer can have any size,
+ * including small ones ( < 64 KB).
+ * - _At least_ 64 KB + 8 bytes + maxBlockSize.
+ * In which case, encoding and decoding buffers do not need to be
+ * synchronized, and encoding ring buffer can have any size,
+ * including larger than decoding buffer.
+ * Whenever these conditions are not possible, save the last 64KB of decoded
+ * data into a safe buffer, and indicate where it is saved
+ * using LZ4_setStreamDecode()
+ *
+ * Return: number of bytes decompressed into destination buffer
+ * (necessarily <= maxDecompressedSize)
+ * or a negative result in case of error
+ */
+int LZ4_decompress_fast_continue(LZ4_streamDecode_t *LZ4_streamDecode,
+ const char *source, char *dest, int originalSize);
+
+/**
+ * LZ4_decompress_safe_usingDict() - Same as LZ4_setStreamDecode()
+ * followed by LZ4_decompress_safe_continue()
+ * @source: source address of the compressed data
+ * @dest: output buffer address of the uncompressed data
+ * which must be already allocated
+ * @compressedSize: is the precise full size of the compressed block
+ * @maxDecompressedSize: is the size of 'dest' buffer
+ * @dictStart: pointer to the start of the dictionary in memory
+ * @dictSize: size of dictionary
+ *
+ * This decoding function works the same as
+ * a combination of LZ4_setStreamDecode() followed by
+ * LZ4_decompress_safe_continue()
+ * It is stand-alone, and doesn't need an LZ4_streamDecode_t structure.
+ *
+ * Return: number of bytes decompressed into destination buffer
+ * (necessarily <= maxDecompressedSize)
+ * or a negative result in case of error
+ */
+int LZ4_decompress_safe_usingDict(const char *source, char *dest,
+ int compressedSize, int maxDecompressedSize, const char *dictStart,
+ int dictSize);
+
+/**
+ * LZ4_decompress_fast_usingDict() - Same as LZ4_setStreamDecode()
+ * followed by LZ4_decompress_fast_continue()
+ * @source: source address of the compressed data
+ * @dest: output buffer address of the uncompressed data
+ * which must be already allocated with 'originalSize' bytes
+ * @originalSize: is the original and therefore uncompressed size
+ * @dictStart: pointer to the start of the dictionary in memory
+ * @dictSize: size of dictionary
+ *
+ * This decoding function works the same as
+ * a combination of LZ4_setStreamDecode() followed by
+ * LZ4_decompress_fast_continue()
+ * It is stand-alone, and doesn't need an LZ4_streamDecode_t structure.
+ *
+ * Return: number of bytes decompressed into destination buffer
+ * (necessarily <= maxDecompressedSize)
+ * or a negative result in case of error
+ */
+int LZ4_decompress_fast_usingDict(const char *source, char *dest,
+ int originalSize, const char *dictStart, int dictSize);
+
#endif
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index ab93174..d4b5635 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -424,12 +424,20 @@
}
#endif
+extern unsigned long memblock_reserved_memory_within(phys_addr_t start_addr,
+ phys_addr_t end_addr);
#else
static inline phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align)
{
return 0;
}
+static inline unsigned long memblock_reserved_memory_within(phys_addr_t start_addr,
+ phys_addr_t end_addr)
+{
+ return 0;
+}
+
#endif /* CONFIG_HAVE_MEMBLOCK */
#endif /* __KERNEL__ */
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 734169f..eebc2c3 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -515,6 +515,10 @@
*/
bool tlb_flush_pending;
#endif
+#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
+ /* See flush_tlb_batched_pending() */
+ bool tlb_flush_batched;
+#endif
struct uprobes_state uprobes_state;
#ifdef CONFIG_X86_INTEL_MPX
/* address of the bounds directory */
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index ad7f915..9d1161a 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -712,6 +712,7 @@
* is the first PFN that needs to be initialised.
*/
unsigned long first_deferred_pfn;
+ unsigned long static_init_size;
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
} pg_data_t;
diff --git a/include/linux/mount.h b/include/linux/mount.h
index 0e9b097..b606d8f 100644
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -96,4 +96,6 @@
extern dev_t name_to_dev_t(const char *name);
+extern unsigned int sysctl_mount_max;
+
#endif /* _LINUX_MOUNT_H */
diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h
index 366cf77..806d0ab 100644
--- a/include/linux/mtd/map.h
+++ b/include/linux/mtd/map.h
@@ -122,18 +122,13 @@
#endif
#ifdef CONFIG_MTD_MAP_BANK_WIDTH_32
-# ifdef map_bankwidth
-# undef map_bankwidth
-# define map_bankwidth(map) ((map)->bankwidth)
-# undef map_bankwidth_is_large
-# define map_bankwidth_is_large(map) (map_bankwidth(map) > BITS_PER_LONG/8)
-# undef map_words
-# define map_words(map) map_calc_words(map)
-# else
-# define map_bankwidth(map) 32
-# define map_bankwidth_is_large(map) (1)
-# define map_words(map) map_calc_words(map)
-# endif
+/* always use indirect access for 256-bit to preserve kernel stack */
+# undef map_bankwidth
+# define map_bankwidth(map) ((map)->bankwidth)
+# undef map_bankwidth_is_large
+# define map_bankwidth_is_large(map) (map_bankwidth(map) > BITS_PER_LONG/8)
+# undef map_words
+# define map_words(map) map_calc_words(map)
#define map_bankwidth_is_32(map) (map_bankwidth(map) == 32)
#undef MAX_MAP_BANKWIDTH
#define MAX_MAP_BANKWIDTH 32
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 37f05cb1..1af6161 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -573,6 +573,7 @@
#define PCI_DEVICE_ID_AMD_CS5536_EHC 0x2095
#define PCI_DEVICE_ID_AMD_CS5536_UDC 0x2096
#define PCI_DEVICE_ID_AMD_CS5536_UOC 0x2097
+#define PCI_DEVICE_ID_AMD_CS5536_DEV_IDE 0x2092
#define PCI_DEVICE_ID_AMD_CS5536_IDE 0x209A
#define PCI_DEVICE_ID_AMD_LX_VIDEO 0x2081
#define PCI_DEVICE_ID_AMD_LX_AES 0x2082
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 05fde31..b64825d6 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -785,6 +785,10 @@
int genphy_suspend(struct phy_device *phydev);
int genphy_resume(struct phy_device *phydev);
int genphy_soft_reset(struct phy_device *phydev);
+static inline int genphy_no_soft_reset(struct phy_device *phydev)
+{
+ return 0;
+}
void phy_driver_unregister(struct phy_driver *drv);
void phy_drivers_unregister(struct phy_driver *drv, int n);
int phy_driver_register(struct phy_driver *new_driver);
diff --git a/include/linux/pstore_ram.h b/include/linux/pstore_ram.h
index e81198b..e9e6beb 100644
--- a/include/linux/pstore_ram.h
+++ b/include/linux/pstore_ram.h
@@ -24,6 +24,13 @@
#include <linux/list.h>
#include <linux/types.h>
+/*
+ * Choose whether access to the RAM zone requires locking or not. If a zone
+ * can be written to from different CPUs like with ftrace for example, then
+ * PRZ_FLAG_NO_LOCK is used. For all other cases, locking is required.
+ */
+#define PRZ_FLAG_NO_LOCK BIT(0)
+
struct persistent_ram_buffer;
struct rs_control;
@@ -48,6 +55,8 @@
/* Common buffer size */
size_t buffer_size;
+ u32 flags;
+ raw_spinlock_t buffer_lock;
/* ECC correction */
char *par_buffer;
@@ -65,7 +74,7 @@
struct persistent_ram_zone *persistent_ram_new(phys_addr_t start,
phys_addr_t alt_start, size_t size,
u32 sig, struct persistent_ram_ecc_info *ecc_info,
- unsigned int memtype);
+ unsigned int memtype, u32 flags);
void persistent_ram_free(struct persistent_ram_zone *prz);
void persistent_ram_zap(struct persistent_ram_zone *prz, bool use_alt);
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index e13bfdf..81fdf4b 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -50,7 +50,8 @@
unsigned long addr, unsigned long data);
extern void ptrace_notify(int exit_code);
extern void __ptrace_link(struct task_struct *child,
- struct task_struct *new_parent);
+ struct task_struct *new_parent,
+ const struct cred *ptracer_cred);
extern void __ptrace_unlink(struct task_struct *child);
extern void exit_ptrace(struct task_struct *tracer, struct list_head *dead);
#define PTRACE_MODE_READ 0x01
@@ -202,7 +203,7 @@
if (unlikely(ptrace) && current->ptrace) {
child->ptrace = current->ptrace;
- __ptrace_link(child, current->parent);
+ __ptrace_link(child, current->parent, current->ptracer_cred);
if (child->ptrace & PT_SEIZED)
task_set_jobctl_pending(child, JOBCTL_TRAP_STOP);
@@ -211,6 +212,8 @@
set_tsk_thread_flag(child, TIF_SIGPENDING);
}
+ else
+ child->ptracer_cred = NULL;
}
/**
diff --git a/include/linux/restart_block.h b/include/linux/restart_block.h
new file mode 100644
index 0000000..0d905d8
--- /dev/null
+++ b/include/linux/restart_block.h
@@ -0,0 +1,51 @@
+/*
+ * Common syscall restarting data
+ */
+#ifndef __LINUX_RESTART_BLOCK_H
+#define __LINUX_RESTART_BLOCK_H
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+struct timespec;
+struct compat_timespec;
+struct pollfd;
+
+/*
+ * System call restart block.
+ */
+struct restart_block {
+ long (*fn)(struct restart_block *);
+ union {
+ /* For futex_wait and futex_wait_requeue_pi */
+ struct {
+ u32 __user *uaddr;
+ u32 val;
+ u32 flags;
+ u32 bitset;
+ u64 time;
+ u32 __user *uaddr2;
+ } futex;
+ /* For nanosleep */
+ struct {
+ clockid_t clockid;
+ struct timespec __user *rmtp;
+#ifdef CONFIG_COMPAT
+ struct compat_timespec __user *compat_rmtp;
+#endif
+ u64 expires;
+ } nanosleep;
+ /* For poll */
+ struct {
+ struct pollfd __user *ufds;
+ int nfds;
+ int has_timeout;
+ unsigned long tv_sec;
+ unsigned long tv_nsec;
+ } poll;
+ };
+};
+
+extern long do_no_restart_syscall(struct restart_block *parm);
+
+#endif /* __LINUX_RESTART_BLOCK_H */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 6d313be..f02e7bb 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -170,6 +170,7 @@
DECLARE_PER_CPU(unsigned long, process_counts);
extern int nr_processes(void);
extern unsigned long nr_running(void);
+extern bool cpu_has_rt_task(int cpu);
extern bool single_task_running(void);
extern unsigned long nr_iowait(void);
extern unsigned long nr_iowait_cpu(int cpu);
@@ -831,6 +832,16 @@
#define SIGNAL_UNKILLABLE 0x00000040 /* for init: ignore fatal signals */
+#define SIGNAL_STOP_MASK (SIGNAL_CLD_MASK | SIGNAL_STOP_STOPPED | \
+ SIGNAL_STOP_CONTINUED)
+
+static inline void signal_set_stop_flags(struct signal_struct *sig,
+ unsigned int flags)
+{
+ WARN_ON(sig->flags & (SIGNAL_GROUP_EXIT|SIGNAL_GROUP_COREDUMP));
+ sig->flags = (sig->flags & ~SIGNAL_STOP_MASK) | flags;
+}
+
/* If true, all threads except ->group_exit_task have pending SIGKILL */
static inline int signal_group_exit(const struct signal_struct *sig)
{
@@ -1442,6 +1453,10 @@
unsigned short on_rq;
unsigned short on_list;
+ /* Accesses for these must be guarded by rq->lock of the task's rq */
+ bool schedtune_enqueued;
+ struct hrtimer schedtune_timer;
+
struct sched_rt_entity *back;
#ifdef CONFIG_RT_GROUP_SCHED
struct sched_rt_entity *parent;
@@ -1539,6 +1554,13 @@
};
struct task_struct {
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+ /*
+ * For reasons of header soup (see current_thread_info()), this
+ * must be the first element of task_struct.
+ */
+ struct thread_info thread_info;
+#endif
volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
void *stack;
atomic_t usage;
@@ -1548,6 +1570,9 @@
#ifdef CONFIG_SMP
struct llist_node wake_entry;
int on_cpu;
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+ unsigned int cpu; /* current CPU */
+#endif
unsigned int wakee_flips;
unsigned long wakee_flip_decay_ts;
struct task_struct *last_wakee;
@@ -1645,6 +1670,10 @@
#ifdef CONFIG_COMPAT_BRK
unsigned brk_randomized:1;
#endif
+#ifdef CONFIG_CGROUPS
+ /* disallow userland-initiated cgroup migration */
+ unsigned no_cgroup_migration:1;
+#endif
unsigned long atomic_flags; /* Flags needing atomic access. */
@@ -1690,6 +1719,8 @@
cputime_t utime, stime, utimescaled, stimescaled;
cputime_t gtime;
+ atomic64_t *time_in_state;
+ unsigned int max_state;
struct prev_cputime prev_cputime;
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
seqlock_t vtime_seqlock;
@@ -2121,24 +2152,8 @@
return tsk->tgid;
}
+
static inline int pid_alive(const struct task_struct *p);
-static inline pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
-static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
-{
- pid_t pid = 0;
-
- rcu_read_lock();
- if (pid_alive(tsk))
- pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
- rcu_read_unlock();
-
- return pid;
-}
-
-static inline pid_t task_ppid_nr(const struct task_struct *tsk)
-{
- return task_ppid_nr_ns(tsk, &init_pid_ns);
-}
static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk,
struct pid_namespace *ns)
@@ -2173,6 +2188,23 @@
return __task_pid_nr_ns(tsk, __PIDTYPE_TGID, NULL);
}
+static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
+{
+ pid_t pid = 0;
+
+ rcu_read_lock();
+ if (pid_alive(tsk))
+ pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
+ rcu_read_unlock();
+
+ return pid;
+}
+
+static inline pid_t task_ppid_nr(const struct task_struct *tsk)
+{
+ return task_ppid_nr_ns(tsk, &init_pid_ns);
+}
+
/* obsolete, do not use */
static inline pid_t task_pgrp_nr(struct task_struct *tsk)
{
@@ -2596,7 +2628,9 @@
void yield(void);
union thread_union {
+#ifndef CONFIG_THREAD_INFO_IN_TASK
struct thread_info thread_info;
+#endif
unsigned long stack[THREAD_SIZE/sizeof(long)];
};
@@ -2986,10 +3020,34 @@
cgroup_threadgroup_change_end(tsk);
}
-#ifndef __HAVE_THREAD_FUNCTIONS
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+
+static inline struct thread_info *task_thread_info(struct task_struct *task)
+{
+ return &task->thread_info;
+}
+
+/*
+ * When accessing the stack of a non-current task that might exit, use
+ * try_get_task_stack() instead. task_stack_page will return a pointer
+ * that could get freed out from under you.
+ */
+static inline void *task_stack_page(const struct task_struct *task)
+{
+ return task->stack;
+}
+
+#define setup_thread_stack(new,old) do { } while(0)
+
+static inline unsigned long *end_of_stack(const struct task_struct *task)
+{
+ return task->stack;
+}
+
+#elif !defined(__HAVE_THREAD_FUNCTIONS)
#define task_thread_info(task) ((struct thread_info *)(task)->stack)
-#define task_stack_page(task) ((task)->stack)
+#define task_stack_page(task) ((void *)(task)->stack)
static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
{
@@ -3016,6 +3074,14 @@
}
#endif
+
+static inline void *try_get_task_stack(struct task_struct *tsk)
+{
+ return task_stack_page(tsk);
+}
+
+static inline void put_task_stack(struct task_struct *tsk) {}
+
#define task_stack_end_corrupted(task) \
(*(end_of_stack(task)) != STACK_END_MAGIC)
@@ -3026,7 +3092,7 @@
return (obj >= stack) && (obj < (stack + THREAD_SIZE));
}
-extern void thread_info_cache_init(void);
+extern void thread_stack_cache_init(void);
#ifdef CONFIG_DEBUG_STACK_USAGE
static inline unsigned long stack_not_used(struct task_struct *p)
@@ -3290,7 +3356,11 @@
static inline unsigned int task_cpu(const struct task_struct *p)
{
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+ return p->cpu;
+#else
return task_thread_info(p)->cpu;
+#endif
}
static inline int task_node(const struct task_struct *p)
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index dde00de..f3d45dd 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -7,13 +7,10 @@
#include <linux/mutex.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
+#include <linux/fs.h>
+#include <linux/cred.h>
struct seq_operations;
-struct file;
-struct path;
-struct inode;
-struct dentry;
-struct user_namespace;
struct seq_file {
char *buf;
@@ -27,9 +24,7 @@
struct mutex lock;
const struct seq_operations *op;
int poll_event;
-#ifdef CONFIG_USER_NS
- struct user_namespace *user_ns;
-#endif
+ const struct file *file;
void *private;
};
@@ -147,7 +142,7 @@
static inline struct user_namespace *seq_user_ns(struct seq_file *seq)
{
#ifdef CONFIG_USER_NS
- return seq->user_ns;
+ return seq->file->f_cred->user_ns;
#else
extern struct user_namespace init_user_ns;
return &init_user_ns;
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index d443d9a..3f61c64 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1084,9 +1084,6 @@
static inline void skb_sender_cpu_clear(struct sk_buff *skb)
{
-#ifdef CONFIG_XPS
- skb->sender_cpu = 0;
-#endif
}
#ifdef NET_SKBUFF_DATA_USES_OFFSET
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 13077c1..16dc1e4 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -221,7 +221,7 @@
* (PAGE_SIZE*2). Larger requests are passed to the page allocator.
*/
#define KMALLOC_SHIFT_HIGH (PAGE_SHIFT + 1)
-#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT)
+#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT - 1)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW 3
#endif
@@ -234,7 +234,7 @@
* be allocated from the same page.
*/
#define KMALLOC_SHIFT_HIGH PAGE_SHIFT
-#define KMALLOC_SHIFT_MAX 30
+#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT - 1)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW 3
#endif
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index ef17db6c..fa7bc29 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -41,8 +41,6 @@
void __user *, size_t *, loff_t *);
extern int proc_dointvec(struct ctl_table *, int,
void __user *, size_t *, loff_t *);
-extern int proc_douintvec(struct ctl_table *, int,
- void __user *, size_t *, loff_t *);
extern int proc_dointvec_minmax(struct ctl_table *, int,
void __user *, size_t *, loff_t *);
extern int proc_dointvec_jiffies(struct ctl_table *, int,
diff --git a/include/linux/syslog.h b/include/linux/syslog.h
index c3a7f0c..e1c3632 100644
--- a/include/linux/syslog.h
+++ b/include/linux/syslog.h
@@ -49,13 +49,4 @@
int do_syslog(int type, char __user *buf, int count, int source);
-#ifdef CONFIG_PRINTK
-int check_syslog_permissions(int type, int source);
-#else
-static inline int check_syslog_permissions(int type, int source)
-{
- return 0;
-}
-#endif
-
#endif /* _LINUX_SYSLOG_H */
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index 4cf8951..8933ecc 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -9,46 +9,17 @@
#include <linux/types.h>
#include <linux/bug.h>
+#include <linux/restart_block.h>
-struct timespec;
-struct compat_timespec;
-
+#ifdef CONFIG_THREAD_INFO_IN_TASK
/*
- * System call restart block.
+ * For CONFIG_THREAD_INFO_IN_TASK kernels we need <asm/current.h> for the
+ * definition of current, but for !CONFIG_THREAD_INFO_IN_TASK kernels,
+ * including <asm/current.h> can cause a circular dependency on some platforms.
*/
-struct restart_block {
- long (*fn)(struct restart_block *);
- union {
- /* For futex_wait and futex_wait_requeue_pi */
- struct {
- u32 __user *uaddr;
- u32 val;
- u32 flags;
- u32 bitset;
- u64 time;
- u32 __user *uaddr2;
- } futex;
- /* For nanosleep */
- struct {
- clockid_t clockid;
- struct timespec __user *rmtp;
-#ifdef CONFIG_COMPAT
- struct compat_timespec __user *compat_rmtp;
+#include <asm/current.h>
+#define current_thread_info() ((struct thread_info *)current)
#endif
- u64 expires;
- } nanosleep;
- /* For poll */
- struct {
- struct pollfd __user *ufds;
- int nfds;
- int has_timeout;
- unsigned long tv_sec;
- unsigned long tv_nsec;
- } poll;
- };
-};
-
-extern long do_no_restart_syscall(struct restart_block *parm);
#include <linux/bitops.h>
#include <asm/thread_info.h>
diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h
index 2524722..f0f1793 100644
--- a/include/linux/timekeeper_internal.h
+++ b/include/linux/timekeeper_internal.h
@@ -29,7 +29,6 @@
*/
struct tk_read_base {
struct clocksource *clock;
- cycle_t (*read)(struct clocksource *cs);
cycle_t mask;
cycle_t cycle_last;
u32 mult;
diff --git a/include/linux/usb/chipidea.h b/include/linux/usb/chipidea.h
index 5dd75fa..f9be467 100644
--- a/include/linux/usb/chipidea.h
+++ b/include/linux/usb/chipidea.h
@@ -14,6 +14,7 @@
* struct ci_hdrc_cable - structure for external connector cable state tracking
* @state: current state of the line
* @changed: set to true when extcon event happen
+ * @enabled: set to true if we've enabled the vbus or id interrupt
* @edev: device which generate events
* @ci: driver state of the chipidea device
* @nb: hold event notification callback
@@ -22,6 +23,7 @@
struct ci_hdrc_cable {
bool state;
bool changed;
+ bool enabled;
struct extcon_dev *edev;
struct ci_hdrc *ci;
struct notifier_block nb;
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index e888eb9..dff7adb 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -579,9 +579,9 @@
((USB_DIR_IN|USB_TYPE_STANDARD|USB_RECIP_INTERFACE)<<8)
#define EndpointRequest \
- ((USB_DIR_IN|USB_TYPE_STANDARD|USB_RECIP_INTERFACE)<<8)
+ ((USB_DIR_IN|USB_TYPE_STANDARD|USB_RECIP_ENDPOINT)<<8)
#define EndpointOutRequest \
- ((USB_DIR_OUT|USB_TYPE_STANDARD|USB_RECIP_INTERFACE)<<8)
+ ((USB_DIR_OUT|USB_TYPE_STANDARD|USB_RECIP_ENDPOINT)<<8)
/* class requests from the USB 2.0 hub spec, table 11-15 */
/* GetBusState and SetHubDescriptor are optional, omitted */
diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h
index 1d0043d..de2a722 100644
--- a/include/linux/usb/quirks.h
+++ b/include/linux/usb/quirks.h
@@ -50,4 +50,10 @@
/* device can't handle Link Power Management */
#define USB_QUIRK_NO_LPM BIT(10)
+/*
+ * Device reports its bInterval as linear frames instead of the
+ * USB 2.0 calculation.
+ */
+#define USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL BIT(11)
+
#endif /* __LINUX_USB_QUIRKS_H */
diff --git a/include/linux/vfio.h b/include/linux/vfio.h
index ddb4409..34851bf 100644
--- a/include/linux/vfio.h
+++ b/include/linux/vfio.h
@@ -85,6 +85,8 @@
*/
extern struct vfio_group *vfio_group_get_external_user(struct file *filep);
extern void vfio_group_put_external_user(struct vfio_group *group);
+extern bool vfio_external_group_match_file(struct vfio_group *group,
+ struct file *filep);
extern int vfio_external_user_iommu_id(struct vfio_group *group);
extern long vfio_external_check_extension(struct vfio_group *group,
unsigned long arg);
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 0e32bc7..4e4aee6 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -311,6 +311,7 @@
__WQ_DRAINING = 1 << 16, /* internal: workqueue is draining */
__WQ_ORDERED = 1 << 17, /* internal: workqueue is ordered */
+ __WQ_ORDERED_EXPLICIT = 1 << 19, /* internal: alloc_ordered_workqueue() */
WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */
WQ_MAX_UNBOUND_PER_CPU = 4, /* 4 * #cpus for unbound wq */
@@ -408,7 +409,8 @@
* Pointer to the allocated workqueue on success, %NULL on failure.
*/
#define alloc_ordered_workqueue(fmt, flags, args...) \
- alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args)
+ alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | \
+ __WQ_ORDERED_EXPLICIT | (flags), 1, ##args)
#define create_workqueue(name) \
alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, (name))
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index d540657..9cba490 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -20,6 +20,8 @@
#define ADDRCONF_TIMER_FUZZ (HZ / 4)
#define ADDRCONF_TIMER_FUZZ_MAX (HZ)
+#define ADDRCONF_NOTIFY_PRIORITY 0
+
#include <linux/in.h>
#include <linux/in6.h>
diff --git a/include/net/dst.h b/include/net/dst.h
index c7329dc..e4f4506 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -110,10 +110,16 @@
};
};
+struct dst_metrics {
+ u32 metrics[RTAX_MAX];
+ atomic_t refcnt;
+};
+extern const struct dst_metrics dst_default_metrics;
+
u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old);
-extern const u32 dst_default_metrics[];
#define DST_METRICS_READ_ONLY 0x1UL
+#define DST_METRICS_REFCOUNTED 0x2UL
#define DST_METRICS_FLAGS 0x3UL
#define __DST_METRICS_PTR(Y) \
((u32 *)((Y) & ~DST_METRICS_FLAGS))
diff --git a/include/net/ip.h b/include/net/ip.h
index 8a83762..c10f738 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -317,7 +317,7 @@
!forwarding)
return dst_mtu(dst);
- return min(dst->dev->mtu, IP_MAX_MTU);
+ return min(READ_ONCE(dst->dev->mtu), IP_MAX_MTU);
}
static inline unsigned int ip_skb_dst_mtu(const struct sk_buff *skb)
@@ -330,7 +330,7 @@
return ip_dst_mtu_maybe_forward(skb_dst(skb), forwarding);
}
- return min(skb_dst(skb)->dev->mtu, IP_MAX_MTU);
+ return min(READ_ONCE(skb_dst(skb)->dev->mtu), IP_MAX_MTU);
}
u32 ip_idents_reserve(u32 hash, int segs);
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index af0e8c0..f9bdfb0 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -21,6 +21,7 @@
#include <net/flow.h>
#include <net/ip6_fib.h>
#include <net/sock.h>
+#include <net/lwtunnel.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/route.h>
@@ -77,6 +78,7 @@
struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6,
int flags);
+void ip6_route_init_special_entries(void);
int ip6_route_init(void);
void ip6_route_cleanup(void);
@@ -208,4 +210,11 @@
return daddr;
}
+static inline bool rt6_duplicate_nexthop(struct rt6_info *a, struct rt6_info *b)
+{
+ return a->dst.dev == b->dst.dev &&
+ a->rt6i_idev == b->rt6i_idev &&
+ ipv6_addr_equal(&a->rt6i_gateway, &b->rt6i_gateway) &&
+ !lwtunnel_cmp_encap(a->dst.lwtstate, b->dst.lwtstate);
+}
#endif
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 3f98233..bda1721 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -112,11 +112,11 @@
unsigned char fib_type;
__be32 fib_prefsrc;
u32 fib_priority;
- u32 *fib_metrics;
-#define fib_mtu fib_metrics[RTAX_MTU-1]
-#define fib_window fib_metrics[RTAX_WINDOW-1]
-#define fib_rtt fib_metrics[RTAX_RTT-1]
-#define fib_advmss fib_metrics[RTAX_ADVMSS-1]
+ struct dst_metrics *fib_metrics;
+#define fib_mtu fib_metrics->metrics[RTAX_MTU-1]
+#define fib_window fib_metrics->metrics[RTAX_WINDOW-1]
+#define fib_rtt fib_metrics->metrics[RTAX_RTT-1]
+#define fib_advmss fib_metrics->metrics[RTAX_ADVMSS-1]
int fib_nhs;
#ifdef CONFIG_IP_ROUTE_MULTIPATH
int fib_weight;
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 9a5c9f0..7a8066b 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -744,6 +744,11 @@
{
u32 hash;
+ /* @flowlabel may include more than a flow label, eg, the traffic class.
+ * Here we want only the flow label value.
+ */
+ flowlabel &= IPV6_FLOWLABEL_MASK;
+
if (flowlabel ||
net->ipv6.sysctl.auto_flowlabels == IP6_AUTO_FLOW_LABEL_OFF ||
(!autolabel &&
@@ -958,6 +963,7 @@
*/
extern const struct proto_ops inet6_stream_ops;
extern const struct proto_ops inet6_dgram_ops;
+extern const struct proto_ops inet6_sockraw_ops;
struct group_source_req;
struct group_filter;
diff --git a/include/net/iw_handler.h b/include/net/iw_handler.h
index e0f4109..c2aa73e 100644
--- a/include/net/iw_handler.h
+++ b/include/net/iw_handler.h
@@ -556,7 +556,8 @@
memcpy(stream + lcp_len,
((char *) &iwe->u) + IW_EV_POINT_OFF,
IW_EV_POINT_PK_LEN - IW_EV_LCP_PK_LEN);
- memcpy(stream + point_len, extra, iwe->u.data.length);
+ if (iwe->u.data.length && extra)
+ memcpy(stream + point_len, extra, iwe->u.data.length);
stream += event_len;
}
return stream;
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 82e04cb..97d6240 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -1673,6 +1673,9 @@
* @supp_rates: Bitmap of supported rates (per band)
* @ht_cap: HT capabilities of this STA; restricted to our own capabilities
* @vht_cap: VHT capabilities of this STA; restricted to our own capabilities
+ * @max_rx_aggregation_subframes: maximal amount of frames in a single AMPDU
+ * that this station is allowed to transmit to us.
+ * Can be modified by driver.
* @wme: indicates whether the STA supports QoS/WME (if local devices does,
* otherwise always false)
* @drv_priv: data area for driver use, will always be aligned to
@@ -1699,6 +1702,7 @@
u16 aid;
struct ieee80211_sta_ht_cap ht_cap;
struct ieee80211_sta_vht_cap vht_cap;
+ u8 max_rx_aggregation_subframes;
bool wme;
u8 uapsd_queues;
u8 max_sp;
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index e5bba89..7a5d6a0 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -717,8 +717,11 @@
old = *pold;
*pold = new;
if (old != NULL) {
- qdisc_tree_reduce_backlog(old, old->q.qlen, old->qstats.backlog);
+ unsigned int qlen = old->q.qlen;
+ unsigned int backlog = old->qstats.backlog;
+
qdisc_reset(old);
+ qdisc_tree_reduce_backlog(old, qlen, backlog);
}
sch_tree_unlock(sch);
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index ce13cf2..d33b17b 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -444,6 +444,8 @@
#define _sctp_walk_params(pos, chunk, end, member)\
for (pos.v = chunk->member;\
+ (pos.v + offsetof(struct sctp_paramhdr, length) + sizeof(pos.p->length) <=\
+ (void *)chunk + end) &&\
pos.v <= (void *)chunk + end - ntohs(pos.p->length) &&\
ntohs(pos.p->length) >= sizeof(sctp_paramhdr_t);\
pos.v += WORD_ROUND(ntohs(pos.p->length)))
@@ -454,6 +456,8 @@
#define _sctp_walk_errors(err, chunk_hdr, end)\
for (err = (sctp_errhdr_t *)((void *)chunk_hdr + \
sizeof(sctp_chunkhdr_t));\
+ ((void *)err + offsetof(sctp_errhdr_t, length) + sizeof(err->length) <=\
+ (void *)chunk_hdr + end) &&\
(void *)err <= (void *)chunk_hdr + end - ntohs(err->length) &&\
ntohs(err->length) >= sizeof(sctp_errhdr_t); \
err = (sctp_errhdr_t *)((void *)err + WORD_ROUND(ntohs(err->length))))
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index d18cbaf..c81d806 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -947,10 +947,6 @@
struct flow_cache_object flo;
struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
int num_pols, num_xfrms;
-#ifdef CONFIG_XFRM_SUB_POLICY
- struct flowi *origin;
- struct xfrm_selector *partner;
-#endif
u32 xfrm_genid;
u32 policy_genid;
u32 route_mtu_cached;
@@ -966,12 +962,6 @@
dst_release(xdst->route);
if (likely(xdst->u.dst.xfrm))
xfrm_state_put(xdst->u.dst.xfrm);
-#ifdef CONFIG_XFRM_SUB_POLICY
- kfree(xdst->origin);
- xdst->origin = NULL;
- kfree(xdst->partner);
- xdst->partner = NULL;
-#endif
}
#endif
diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
index 4d1c46a..c7b1dc7 100644
--- a/include/scsi/libiscsi.h
+++ b/include/scsi/libiscsi.h
@@ -196,6 +196,7 @@
struct iscsi_task *task; /* xmit task in progress */
/* xmit */
+ spinlock_t taskqueuelock; /* protects the next three lists */
struct list_head mgmtqueue; /* mgmt (control) xmit queue */
struct list_head cmdqueue; /* data-path cmd queue */
struct list_head requeue; /* tasks needing another run */
diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h
index 373d334..22f442a 100644
--- a/include/target/iscsi/iscsi_target_core.h
+++ b/include/target/iscsi/iscsi_target_core.h
@@ -64,6 +64,14 @@
#define TA_DEFAULT_FABRIC_PROT_TYPE 0
/* TPG status needs to be enabled to return sendtargets discovery endpoint info */
#define TA_DEFAULT_TPG_ENABLED_SENDTARGETS 1
+/*
+ * Used to control the sending of keys with optional to respond state bit,
+ * as a workaround for non RFC compliant initiators,that do not propose,
+ * nor respond to specific keys required for login to complete.
+ *
+ * See iscsi_check_proposer_for_optional_reply() for more details.
+ */
+#define TA_DEFAULT_LOGIN_KEYS_WORKAROUND 1
#define ISCSI_IOV_DATA_BUFFER 5
@@ -554,6 +562,7 @@
#define LOGIN_FLAGS_READ_ACTIVE 1
#define LOGIN_FLAGS_CLOSED 2
#define LOGIN_FLAGS_READY 4
+#define LOGIN_FLAGS_INITIAL_PDU 8
unsigned long login_flags;
struct delayed_work login_work;
struct delayed_work login_cleanup_work;
@@ -765,6 +774,7 @@
u8 t10_pi;
u32 fabric_prot_type;
u32 tpg_enabled_sendtargets;
+ u32 login_keys_workaround;
struct iscsi_portal_group *tpg;
};
@@ -774,6 +784,7 @@
int np_sock_type;
enum np_thread_state_table np_thread_state;
bool enabled;
+ atomic_t np_reset_count;
enum iscsi_timer_flags_table np_login_timer_flags;
u32 np_exports;
enum np_flags_table np_flags;
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index ed66414..1adf873 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -714,6 +714,7 @@
#define SE_LUN_LINK_MAGIC 0xffff7771
u32 lun_link_magic;
u32 lun_access;
+ bool lun_shutdown;
u32 lun_index;
/* RELATIVE TARGET PORT IDENTIFER */
diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h
index 97069ec..5f9b62c 100644
--- a/include/target/target_core_fabric.h
+++ b/include/target/target_core_fabric.h
@@ -117,7 +117,7 @@
struct se_node_acl *, struct se_session *, void *);
void transport_register_session(struct se_portal_group *,
struct se_node_acl *, struct se_session *, void *);
-void target_get_session(struct se_session *);
+int target_get_session(struct se_session *);
void target_put_session(struct se_session *);
ssize_t target_show_dynamic_sessions(struct se_portal_group *, char *);
void transport_free_session(struct se_session *);
@@ -172,8 +172,7 @@
const char *);
struct se_node_acl *core_tpg_check_initiator_node_acl(struct se_portal_group *,
unsigned char *);
-int core_tpg_set_initiator_node_queue_depth(struct se_portal_group *,
- unsigned char *, u32, int);
+int core_tpg_set_initiator_node_queue_depth(struct se_node_acl *, u32);
int core_tpg_set_initiator_node_tag(struct se_portal_group *,
struct se_node_acl *, const char *);
int core_tpg_register(struct se_wwn *, struct se_portal_group *, int);
diff --git a/include/uapi/linux/android/binder.h b/include/uapi/linux/android/binder.h
index c063d63..619a185 100644
--- a/include/uapi/linux/android/binder.h
+++ b/include/uapi/linux/android/binder.h
@@ -222,6 +222,19 @@
#define BINDER_CURRENT_PROTOCOL_VERSION 8
#endif
+/*
+ * Use with BINDER_GET_NODE_DEBUG_INFO, driver reads ptr, writes to all fields.
+ * Set ptr to NULL for the first call to get the info for the first node, and
+ * then repeat the call passing the previously returned value to get the next
+ * nodes. ptr will be 0 when there are no more nodes.
+ */
+struct binder_node_debug_info {
+ binder_uintptr_t ptr;
+ binder_uintptr_t cookie;
+ __u32 has_strong_ref;
+ __u32 has_weak_ref;
+};
+
#define BINDER_WRITE_READ _IOWR('b', 1, struct binder_write_read)
#define BINDER_SET_IDLE_TIMEOUT _IOW('b', 3, __s64)
#define BINDER_SET_MAX_THREADS _IOW('b', 5, __u32)
@@ -230,6 +243,7 @@
#define BINDER_THREAD_EXIT _IOW('b', 8, __s32)
#define BINDER_VERSION _IOWR('b', 9, struct binder_version)
#define BINDER_SET_INHERIT_FIFO_PRIO _IO('b', 10)
+#define BINDER_GET_NODE_DEBUG_INFO _IOWR('b', 11, struct binder_node_debug_info)
/*
* NOTE: Two special error codes you should check for when calling
diff --git a/include/uapi/linux/ipv6_route.h b/include/uapi/linux/ipv6_route.h
index f6598d1..316e838 100644
--- a/include/uapi/linux/ipv6_route.h
+++ b/include/uapi/linux/ipv6_route.h
@@ -34,7 +34,7 @@
#define RTF_PREF(pref) ((pref) << 27)
#define RTF_PREF_MASK 0x18000000
-#define RTF_PCPU 0x40000000
+#define RTF_PCPU 0x40000000 /* read-only: can not be set by user */
#define RTF_LOCAL 0x80000000
diff --git a/include/uapi/linux/mnh-sm.h b/include/uapi/linux/mnh-sm.h
index 112146d..ef19f34 100644
--- a/include/uapi/linux/mnh-sm.h
+++ b/include/uapi/linux/mnh-sm.h
@@ -39,6 +39,7 @@
#define MNH_SM_MAX 8
#define MNH_ION_BUFFER_SIZE SZ_64M
+#define FW_VER_SIZE 24
#define MNH_SM_IOC_GET_STATE \
_IOR(MNH_SM_IOC_MAGIC, 1, int *)
@@ -56,6 +57,8 @@
_IOR(MNH_SM_IOC_MAGIC, 7, int *)
#define MNH_SM_IOC_POST_UPDATE_BUF \
_IOW(MNH_SM_IOC_MAGIC, 8, struct mnh_update_configs *)
+#define MNH_SM_IOC_GET_FW_VER \
+ _IOR(MNH_SM_IOC_MAGIC, 9, char [FW_VER_SIZE])
enum mnh_sm_state {
MNH_STATE_OFF, /* powered off */
diff --git a/include/uapi/linux/usb/ch11.h b/include/uapi/linux/usb/ch11.h
index 331499d..9ce10d4a 100644
--- a/include/uapi/linux/usb/ch11.h
+++ b/include/uapi/linux/usb/ch11.h
@@ -22,6 +22,9 @@
*/
#define USB_MAXCHILDREN 31
+/* See USB 3.1 spec Table 10-5 */
+#define USB_SS_MAXPORTS 15
+
/*
* Hub request types
*/
diff --git a/init/Kconfig b/init/Kconfig
index a1187f4..d4dc262 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -26,6 +26,16 @@
config BUILDTIME_EXTABLE_SORT
bool
+config THREAD_INFO_IN_TASK
+ bool
+ help
+ Select this to move thread_info off the stack into task_struct. To
+ make this work, an arch will need to remove all thread_info fields
+ except flags and fix any runtime bugs.
+
+ One subtle change that will be needed is to use try_get_task_stack()
+ and put_task_stack() in save_thread_stack_tsk() and get_wchan().
+
menu "General setup"
config BROKEN
diff --git a/init/init_task.c b/init/init_task.c
index ba0a7f36..11f83be1 100644
--- a/init/init_task.c
+++ b/init/init_task.c
@@ -22,5 +22,8 @@
* Initial thread structure. Alignment of this is handled by a special
* linker map entry.
*/
-union thread_union init_thread_union __init_task_data =
- { INIT_THREAD_INFO(init_task) };
+union thread_union init_thread_union __init_task_data = {
+#ifndef CONFIG_THREAD_INFO_IN_TASK
+ INIT_THREAD_INFO(init_task)
+#endif
+};
diff --git a/init/main.c b/init/main.c
index 7d4532b..8c72af2 100644
--- a/init/main.c
+++ b/init/main.c
@@ -468,7 +468,7 @@
}
# if THREAD_SIZE >= PAGE_SIZE
-void __init __weak thread_info_cache_init(void)
+void __init __weak thread_stack_cache_init(void)
{
}
#endif
@@ -644,7 +644,7 @@
/* Should be run before the first non-init thread is created */
init_espfix_bsp();
#endif
- thread_info_cache_init();
+ thread_stack_cache_init();
cred_init();
fork_init();
proc_caches_init();
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index 25b7a67..4643654 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -1251,8 +1251,10 @@
timeo = MAX_SCHEDULE_TIMEOUT;
ret = netlink_attachskb(sock, nc, &timeo, NULL);
- if (ret == 1)
+ if (ret == 1) {
+ sock = NULL;
goto retry;
+ }
if (ret) {
sock = NULL;
nc = NULL;
diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c
index 939945a..a162661 100644
--- a/kernel/audit_watch.c
+++ b/kernel/audit_watch.c
@@ -457,13 +457,15 @@
list_del(&krule->rlist);
if (list_empty(&watch->rules)) {
+ /*
+ * audit_remove_watch() drops our reference to 'parent' which
+ * can get freed. Grab our own reference to be safe.
+ */
+ audit_get_parent(parent);
audit_remove_watch(watch);
-
- if (list_empty(&parent->watches)) {
- audit_get_parent(parent);
+ if (list_empty(&parent->watches))
fsnotify_destroy_mark(&parent->mark, audit_watch_group);
- audit_put_parent(parent);
- }
+ audit_put_parent(parent);
}
}
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 85de509..c97bce6 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -765,6 +765,11 @@
if (err)
return err;
+ if (is_pointer_value(env, insn->src_reg)) {
+ verbose("R%d leaks addr into mem\n", insn->src_reg);
+ return -EACCES;
+ }
+
/* check whether atomic_add can read the memory */
err = check_mem_access(env, insn->dst_reg, insn->off,
BPF_SIZE(insn->code), BPF_READ, -1);
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index c526235..5ed7fc3 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -2770,11 +2770,12 @@
tsk = tsk->group_leader;
/*
- * Workqueue threads may acquire PF_NO_SETAFFINITY and become
- * trapped in a cpuset, or RT worker may be born in a cgroup
- * with no rt_runtime allocated. Just say no.
+ * kthreads may acquire PF_NO_SETAFFINITY during initialization.
+ * If userland migrates such a kthread to a non-root cgroup, it can
+ * become trapped in a cpuset, or RT kthread may be born in a
+ * cgroup with no rt_runtime allocated. Just say no.
*/
- if (tsk == kthreadd_task || (tsk->flags & PF_NO_SETAFFINITY)) {
+ if (tsk->no_cgroup_migration || (tsk->flags & PF_NO_SETAFFINITY)) {
ret = -EINVAL;
goto out_unlock_rcu;
}
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 0dbded2..9b0549a 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -60,6 +60,7 @@
#include <linux/cgroup.h>
#include <linux/wait.h>
+struct static_key cpusets_pre_enable_key __read_mostly = STATIC_KEY_INIT_FALSE;
struct static_key cpusets_enabled_key __read_mostly = STATIC_KEY_INIT_FALSE;
/* See "Frequency meter" comments, below. */
@@ -174,9 +175,9 @@
} cpuset_flagbits_t;
/* convenient tests for these bits */
-static inline bool is_cpuset_online(const struct cpuset *cs)
+static inline bool is_cpuset_online(struct cpuset *cs)
{
- return test_bit(CS_ONLINE, &cs->flags);
+ return test_bit(CS_ONLINE, &cs->flags) && !css_is_dying(&cs->css);
}
static inline int is_cpu_exclusive(const struct cpuset *cs)
@@ -1912,6 +1913,7 @@
{
.name = "memory_pressure",
.read_u64 = cpuset_read_u64,
+ .private = FILE_MEMORY_PRESSURE,
},
{
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 1ab5ef4..a58cdcb 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -8462,6 +8462,37 @@
return 0;
}
+/*
+ * Variation on perf_event_ctx_lock_nested(), except we take two context
+ * mutexes.
+ */
+static struct perf_event_context *
+__perf_event_ctx_lock_double(struct perf_event *group_leader,
+ struct perf_event_context *ctx)
+{
+ struct perf_event_context *gctx;
+
+again:
+ rcu_read_lock();
+ gctx = READ_ONCE(group_leader->ctx);
+ if (!atomic_inc_not_zero(&gctx->refcount)) {
+ rcu_read_unlock();
+ goto again;
+ }
+ rcu_read_unlock();
+
+ mutex_lock_double(&gctx->mutex, &ctx->mutex);
+
+ if (group_leader->ctx != gctx) {
+ mutex_unlock(&ctx->mutex);
+ mutex_unlock(&gctx->mutex);
+ put_ctx(gctx);
+ goto again;
+ }
+
+ return gctx;
+}
+
/**
* sys_perf_event_open - open a performance event, associate it to a task/cpu
*
@@ -8670,28 +8701,27 @@
goto err_context;
/*
- * Do not allow to attach to a group in a different
- * task or CPU context:
+ * Make sure we're both events for the same CPU;
+ * grouping events for different CPUs is broken; since
+ * you can never concurrently schedule them anyhow.
*/
- if (move_group) {
- /*
- * Make sure we're both on the same task, or both
- * per-cpu events.
- */
- if (group_leader->ctx->task != ctx->task)
- goto err_context;
+ if (group_leader->cpu != event->cpu)
+ goto err_context;
- /*
- * Make sure we're both events for the same CPU;
- * grouping events for different CPUs is broken; since
- * you can never concurrently schedule them anyhow.
- */
- if (group_leader->cpu != event->cpu)
- goto err_context;
- } else {
- if (group_leader->ctx != ctx)
- goto err_context;
- }
+ /*
+ * Make sure we're both on the same task, or both
+ * per-CPU events.
+ */
+ if (group_leader->ctx->task != ctx->task)
+ goto err_context;
+
+ /*
+ * Do not allow to attach to a group in a different task
+ * or CPU context. If we're moving SW events, we'll fix
+ * this up later, so allow that.
+ */
+ if (!move_group && group_leader->ctx != ctx)
+ goto err_context;
/*
* Only a group leader can be exclusive or pinned
@@ -8715,8 +8745,26 @@
}
if (move_group) {
- gctx = group_leader->ctx;
- mutex_lock_double(&gctx->mutex, &ctx->mutex);
+ gctx = __perf_event_ctx_lock_double(group_leader, ctx);
+
+ /*
+ * Check if we raced against another sys_perf_event_open() call
+ * moving the software group underneath us.
+ */
+ if (!(group_leader->group_flags & PERF_GROUP_SOFTWARE)) {
+ /*
+ * If someone moved the group out from under us, check
+ * if this new event wound up on the same ctx, if so
+ * its the regular !move_group case, otherwise fail.
+ */
+ if (gctx != ctx) {
+ err = -EINVAL;
+ goto err_locked;
+ } else {
+ perf_event_ctx_unlock(group_leader, gctx);
+ move_group = 0;
+ }
+ }
} else {
mutex_lock(&ctx->mutex);
}
@@ -8811,7 +8859,7 @@
perf_unpin_context(ctx);
if (move_group)
- mutex_unlock(&gctx->mutex);
+ perf_event_ctx_unlock(group_leader, gctx);
mutex_unlock(&ctx->mutex);
if (group_leader)
mutex_unlock(&group_leader->group_leader_mutex);
@@ -8841,7 +8889,7 @@
err_locked:
if (move_group)
- mutex_unlock(&gctx->mutex);
+ perf_event_ctx_unlock(group_leader, gctx);
mutex_unlock(&ctx->mutex);
/* err_file: */
fput(event_file);
@@ -9463,7 +9511,7 @@
ret = inherit_task_group(event, parent, parent_ctx,
child, ctxn, &inherited_all);
if (ret)
- break;
+ goto out_unlock;
}
/*
@@ -9479,7 +9527,7 @@
ret = inherit_task_group(event, parent, parent_ctx,
child, ctxn, &inherited_all);
if (ret)
- break;
+ goto out_unlock;
}
raw_spin_lock_irqsave(&parent_ctx->lock, flags);
@@ -9507,6 +9555,7 @@
}
raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
+out_unlock:
mutex_unlock(&parent_ctx->mutex);
perf_unpin_context(parent_ctx);
diff --git a/kernel/exit.c b/kernel/exit.c
index 4ed83a1..3fb5112 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -54,6 +54,7 @@
#include <linux/writeback.h>
#include <linux/shm.h>
#include <linux/kcov.h>
+#include <linux/cpufreq.h>
#include "sched/tune.h"
@@ -173,6 +174,9 @@
{
struct task_struct *leader;
int zap_leader;
+#ifdef CONFIG_CPU_FREQ_STAT
+ cpufreq_task_stats_exit(p);
+#endif
repeat:
/* don't need to get the RCU readlock here - the process is dead and
* can't be modifying its own credentials. But shut RCU-lockdep up */
diff --git a/kernel/extable.c b/kernel/extable.c
index e820cce..4f06fc3 100644
--- a/kernel/extable.c
+++ b/kernel/extable.c
@@ -66,7 +66,7 @@
return 0;
}
-int core_kernel_text(unsigned long addr)
+int notrace core_kernel_text(unsigned long addr)
{
if (addr >= (unsigned long)_stext &&
addr < (unsigned long)_etext)
diff --git a/kernel/fork.c b/kernel/fork.c
index 9dd4aad..737ed01 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -149,18 +149,18 @@
}
#endif
-void __weak arch_release_thread_info(struct thread_info *ti)
+void __weak arch_release_thread_stack(unsigned long *stack)
{
}
-#ifndef CONFIG_ARCH_THREAD_INFO_ALLOCATOR
+#ifndef CONFIG_ARCH_THREAD_STACK_ALLOCATOR
/*
* Allocate pages if THREAD_SIZE is >= PAGE_SIZE, otherwise use a
* kmemcache based allocator.
*/
# if THREAD_SIZE >= PAGE_SIZE
-static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
+static unsigned long *alloc_thread_stack_node(struct task_struct *tsk,
int node)
{
struct page *page = alloc_kmem_pages_node(node, THREADINFO_GFP,
@@ -169,30 +169,31 @@
return page ? page_address(page) : NULL;
}
-static inline void free_thread_info(struct thread_info *ti)
+static inline void free_thread_stack(unsigned long *stack)
{
- kasan_alloc_pages(virt_to_page(ti), THREAD_SIZE_ORDER);
- free_kmem_pages((unsigned long)ti, THREAD_SIZE_ORDER);
+ struct page *page = virt_to_page(stack);
+
+ __free_kmem_pages(page, THREAD_SIZE_ORDER);
}
# else
-static struct kmem_cache *thread_info_cache;
+static struct kmem_cache *thread_stack_cache;
-static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
+static struct thread_info *alloc_thread_stack_node(struct task_struct *tsk,
int node)
{
- return kmem_cache_alloc_node(thread_info_cache, THREADINFO_GFP, node);
+ return kmem_cache_alloc_node(thread_stack_cache, THREADINFO_GFP, node);
}
-static void free_thread_info(struct thread_info *ti)
+static void free_stack(unsigned long *stack)
{
- kmem_cache_free(thread_info_cache, ti);
+ kmem_cache_free(thread_stack_cache, stack);
}
-void thread_info_cache_init(void)
+void thread_stack_cache_init(void)
{
- thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
+ thread_stack_cache = kmem_cache_create("thread_stack", THREAD_SIZE,
THREAD_SIZE, 0, NULL);
- BUG_ON(thread_info_cache == NULL);
+ BUG_ON(thread_stack_cache == NULL);
}
# endif
#endif
@@ -215,9 +216,9 @@
/* SLAB cache for mm_struct structures (tsk->mm) */
static struct kmem_cache *mm_cachep;
-static void account_kernel_stack(struct thread_info *ti, int account)
+static void account_kernel_stack(unsigned long *stack, int account)
{
- struct zone *zone = page_zone(virt_to_page(ti));
+ struct zone *zone = page_zone(virt_to_page(stack));
mod_zone_page_state(zone, NR_KERNEL_STACK, account);
}
@@ -225,8 +226,8 @@
void free_task(struct task_struct *tsk)
{
account_kernel_stack(tsk->stack, -1);
- arch_release_thread_info(tsk->stack);
- free_thread_info(tsk->stack);
+ arch_release_thread_stack(tsk->stack);
+ free_thread_stack(tsk->stack);
rt_mutex_debug_task_free(tsk);
ftrace_graph_exit_task(tsk);
put_seccomp_filter(tsk);
@@ -334,26 +335,27 @@
*stackend = STACK_END_MAGIC; /* for overflow detection */
}
-static struct task_struct *dup_task_struct(struct task_struct *orig)
+static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
{
struct task_struct *tsk;
- struct thread_info *ti;
- int node = tsk_fork_get_node(orig);
+ unsigned long *stack;
int err;
+ if (node == NUMA_NO_NODE)
+ node = tsk_fork_get_node(orig);
tsk = alloc_task_struct_node(node);
if (!tsk)
return NULL;
- ti = alloc_thread_info_node(tsk, node);
- if (!ti)
+ stack = alloc_thread_stack_node(tsk, node);
+ if (!stack)
goto free_tsk;
err = arch_dup_task_struct(tsk, orig);
if (err)
- goto free_ti;
+ goto free_stack;
- tsk->stack = ti;
+ tsk->stack = stack;
#ifdef CONFIG_SECCOMP
/*
* We must handle setting up seccomp filters once we're under
@@ -370,7 +372,7 @@
set_task_stack_end_magic(tsk);
#ifdef CONFIG_CC_STACKPROTECTOR
- tsk->stack_canary = get_random_int();
+ tsk->stack_canary = get_random_long();
#endif
/*
@@ -385,14 +387,14 @@
tsk->task_frag.page = NULL;
tsk->wake_q.next = NULL;
- account_kernel_stack(ti, 1);
+ account_kernel_stack(stack, 1);
kcov_task_init(tsk);
return tsk;
-free_ti:
- free_thread_info(ti);
+free_stack:
+ free_thread_stack(stack);
free_tsk:
free_task_struct(tsk);
return NULL;
@@ -834,8 +836,7 @@
mm = get_task_mm(task);
if (mm && mm != current->mm &&
- !ptrace_may_access(task, mode) &&
- !capable(CAP_SYS_RESOURCE)) {
+ !ptrace_may_access(task, mode)) {
mmput(mm);
mm = ERR_PTR(-EACCES);
}
@@ -1279,7 +1280,8 @@
int __user *child_tidptr,
struct pid *pid,
int trace,
- unsigned long tls)
+ unsigned long tls,
+ int node)
{
int retval;
struct task_struct *p;
@@ -1332,7 +1334,7 @@
goto fork_out;
retval = -ENOMEM;
- p = dup_task_struct(current);
+ p = dup_task_struct(current, node);
if (!p)
goto fork_out;
@@ -1597,11 +1599,13 @@
*/
recalc_sigpending();
if (signal_pending(current)) {
- spin_unlock(&current->sighand->siglock);
- write_unlock_irq(&tasklist_lock);
retval = -ERESTARTNOINTR;
goto bad_fork_cancel_cgroup;
}
+ if (unlikely(!(ns_of_pid(pid)->nr_hashed & PIDNS_HASH_ADDING))) {
+ retval = -ENOMEM;
+ goto bad_fork_cancel_cgroup;
+ }
if (likely(p->pid)) {
ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
@@ -1652,6 +1656,8 @@
return p;
bad_fork_cancel_cgroup:
+ spin_unlock(&current->sighand->siglock);
+ write_unlock_irq(&tasklist_lock);
cgroup_cancel_fork(p, cgrp_ss_priv);
bad_fork_free_pid:
threadgroup_change_end(current);
@@ -1708,7 +1714,8 @@
struct task_struct *fork_idle(int cpu)
{
struct task_struct *task;
- task = copy_process(CLONE_VM, 0, 0, NULL, &init_struct_pid, 0, 0);
+ task = copy_process(CLONE_VM, 0, 0, NULL, &init_struct_pid, 0, 0,
+ cpu_to_node(cpu));
if (!IS_ERR(task)) {
init_idle_pids(task->pids);
init_idle(task, cpu);
@@ -1753,7 +1760,7 @@
}
p = copy_process(clone_flags, stack_start, stack_size,
- child_tidptr, NULL, trace, tls);
+ child_tidptr, NULL, trace, tls, NUMA_NO_NODE);
/*
* Do this prior waking up the new thread - the thread pointer
* might get invalid after that point, if the thread exits quickly.
diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
index 7080ae1..f850e90 100644
--- a/kernel/gcov/base.c
+++ b/kernel/gcov/base.c
@@ -98,6 +98,12 @@
}
EXPORT_SYMBOL(__gcov_merge_icall_topn);
+void __gcov_exit(void)
+{
+ /* Unused. */
+}
+EXPORT_SYMBOL(__gcov_exit);
+
/**
* gcov_enable_events - enable event reporting through gcov_event()
*
diff --git a/kernel/gcov/gcc_4_7.c b/kernel/gcov/gcc_4_7.c
index e25e92f..46a18e7 100644
--- a/kernel/gcov/gcc_4_7.c
+++ b/kernel/gcov/gcc_4_7.c
@@ -18,7 +18,9 @@
#include <linux/vmalloc.h>
#include "gcov.h"
-#if __GNUC__ == 5 && __GNUC_MINOR__ >= 1
+#if (__GNUC__ >= 7)
+#define GCOV_COUNTERS 9
+#elif (__GNUC__ > 5) || (__GNUC__ == 5 && __GNUC_MINOR__ >= 1)
#define GCOV_COUNTERS 10
#elif __GNUC__ == 4 && __GNUC_MINOR__ >= 9
#define GCOV_COUNTERS 9
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index b1005c3..f0fa2d9 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -839,8 +839,8 @@
if (!desc)
return;
- __irq_do_set_handler(desc, handle, 1, NULL);
desc->irq_common_data.handler_data = data;
+ __irq_do_set_handler(desc, handle, 1, NULL);
irq_put_desc_busunlock(desc, flags);
}
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index e5c70dc..2c2effd 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -1305,8 +1305,10 @@
ret = __irq_set_trigger(desc,
new->flags & IRQF_TRIGGER_MASK);
- if (ret)
+ if (ret) {
+ irq_release_resources(desc);
goto out_mask;
+ }
}
desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index d10ab6b..6957635 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -563,7 +563,7 @@
}
/* Wait for completing optimization and unoptimization */
-static void wait_for_kprobe_optimizer(void)
+void wait_for_kprobe_optimizer(void)
{
mutex_lock(&kprobe_mutex);
diff --git a/kernel/kthread.c b/kernel/kthread.c
index da270a1..d9b0be5 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -18,6 +18,7 @@
#include <linux/freezer.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
+#include <linux/cgroup.h>
#include <trace/events/sched.h>
static DEFINE_SPINLOCK(kthread_create_lock);
@@ -64,7 +65,7 @@
static struct kthread *to_live_kthread(struct task_struct *k)
{
struct completion *vfork = ACCESS_ONCE(k->vfork_done);
- if (likely(vfork))
+ if (likely(vfork) && try_get_task_stack(k))
return __to_kthread(vfork);
return NULL;
}
@@ -205,6 +206,7 @@
ret = -EINTR;
if (!test_bit(KTHREAD_SHOULD_STOP, &self.flags)) {
+ cgroup_kthread_ready();
__kthread_parkme(&self);
ret = threadfn(data);
}
@@ -425,8 +427,10 @@
{
struct kthread *kthread = to_live_kthread(k);
- if (kthread)
+ if (kthread) {
__kthread_unpark(k, kthread);
+ put_task_stack(k);
+ }
}
EXPORT_SYMBOL_GPL(kthread_unpark);
@@ -455,6 +459,7 @@
wait_for_completion(&kthread->parked);
}
}
+ put_task_stack(k);
ret = 0;
}
return ret;
@@ -490,6 +495,7 @@
__kthread_unpark(k, kthread);
wake_up_process(k);
wait_for_completion(&kthread->exited);
+ put_task_stack(k);
}
ret = k->exit_code;
put_task_struct(k);
@@ -510,6 +516,7 @@
set_mems_allowed(node_states[N_MEMORY]);
current->flags |= PF_NOFREEZE;
+ cgroup_init_kthreadd();
for (;;) {
set_current_state(TASK_INTERRUPTIBLE);
diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c
index 8ef1919..d580b7d6e 100644
--- a/kernel/locking/locktorture.c
+++ b/kernel/locking/locktorture.c
@@ -776,6 +776,8 @@
else
lock_torture_print_module_parms(cxt.cur_ops,
"End of test: SUCCESS");
+ kfree(cxt.lwsa);
+ kfree(cxt.lrsa);
torture_cleanup_end();
}
@@ -917,6 +919,8 @@
GFP_KERNEL);
if (reader_tasks == NULL) {
VERBOSE_TOROUT_ERRSTRING("reader_tasks: Out of memory");
+ kfree(writer_tasks);
+ writer_tasks = NULL;
firsterr = -ENOMEM;
goto unwind;
}
diff --git a/kernel/locking/mutex-debug.c b/kernel/locking/mutex-debug.c
index 3ef3736..9c951fa 100644
--- a/kernel/locking/mutex-debug.c
+++ b/kernel/locking/mutex-debug.c
@@ -49,21 +49,21 @@
}
void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
- struct thread_info *ti)
+ struct task_struct *task)
{
SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
/* Mark the current thread as blocked on the lock: */
- ti->task->blocked_on = waiter;
+ task->blocked_on = waiter;
}
void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
- struct thread_info *ti)
+ struct task_struct *task)
{
DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
- ti->task->blocked_on = NULL;
+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
+ task->blocked_on = NULL;
list_del_init(&waiter->list);
waiter->task = NULL;
diff --git a/kernel/locking/mutex-debug.h b/kernel/locking/mutex-debug.h
index 0799fd3..d06ae3b 100644
--- a/kernel/locking/mutex-debug.h
+++ b/kernel/locking/mutex-debug.h
@@ -20,9 +20,9 @@
extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
extern void debug_mutex_add_waiter(struct mutex *lock,
struct mutex_waiter *waiter,
- struct thread_info *ti);
+ struct task_struct *task);
extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
- struct thread_info *ti);
+ struct task_struct *task);
extern void debug_mutex_unlock(struct mutex *lock);
extern void debug_mutex_init(struct mutex *lock, const char *name,
struct lock_class_key *key);
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 14b9cca..bf5277e 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -549,7 +549,7 @@
goto skip_wait;
debug_mutex_lock_common(lock, &waiter);
- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
+ debug_mutex_add_waiter(lock, &waiter, task);
/* add waiting tasks to the end of the waitqueue (FIFO): */
list_add_tail(&waiter.list, &lock->wait_list);
@@ -596,7 +596,7 @@
}
__set_task_state(task, TASK_RUNNING);
- mutex_remove_waiter(lock, &waiter, current_thread_info());
+ mutex_remove_waiter(lock, &waiter, task);
/* set it to 0 if there are no waiters left: */
if (likely(list_empty(&lock->wait_list)))
atomic_set(&lock->count, 0);
@@ -617,7 +617,7 @@
return 0;
err:
- mutex_remove_waiter(lock, &waiter, task_thread_info(task));
+ mutex_remove_waiter(lock, &waiter, task);
spin_unlock_mutex(&lock->wait_lock, flags);
debug_mutex_free_waiter(&waiter);
mutex_release(&lock->dep_map, 1, ip);
diff --git a/kernel/locking/mutex.h b/kernel/locking/mutex.h
index 5cda397..a68bae5 100644
--- a/kernel/locking/mutex.h
+++ b/kernel/locking/mutex.h
@@ -13,7 +13,7 @@
do { spin_lock(lock); (void)(flags); } while (0)
#define spin_unlock_mutex(lock, flags) \
do { spin_unlock(lock); (void)(flags); } while (0)
-#define mutex_remove_waiter(lock, waiter, ti) \
+#define mutex_remove_waiter(lock, waiter, task) \
__list_del((waiter)->list.prev, (waiter)->list.next)
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
diff --git a/kernel/padata.c b/kernel/padata.c
index b38bea9..ecc7b3f 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -189,19 +189,20 @@
reorder = &next_queue->reorder;
+ spin_lock(&reorder->lock);
if (!list_empty(&reorder->list)) {
padata = list_entry(reorder->list.next,
struct padata_priv, list);
- spin_lock(&reorder->lock);
list_del_init(&padata->list);
atomic_dec(&pd->reorder_objects);
- spin_unlock(&reorder->lock);
pd->processed++;
+ spin_unlock(&reorder->lock);
goto out;
}
+ spin_unlock(&reorder->lock);
if (__this_cpu_read(pd->pqueue->cpu_index) == next_queue->cpu_index) {
padata = ERR_PTR(-ENODATA);
@@ -356,7 +357,7 @@
cpumask_and(pd->cpumask.pcpu, pcpumask, cpu_online_mask);
if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL)) {
- free_cpumask_var(pd->cpumask.cbcpu);
+ free_cpumask_var(pd->cpumask.pcpu);
return -ENOMEM;
}
diff --git a/kernel/panic.c b/kernel/panic.c
index 982a523..6792544 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -172,7 +172,7 @@
* Delay timeout seconds before rebooting the machine.
* We can't use the "normal" timers since we just panicked.
*/
- pr_emerg("Rebooting in %d seconds..", panic_timeout);
+ pr_emerg("Rebooting in %d seconds..\n", panic_timeout);
for (i = 0; i < panic_timeout * 1000; i += PANIC_TIMER_STEP) {
touch_nmi_watchdog();
diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
index a65ba13..567ecc8 100644
--- a/kernel/pid_namespace.c
+++ b/kernel/pid_namespace.c
@@ -255,7 +255,7 @@
* if reparented.
*/
for (;;) {
- set_current_state(TASK_UNINTERRUPTIBLE);
+ set_current_state(TASK_INTERRUPTIBLE);
if (pid_ns->nr_hashed == init_pids)
break;
schedule();
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 93aaa31..0ff3a4d 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -503,7 +503,7 @@
type != SYSLOG_ACTION_SIZE_BUFFER;
}
-int check_syslog_permissions(int type, int source)
+static int check_syslog_permissions(int type, int source)
{
/*
* If this is from /proc/kmsg and we've already opened it, then we've
@@ -531,7 +531,6 @@
ok:
return security_syslog(type);
}
-EXPORT_SYMBOL_GPL(check_syslog_permissions);
static void append_char(char **pp, char *e, char c)
{
@@ -3237,9 +3236,8 @@
{
dump_stack_print_info(log_lvl);
- printk("%stask: %pP ti: %pP task.ti: %pP\n",
- log_lvl, current, current_thread_info(),
- task_thread_info(current));
+ printk("%stask: %p task.stack: %p\n",
+ log_lvl, current, task_stack_page(current));
}
#endif
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index a46c40b..5e2cd10 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -28,19 +28,25 @@
#include <linux/compat.h>
+void __ptrace_link(struct task_struct *child, struct task_struct *new_parent,
+ const struct cred *ptracer_cred)
+{
+ BUG_ON(!list_empty(&child->ptrace_entry));
+ list_add(&child->ptrace_entry, &new_parent->ptraced);
+ child->parent = new_parent;
+ child->ptracer_cred = get_cred(ptracer_cred);
+}
+
/*
* ptrace a task: make the debugger its new parent and
* move it to the ptrace list.
*
* Must be called with the tasklist lock write-held.
*/
-void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
+static void ptrace_link(struct task_struct *child, struct task_struct *new_parent)
{
- BUG_ON(!list_empty(&child->ptrace_entry));
- list_add(&child->ptrace_entry, &new_parent->ptraced);
- child->parent = new_parent;
rcu_read_lock();
- child->ptracer_cred = get_cred(__task_cred(new_parent));
+ __ptrace_link(child, new_parent, __task_cred(new_parent));
rcu_read_unlock();
}
@@ -151,11 +157,17 @@
WARN_ON(!task->ptrace || task->parent != current);
+ /*
+ * PTRACE_LISTEN can allow ptrace_trap_notify to wake us up remotely.
+ * Recheck state under the lock to close this race.
+ */
spin_lock_irq(&task->sighand->siglock);
- if (__fatal_signal_pending(task))
- wake_up_state(task, __TASK_TRACED);
- else
- task->state = TASK_TRACED;
+ if (task->state == __TASK_TRACED) {
+ if (__fatal_signal_pending(task))
+ wake_up_state(task, __TASK_TRACED);
+ else
+ task->state = TASK_TRACED;
+ }
spin_unlock_irq(&task->sighand->siglock);
}
@@ -347,7 +359,7 @@
flags |= PT_SEIZED;
task->ptrace = flags;
- __ptrace_link(task, current);
+ ptrace_link(task, current);
/* SEIZE doesn't trap tracee on attach */
if (!seize)
@@ -414,7 +426,7 @@
*/
if (!ret && !(current->real_parent->flags & PF_EXITING)) {
current->ptrace = PT_PTRACED;
- __ptrace_link(current, current->real_parent);
+ ptrace_link(current, current->real_parent);
}
}
write_unlock_irq(&tasklist_lock);
diff --git a/kernel/resource.c b/kernel/resource.c
index 4c9835c..c09d484 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -105,16 +105,25 @@
{
struct resource *root = m->private;
struct resource *r = v, *p;
+ unsigned long long start, end;
int width = root->end < 0x10000 ? 4 : 8;
int depth;
for (depth = 0, p = r; depth < MAX_IORES_LEVEL; depth++, p = p->parent)
if (p->parent == root)
break;
+
+ if (file_ns_capable(m->file, &init_user_ns, CAP_SYS_ADMIN)) {
+ start = r->start;
+ end = r->end;
+ } else {
+ start = end = 0;
+ }
+
seq_printf(m, "%*s%0*llx-%0*llx : %s\n",
depth * 2, "",
- width, (unsigned long long) r->start,
- width, (unsigned long long) r->end,
+ width, start,
+ width, end,
r->name ? r->name : "<BAD>");
return 0;
}
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 566c2d4..80740e9 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -75,6 +75,7 @@
#include <linux/binfmts.h>
#include <linux/context_tracking.h>
#include <linux/compiler.h>
+#include <linux/cpufreq.h>
#include <asm/switch_to.h>
#include <asm/tlb.h>
@@ -2178,10 +2179,15 @@
memset(&p->se.statistics, 0, sizeof(p->se.statistics));
#endif
+#ifdef CONFIG_CPU_FREQ_STAT
+ cpufreq_task_stats_init(p);
+#endif
+
RB_CLEAR_NODE(&p->dl.rb_node);
init_dl_task_timer(&p->dl);
__dl_clear_params(p);
+ init_rt_schedtune_timer(&p->rt);
INIT_LIST_HEAD(&p->rt.run_list);
#ifdef CONFIG_PREEMPT_NOTIFIERS
@@ -3387,11 +3393,15 @@
prev->waiting_time = now;
if (!is_idle_task(next) &&
now > next->waiting_time + NSEC_PER_SEC) {
- printk("task wait timing: next->pid: %d, "
- "next->tid: %d, next->prio: %d, "
- "next->vruntime: %llu, "
+ printk("task wait timing: "
+ "prev->tgid: %d, prev->pid: %d, "
+ "prev->prio: %d, prev->vruntime: %llu, "
+ "next->tgid: %d, next->pid: %d, "
+ "next->prio: %d, next->vruntime: %llu, "
"now: %llu, ready: %llu, "
"waiting time: %llu\n",
+ prev->tgid, prev->pid, prev->prio,
+ prev->se.vruntime,
next->tgid, next->pid, next->prio,
next->se.vruntime,
now, next->waiting_time,
@@ -6415,6 +6425,9 @@
* Build an iteration mask that can exclude certain CPUs from the upwards
* domain traversal.
*
+ * Only CPUs that can arrive at this group should be considered to continue
+ * balancing.
+ *
* Asymmetric node setups can result in situations where the domain tree is of
* unequal depth, make sure to skip domains that already cover the entire
* range.
@@ -6426,18 +6439,31 @@
*/
static void build_group_mask(struct sched_domain *sd, struct sched_group *sg)
{
- const struct cpumask *span = sched_domain_span(sd);
+ const struct cpumask *sg_span = sched_group_cpus(sg);
struct sd_data *sdd = sd->private;
struct sched_domain *sibling;
int i;
- for_each_cpu(i, span) {
+ for_each_cpu(i, sg_span) {
sibling = *per_cpu_ptr(sdd->sd, i);
- if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
+
+ /*
+ * Can happen in the asymmetric case, where these siblings are
+ * unused. The mask will not be empty because those CPUs that
+ * do have the top domain _should_ span the domain.
+ */
+ if (!sibling->child)
+ continue;
+
+ /* If we would not end up here, we can't continue from here */
+ if (!cpumask_equal(sg_span, sched_domain_span(sibling->child)))
continue;
cpumask_set_cpu(i, sched_group_mask(sg));
}
+
+ /* We must not have empty masks here */
+ WARN_ON_ONCE(cpumask_empty(sched_group_mask(sg)));
}
/*
@@ -8641,11 +8667,20 @@
if (IS_ERR(tg))
return ERR_PTR(-ENOMEM);
- sched_online_group(tg, parent);
-
return &tg->css;
}
+/* Expose task group only after completing cgroup initialization */
+static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
+{
+ struct task_group *tg = css_tg(css);
+ struct task_group *parent = css_tg(css->parent);
+
+ if (parent)
+ sched_online_group(tg, parent);
+ return 0;
+}
+
static void cpu_cgroup_css_released(struct cgroup_subsys_state *css)
{
struct task_group *tg = css_tg(css);
@@ -9020,6 +9055,7 @@
struct cgroup_subsys cpu_cgrp_subsys = {
.css_alloc = cpu_cgroup_css_alloc,
+ .css_online = cpu_cgroup_css_online,
.css_released = cpu_cgroup_css_released,
.css_free = cpu_cgroup_css_free,
.fork = cpu_cgroup_fork,
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 11c6dea..073a07a 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -303,11 +303,10 @@
sugov_update_commit(sg_policy, time, next_f);
}
-static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu)
+static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
{
struct sugov_policy *sg_policy = sg_cpu->sg_policy;
struct cpufreq_policy *policy = sg_policy->policy;
- u64 last_freq_update_time = sg_policy->last_freq_update_time;
unsigned long util = 0, max = 1;
unsigned int j;
@@ -323,7 +322,7 @@
* enough, don't take the CPU into account as it probably is
* idle now (and clear iowait_boost for it).
*/
- delta_ns = last_freq_update_time - j_sg_cpu->last_update;
+ delta_ns = time - j_sg_cpu->last_update;
if (delta_ns > TICK_NSEC) {
j_sg_cpu->iowait_boost = 0;
continue;
@@ -367,7 +366,7 @@
if (flags & SCHED_CPUFREQ_DL)
next_f = sg_policy->policy->cpuinfo.max_freq;
else
- next_f = sugov_next_freq_shared(sg_cpu);
+ next_f = sugov_next_freq_shared(sg_cpu, time);
sugov_update_commit(sg_policy, time, next_f);
}
diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c
index d6f5d00..54f01e0 100644
--- a/kernel/sched/cpupri.c
+++ b/kernel/sched/cpupri.c
@@ -63,9 +63,8 @@
while (cpu < nr_cpu_ids) {
/* unlocked access */
struct task_struct *task = READ_ONCE(cpu_rq(cpu)->curr);
- if (task_may_not_preempt(task, cpu)) {
+ if (task_may_not_preempt(task, cpu))
cpumask_clear_cpu(cpu, lowest_mask);
- }
cpu = cpumask_next(cpu, lowest_mask);
}
}
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index acde1d7..c92ddb2 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -4,6 +4,7 @@
#include <linux/kernel_stat.h>
#include <linux/static_key.h>
#include <linux/context_tracking.h>
+#include <linux/cpufreq.h>
#include "sched.h"
#include "walt.h"
@@ -165,6 +166,11 @@
/* Account for user time used */
acct_account_cputime(p);
+
+#ifdef CONFIG_CPU_FREQ_STAT
+ /* Account power usage for system time */
+ acct_update_power(p, cputime);
+#endif
}
/*
@@ -215,6 +221,11 @@
/* Account for system time used */
acct_account_cputime(p);
+
+#ifdef CONFIG_CPU_FREQ_STAT
+ /* Account power usage for system time */
+ acct_update_power(p, cputime);
+#endif
}
/*
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 609acc9..1341aca 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3170,6 +3170,7 @@
*/
#define UPDATE_TG 0x1
#define SKIP_AGE_LOAD 0x2
+#define SKIP_CPUFREQ 0x4
/* Update task and its cfs_rq load average */
static inline void update_load_avg(struct sched_entity *se, int flags)
@@ -3190,7 +3191,7 @@
cfs_rq->curr == se, NULL);
}
- decayed = update_cfs_rq_load_avg(now, cfs_rq, true);
+ decayed = update_cfs_rq_load_avg(now, cfs_rq, !(flags & SKIP_CPUFREQ));
decayed |= propagate_entity_load_avg(se);
if (decayed && (flags & UPDATE_TG))
@@ -3363,6 +3364,7 @@
#define UPDATE_TG 0x0
#define SKIP_AGE_LOAD 0x0
+#define SKIP_CPUFREQ 0x0
static inline void update_load_avg(struct sched_entity *se, int not_used1){}
static inline void
@@ -3579,6 +3581,8 @@
static void
dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
+ int update_flags;
+
/*
* Update run-time statistics of the 'current'.
*/
@@ -3592,7 +3596,12 @@
* - For group entity, update its weight to reflect the new share
* of its group cfs_rq.
*/
- update_load_avg(se, UPDATE_TG);
+ update_flags = UPDATE_TG;
+
+ if (flags & DEQUEUE_IDLE)
+ update_flags |= SKIP_CPUFREQ;
+
+ update_load_avg(se, update_flags);
dequeue_entity_load_avg(cfs_rq, se);
update_stats_dequeue(cfs_rq, se);
@@ -4396,6 +4405,26 @@
if (!cfs_bandwidth_used())
return;
+ /* Synchronize hierarchical throttle counter: */
+ if (unlikely(!cfs_rq->throttle_uptodate)) {
+ struct rq *rq = rq_of(cfs_rq);
+ struct cfs_rq *pcfs_rq;
+ struct task_group *tg;
+
+ cfs_rq->throttle_uptodate = 1;
+
+ /* Get closest up-to-date node, because leaves go first: */
+ for (tg = cfs_rq->tg->parent; tg; tg = tg->parent) {
+ pcfs_rq = tg->cfs_rq[cpu_of(rq)];
+ if (pcfs_rq->throttle_uptodate)
+ break;
+ }
+ if (tg) {
+ cfs_rq->throttle_count = pcfs_rq->throttle_count;
+ cfs_rq->throttled_clock_task = rq_clock_task(rq);
+ }
+ }
+
/* an active group must be handled by the update_curr()->put() path */
if (!cfs_rq->runtime_enabled || cfs_rq->curr)
return;
@@ -4772,6 +4801,9 @@
struct sched_entity *se = &p->se;
int task_sleep = flags & DEQUEUE_SLEEP;
+ if (task_sleep && rq->nr_running == 1)
+ flags |= DEQUEUE_IDLE;
+
for_each_sched_entity(se) {
cfs_rq = cfs_rq_of(se);
dequeue_entity(cfs_rq, se, flags);
@@ -4789,21 +4821,22 @@
/* Don't dequeue parent if it has other entities besides us */
if (cfs_rq->load.weight) {
+ /* Avoid re-evaluating load for this entity: */
+ se = parent_entity(se);
/*
* Bias pick_next to pick a task from this cfs_rq, as
* p is sleeping when it is within its sched_slice.
*/
- if (task_sleep && parent_entity(se))
- set_next_buddy(parent_entity(se));
-
- /* avoid re-evaluating load for this entity */
- se = parent_entity(se);
+ if (task_sleep && se && !throttled_hierarchy(cfs_rq))
+ set_next_buddy(se);
break;
}
flags |= DEQUEUE_SLEEP;
}
for_each_sched_entity(se) {
+ int update_flags;
+
cfs_rq = cfs_rq_of(se);
cfs_rq->h_nr_running--;
walt_dec_cfs_cumulative_runnable_avg(cfs_rq, p);
@@ -4811,7 +4844,12 @@
if (cfs_rq_throttled(cfs_rq))
break;
- update_load_avg(se, UPDATE_TG);
+ update_flags = UPDATE_TG;
+
+ if (flags & DEQUEUE_IDLE)
+ update_flags |= SKIP_CPUFREQ;
+
+ update_load_avg(se, update_flags);
update_cfs_shares(se);
}
diff --git a/kernel/sched/loadavg.c b/kernel/sched/loadavg.c
index b0b93fd..f8e8d68 100644
--- a/kernel/sched/loadavg.c
+++ b/kernel/sched/loadavg.c
@@ -201,8 +201,9 @@
struct rq *this_rq = this_rq();
/*
- * If we're still before the sample window, we're done.
+ * If we're still before the pending sample window, we're done.
*/
+ this_rq->calc_load_update = calc_load_update;
if (time_before(jiffies, this_rq->calc_load_update))
return;
@@ -211,7 +212,6 @@
* accounted through the nohz accounting, so skip the entire deal and
* sync up for the next window.
*/
- this_rq->calc_load_update = calc_load_update;
if (time_before(jiffies, this_rq->calc_load_update + 10))
this_rq->calc_load_update += LOAD_FREQ;
}
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 25499a3..9aceaea 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -8,6 +8,7 @@
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/irq_work.h>
+#include <linux/hrtimer.h>
#include "walt.h"
#include "tune.h"
@@ -987,6 +988,74 @@
return 0;
}
+/* TODO: Make configurable */
+#define RT_SCHEDTUNE_INTERVAL 50000000ULL
+
+static void sched_rt_update_capacity_req(struct rq *rq);
+
+static enum hrtimer_restart rt_schedtune_timer(struct hrtimer *timer)
+{
+ struct sched_rt_entity *rt_se = container_of(timer,
+ struct sched_rt_entity,
+ schedtune_timer);
+ struct task_struct *p = rt_task_of(rt_se);
+ struct rq *rq = task_rq(p);
+
+ raw_spin_lock(&rq->lock);
+
+ /*
+ * Nothing to do if:
+ * - task has switched runqueues
+ * - task isn't RT anymore
+ */
+ if (rq != task_rq(p) || (p->sched_class != &rt_sched_class))
+ goto out;
+
+ /*
+ * If task got enqueued back during callback time, it means we raced
+ * with the enqueue on another cpu, that's Ok, just do nothing as
+ * enqueue path would have tried to cancel us and we shouldn't run
+ * Also check the schedtune_enqueued flag as class-switch on a
+ * sleeping task may have already canceled the timer and done dq
+ */
+ if (p->on_rq || !rt_se->schedtune_enqueued)
+ goto out;
+
+ /*
+ * RT task is no longer active, cancel boost
+ */
+ rt_se->schedtune_enqueued = false;
+ schedtune_dequeue_task(p, cpu_of(rq));
+ sched_rt_update_capacity_req(rq);
+ cpufreq_update_this_cpu(rq, SCHED_CPUFREQ_RT);
+out:
+ raw_spin_unlock(&rq->lock);
+
+ /*
+ * This can free the task_struct if no more references.
+ */
+ put_task_struct(p);
+
+ return HRTIMER_NORESTART;
+}
+
+void init_rt_schedtune_timer(struct sched_rt_entity *rt_se)
+{
+ struct hrtimer *timer = &rt_se->schedtune_timer;
+
+ hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ timer->function = rt_schedtune_timer;
+ rt_se->schedtune_enqueued = false;
+}
+
+static void start_schedtune_timer(struct sched_rt_entity *rt_se)
+{
+ struct hrtimer *timer = &rt_se->schedtune_timer;
+
+ hrtimer_start(timer, ns_to_ktime(RT_SCHEDTUNE_INTERVAL),
+ HRTIMER_MODE_REL_PINNED);
+}
+
/*
* Update the current task's runtime statistics. Skip current tasks that
* are not in our scheduling class.
@@ -1308,6 +1377,25 @@
}
/*
+ * Keep track of whether each cpu has an RT task that will
+ * soon schedule on that core. The problem this is intended
+ * to address is that we want to avoid entering a non-preemptible
+ * softirq handler if we are about to schedule a real-time
+ * task on that core. Ideally, we could just check whether
+ * the RT runqueue on that core had a runnable task, but the
+ * window between choosing to schedule a real-time task
+ * on a core and actually enqueueing it on that run-queue
+ * is large enough to lose races at an unacceptably high rate.
+ *
+ * This variable attempts to reduce that window by indicating
+ * when we have decided to schedule an RT task on a core
+ * but not yet enqueued it.
+ * This variable is a heuristic only: it is not guaranteed
+ * to be correct and may be updated without synchronization.
+ */
+DEFINE_PER_CPU(bool, incoming_rt_task);
+
+/*
* Adding/removing a task to/from a priority array:
*/
static void
@@ -1321,10 +1409,38 @@
enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD);
walt_inc_cumulative_runnable_avg(rq, p);
- if (!task_current(rq, p) && p->nr_cpus_allowed > 1) {
+ if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
enqueue_pushable_task(rq, p);
- schedtune_enqueue_task(p, cpu_of(rq));
- }
+
+ *per_cpu_ptr(&incoming_rt_task, cpu_of(rq)) = false;
+
+ if (!schedtune_task_boost(p))
+ return;
+
+ /*
+ * If schedtune timer is active, that means a boost was already
+ * done, just cancel the timer so that deboost doesn't happen.
+ * Otherwise, increase the boost. If an enqueued timer was
+ * cancelled, put the task reference.
+ */
+ if (hrtimer_try_to_cancel(&rt_se->schedtune_timer) == 1)
+ put_task_struct(p);
+
+ /*
+ * schedtune_enqueued can be true in the following situation:
+ * enqueue_task_rt grabs rq lock before timer fires
+ * or before its callback acquires rq lock
+ * schedtune_enqueued can be false if timer callback is running
+ * and timer just released rq lock, or if the timer finished
+ * running and canceling the boost
+ */
+ if (rt_se->schedtune_enqueued)
+ return;
+
+ rt_se->schedtune_enqueued = true;
+ schedtune_enqueue_task(p, cpu_of(rq));
+ sched_rt_update_capacity_req(rq);
+ cpufreq_update_this_cpu(rq, SCHED_CPUFREQ_RT);
}
static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags)
@@ -1336,7 +1452,20 @@
walt_dec_cumulative_runnable_avg(rq, p);
dequeue_pushable_task(rq, p);
+
+ if (!rt_se->schedtune_enqueued)
+ return;
+
+ if (flags == DEQUEUE_SLEEP) {
+ get_task_struct(p);
+ start_schedtune_timer(rt_se);
+ return;
+ }
+
+ rt_se->schedtune_enqueued = false;
schedtune_dequeue_task(p, cpu_of(rq));
+ sched_rt_update_capacity_req(rq);
+ cpufreq_update_this_cpu(rq, SCHED_CPUFREQ_RT);
}
/*
@@ -1373,14 +1502,27 @@
requeue_task_rt(rq, rq->curr, 0);
}
+/*
+ * Return whether the given cpu has (or will shortly have) an RT task
+ * ready to run. NB: This is a heuristic and is subject to races.
+ */
+bool
+cpu_has_rt_task(int cpu)
+{
+ struct rq *rq = cpu_rq(cpu);
+ return rq->rt.rt_nr_running > 0 || per_cpu(incoming_rt_task, cpu);
+}
+
#ifdef CONFIG_SMP
static int find_lowest_rq(struct task_struct *task, int sync);
/*
* Return whether the task on the given cpu is currently non-preemptible
* while handling a potentially long softint, or if the task is likely
- * to block preemptions soon because it is a ksoftirq thread that is
- * handling slow softints.
+ * to block preemptions soon because (a) it is a ksoftirq thread that is
+ * handling slow softints, (b) it is idle and therefore likely to start
+ * processing the irq's immediately, (c) the cpu is currently handling
+ * hard irq's and will soon move on to the softirq handler.
*/
bool
task_may_not_preempt(struct task_struct *task, int cpu)
@@ -1389,14 +1531,42 @@
__IRQ_STAT(cpu, __softirq_pending);
struct task_struct *cpu_ksoftirqd = per_cpu(ksoftirqd, cpu);
return ((softirqs & LONG_SOFTIRQ_MASK) &&
- (task == cpu_ksoftirqd ||
- task_thread_info(task)->preempt_count & SOFTIRQ_MASK));
+ (task == cpu_ksoftirqd || is_idle_task(task) ||
+ (task_thread_info(task)->preempt_count
+ & (HARDIRQ_MASK | SOFTIRQ_MASK))));
+}
+
+/*
+ * Perform a schedtune dequeue and cancelation of boost timers if needed.
+ * Should be called only with the rq->lock held.
+ */
+static void schedtune_dequeue_rt(struct rq *rq, struct task_struct *p)
+{
+ struct sched_rt_entity *rt_se = &p->rt;
+
+ BUG_ON(!raw_spin_is_locked(&rq->lock));
+
+ if (!rt_se->schedtune_enqueued)
+ return;
+
+ /*
+ * Incase of class change cancel any active timers. If an enqueued
+ * timer was cancelled, put the task ref.
+ */
+ if (hrtimer_try_to_cancel(&rt_se->schedtune_timer) == 1)
+ put_task_struct(p);
+
+ /* schedtune_enqueued is true, deboost it */
+ rt_se->schedtune_enqueued = false;
+ schedtune_dequeue_task(p, task_cpu(p));
+ sched_rt_update_capacity_req(rq);
+ cpufreq_update_this_cpu(rq, SCHED_CPUFREQ_RT);
}
static int
select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
{
- struct task_struct *curr;
+ struct task_struct *curr, *tgt_task;
struct rq *rq;
bool may_not_preempt;
int target;
@@ -1413,6 +1583,17 @@
may_not_preempt = task_may_not_preempt(curr, cpu);
target = find_lowest_rq(p, sync);
+
+ /*
+ * Check once for losing a race with the other core's irq handler.
+ * This does not happen frequently, but it can avoid delaying
+ * the execution of the RT task in those cases.
+ */
+ if (target != -1) {
+ tgt_task = READ_ONCE(cpu_rq(target)->curr);
+ if (task_may_not_preempt(tgt_task, target))
+ target = find_lowest_rq(p, sync);
+ }
/*
* Possible race. Don't bother moving it if the
* destination CPU is not running a lower priority task.
@@ -1420,8 +1601,22 @@
if (target != -1 &&
(may_not_preempt || p->prio < cpu_rq(target)->rt.highest_prio.curr))
cpu = target;
+ *per_cpu_ptr(&incoming_rt_task, cpu) = true;
rcu_read_unlock();
out:
+ /*
+ * If previous CPU was different, make sure to cancel any active
+ * schedtune timers and deboost.
+ */
+ if (task_cpu(p) != cpu) {
+ unsigned long fl;
+ struct rq *prq = task_rq(p);
+
+ raw_spin_lock_irqsave(&prq->lock, fl);
+ schedtune_dequeue_rt(prq, p);
+ raw_spin_unlock_irqrestore(&prq->lock, fl);
+ }
+
return cpu;
}
@@ -1451,7 +1646,6 @@
requeue_task_rt(rq, p, 1);
resched_curr(rq);
}
-
#endif /* CONFIG_SMP */
/*
@@ -2315,6 +2509,13 @@
static void switched_from_rt(struct rq *rq, struct task_struct *p)
{
/*
+ * On class switch from rt, always cancel active schedtune timers,
+ * this handles the cases where we switch class for a task that is
+ * already rt-dequeued but has a running timer.
+ */
+ schedtune_dequeue_rt(rq, p);
+
+ /*
* If there are other RT tasks then we will reschedule
* and the scheduling of the other RT tasks will handle
* the balancing. But if we are the last RT task
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index f897ac8..c5deec5 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -422,7 +422,7 @@
u64 throttled_clock, throttled_clock_task;
u64 throttled_clock_task_time;
- int throttled, throttle_count;
+ int throttled, throttle_count, throttle_uptodate;
struct list_head throttled_list;
#endif /* CONFIG_CFS_BANDWIDTH */
#endif /* CONFIG_FAIR_GROUP_SCHED */
@@ -1027,7 +1027,11 @@
* per-task data have been completed by this moment.
*/
smp_wmb();
+#ifdef CONFIG_THREAD_INFO_IN_TASK
+ p->cpu = cpu;
+#else
task_thread_info(p)->cpu = cpu;
+#endif
p->wake_cpu = cpu;
#endif
}
@@ -1240,6 +1244,7 @@
#define DEQUEUE_SLEEP 0x01
#define DEQUEUE_SAVE 0x02 /* matches ENQUEUE_RESTORE */
#define DEQUEUE_MOVE 0x04 /* matches ENQUEUE_MOVE */
+#define DEQUEUE_IDLE 0x80 /* The last dequeue before IDLE */
#define ENQUEUE_WAKEUP 0x01
#define ENQUEUE_RESTORE 0x02
@@ -1409,6 +1414,7 @@
extern struct rt_bandwidth def_rt_bandwidth;
extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);
+extern void init_rt_schedtune_timer(struct sched_rt_entity *rt_se);
extern struct dl_bandwidth def_dl_bandwidth;
extern void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime);
diff --git a/kernel/signal.c b/kernel/signal.c
index f3f1f7a..5d50ea89 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -346,7 +346,7 @@
* fresh group stop. Read comment in do_signal_stop() for details.
*/
if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
- sig->flags = SIGNAL_STOP_STOPPED;
+ signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
return true;
}
return false;
@@ -503,7 +503,8 @@
return !tsk->ptrace;
}
-static void collect_signal(int sig, struct sigpending *list, siginfo_t *info)
+static void collect_signal(int sig, struct sigpending *list, siginfo_t *info,
+ bool *resched_timer)
{
struct sigqueue *q, *first = NULL;
@@ -525,6 +526,12 @@
still_pending:
list_del_init(&first->list);
copy_siginfo(info, &first->info);
+
+ *resched_timer =
+ (first->flags & SIGQUEUE_PREALLOC) &&
+ (info->si_code == SI_TIMER) &&
+ (info->si_sys_private);
+
__sigqueue_free(first);
} else {
/*
@@ -541,12 +548,12 @@
}
static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
- siginfo_t *info)
+ siginfo_t *info, bool *resched_timer)
{
int sig = next_signal(pending, mask);
if (sig)
- collect_signal(sig, pending, info);
+ collect_signal(sig, pending, info, resched_timer);
return sig;
}
@@ -558,15 +565,16 @@
*/
int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
{
+ bool resched_timer = false;
int signr;
/* We only dequeue private signals from ourselves, we don't let
* signalfd steal them
*/
- signr = __dequeue_signal(&tsk->pending, mask, info);
+ signr = __dequeue_signal(&tsk->pending, mask, info, &resched_timer);
if (!signr) {
signr = __dequeue_signal(&tsk->signal->shared_pending,
- mask, info);
+ mask, info, &resched_timer);
/*
* itimer signal ?
*
@@ -611,7 +619,7 @@
*/
current->jobctl |= JOBCTL_STOP_DEQUEUED;
}
- if ((info->si_code & __SI_MASK) == __SI_TIMER && info->si_sys_private) {
+ if (resched_timer) {
/*
* Release the siglock to ensure proper locking order
* of timer locks outside of siglocks. Note, we leave
@@ -837,7 +845,7 @@
* will take ->siglock, notice SIGNAL_CLD_MASK, and
* notify its parent. See get_signal_to_deliver().
*/
- signal->flags = why | SIGNAL_STOP_CONTINUED;
+ signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
signal->group_stop_count = 0;
signal->group_exit_code = 0;
}
diff --git a/kernel/softirq.c b/kernel/softirq.c
index f7fa896..9d16ef0 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -241,7 +241,7 @@
int max_restart = MAX_SOFTIRQ_RESTART;
struct softirq_action *h;
bool in_hardirq;
- __u32 pending;
+ __u32 pending, pending_now, pending_delay, pending_mask;
int softirq_bit;
/*
@@ -251,7 +251,20 @@
*/
current->flags &= ~PF_MEMALLOC;
+ /*
+ * If this is not the ksoftirqd thread,
+ * and there is an RT task that is running or is waiting to run,
+ * delay handling the long-running softirq handlers by leaving
+ * them for the ksoftirqd thread.
+ */
+ if (current != __this_cpu_read(ksoftirqd) &&
+ cpu_has_rt_task(smp_processor_id()))
+ pending_mask = LONG_SOFTIRQ_MASK;
+ else
+ pending_mask = 0;
pending = local_softirq_pending();
+ pending_delay = pending & pending_mask;
+ pending_now = pending & ~pending_mask;
account_irq_enter_time(current);
__local_bh_disable_ip(_RET_IP_, SOFTIRQ_OFFSET);
@@ -259,14 +272,14 @@
restart:
/* Reset the pending bitmask before enabling irqs */
- set_softirq_pending(0);
- __this_cpu_write(active_softirqs, pending);
+ __this_cpu_write(active_softirqs, pending_now);
+ set_softirq_pending(pending_delay);
local_irq_enable();
h = softirq_vec;
- while ((softirq_bit = ffs(pending))) {
+ while ((softirq_bit = ffs(pending_now))) {
unsigned int vec_nr;
int prev_count;
@@ -287,7 +300,7 @@
preempt_count_set(prev_count);
}
h++;
- pending >>= softirq_bit;
+ pending_now >>= softirq_bit;
}
__this_cpu_write(active_softirqs, 0);
@@ -295,11 +308,19 @@
local_irq_disable();
pending = local_softirq_pending();
+ pending_delay = pending & pending_mask;
+ pending_now = pending & ~pending_mask;
if (pending) {
- if (time_before(jiffies, end) && !need_resched() &&
- --max_restart)
+ if (pending_now && time_before(jiffies, end) &&
+ !need_resched() && --max_restart)
goto restart;
+ /*
+ * Wake up ksoftirqd to handle remaining softirq's, either
+ * because we are delaying a subset (pending_delayed)
+ * to avoid interrupting an RT task, or because we have
+ * exhausted the time limit.
+ */
wakeup_softirqd();
}
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 8216eb8..dfd8f6f 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -65,6 +65,7 @@
#include <linux/sched/sysctl.h>
#include <linux/kexec.h>
#include <linux/bpf.h>
+#include <linux/mount.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
@@ -1870,6 +1871,28 @@
.proc_handler = &pipe_proc_fn,
.extra1 = &pipe_min_size,
},
+ {
+ .procname = "pipe-user-pages-hard",
+ .data = &pipe_user_pages_hard,
+ .maxlen = sizeof(pipe_user_pages_hard),
+ .mode = 0644,
+ .proc_handler = proc_doulongvec_minmax,
+ },
+ {
+ .procname = "pipe-user-pages-soft",
+ .data = &pipe_user_pages_soft,
+ .maxlen = sizeof(pipe_user_pages_soft),
+ .mode = 0644,
+ .proc_handler = proc_doulongvec_minmax,
+ },
+ {
+ .procname = "mount-max",
+ .data = &sysctl_mount_max,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &one,
+ },
{ }
};
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index 2af5687..ceec77c 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -569,7 +569,7 @@
{
struct alarm_base *base = &alarm_bases[alarm->type];
- start = ktime_add(start, base->gettime());
+ start = ktime_add_safe(start, base->gettime());
alarm_start(alarm, start);
}
EXPORT_SYMBOL_GPL(alarm_start_relative);
@@ -655,7 +655,7 @@
overrun++;
}
- alarm->node.expires = ktime_add(alarm->node.expires, interval);
+ alarm->node.expires = ktime_add_safe(alarm->node.expires, interval);
return overrun;
}
EXPORT_SYMBOL_GPL(alarm_forward);
@@ -843,13 +843,22 @@
/* start the timer */
timr->it.alarm.interval = timespec_to_ktime(new_setting->it_interval);
+
+ /*
+ * Rate limit to the tick as a hot fix to prevent DOS. Will be
+ * mopped up later.
+ */
+ if (timr->it.alarm.interval.tv64 &&
+ ktime_to_ns(timr->it.alarm.interval) < TICK_NSEC)
+ timr->it.alarm.interval = ktime_set(0, TICK_NSEC);
+
exp = timespec_to_ktime(new_setting->it_value);
/* Convert (if necessary) to absolute time */
if (flags != TIMER_ABSTIME) {
ktime_t now;
now = alarm_bases[timr->it.alarm.alarmtimer.type].gettime();
- exp = ktime_add(now, exp);
+ exp = ktime_add_safe(now, exp);
}
alarm_start(&timr->it.alarm.alarmtimer, exp);
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 5fa544f..738f346 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -116,6 +116,26 @@
tk->offs_boot = ktime_add(tk->offs_boot, delta);
}
+/*
+ * tk_clock_read - atomic clocksource read() helper
+ *
+ * This helper is necessary to use in the read paths because, while the
+ * seqlock ensures we don't return a bad value while structures are updated,
+ * it doesn't protect from potential crashes. There is the possibility that
+ * the tkr's clocksource may change between the read reference, and the
+ * clock reference passed to the read function. This can cause crashes if
+ * the wrong clocksource is passed to the wrong read function.
+ * This isn't necessary to use when holding the timekeeper_lock or doing
+ * a read of the fast-timekeeper tkrs (which is protected by its own locking
+ * and update logic).
+ */
+static inline u64 tk_clock_read(struct tk_read_base *tkr)
+{
+ struct clocksource *clock = READ_ONCE(tkr->clock);
+
+ return clock->read(clock);
+}
+
#ifdef CONFIG_DEBUG_TIMEKEEPING
#define WARNING_FREQ (HZ*300) /* 5 minute rate-limiting */
@@ -173,7 +193,7 @@
*/
do {
seq = read_seqcount_begin(&tk_core.seq);
- now = tkr->read(tkr->clock);
+ now = tk_clock_read(tkr);
last = tkr->cycle_last;
mask = tkr->mask;
max = tkr->clock->max_cycles;
@@ -207,7 +227,7 @@
cycle_t cycle_now, delta;
/* read clocksource */
- cycle_now = tkr->read(tkr->clock);
+ cycle_now = tk_clock_read(tkr);
/* calculate the delta since the last update_wall_time */
delta = clocksource_delta(cycle_now, tkr->cycle_last, tkr->mask);
@@ -235,12 +255,10 @@
old_clock = tk->tkr_mono.clock;
tk->tkr_mono.clock = clock;
- tk->tkr_mono.read = clock->read;
tk->tkr_mono.mask = clock->mask;
- tk->tkr_mono.cycle_last = tk->tkr_mono.read(clock);
+ tk->tkr_mono.cycle_last = tk_clock_read(&tk->tkr_mono);
tk->tkr_raw.clock = clock;
- tk->tkr_raw.read = clock->read;
tk->tkr_raw.mask = clock->mask;
tk->tkr_raw.cycle_last = tk->tkr_mono.cycle_last;
@@ -404,7 +422,7 @@
now += timekeeping_delta_to_ns(tkr,
clocksource_delta(
- tkr->read(tkr->clock),
+ tk_clock_read(tkr),
tkr->cycle_last,
tkr->mask));
} while (read_seqcount_retry(&tkf->seq, seq));
@@ -461,6 +479,10 @@
return cycles_at_suspend;
}
+static struct clocksource dummy_clock = {
+ .read = dummy_clock_read,
+};
+
/**
* halt_fast_timekeeper - Prevent fast timekeeper from accessing clocksource.
* @tk: Timekeeper to snapshot.
@@ -477,13 +499,13 @@
struct tk_read_base *tkr = &tk->tkr_mono;
memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
- cycles_at_suspend = tkr->read(tkr->clock);
- tkr_dummy.read = dummy_clock_read;
+ cycles_at_suspend = tk_clock_read(tkr);
+ tkr_dummy.clock = &dummy_clock;
update_fast_timekeeper(&tkr_dummy, &tk_fast_mono);
tkr = &tk->tkr_raw;
memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy));
- tkr_dummy.read = dummy_clock_read;
+ tkr_dummy.clock = &dummy_clock;
update_fast_timekeeper(&tkr_dummy, &tk_fast_raw);
}
@@ -647,11 +669,10 @@
*/
static void timekeeping_forward_now(struct timekeeper *tk)
{
- struct clocksource *clock = tk->tkr_mono.clock;
cycle_t cycle_now, delta;
s64 nsec;
- cycle_now = tk->tkr_mono.read(clock);
+ cycle_now = tk_clock_read(&tk->tkr_mono);
delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
tk->tkr_mono.cycle_last = cycle_now;
tk->tkr_raw.cycle_last = cycle_now;
@@ -1434,7 +1455,7 @@
* The less preferred source will only be tried if there is no better
* usable source. The rtc part is handled separately in rtc core code.
*/
- cycle_now = tk->tkr_mono.read(clock);
+ cycle_now = tk_clock_read(&tk->tkr_mono);
if ((clock->flags & CLOCK_SOURCE_SUSPEND_NONSTOP) &&
cycle_now > tk->tkr_mono.cycle_last) {
u64 num, max = ULLONG_MAX;
@@ -1829,7 +1850,7 @@
#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
offset = real_tk->cycle_interval;
#else
- offset = clocksource_delta(tk->tkr_mono.read(tk->tkr_mono.clock),
+ offset = clocksource_delta(tk_clock_read(&tk->tkr_mono),
tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
#endif
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 3f743b14..eba904b 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -3535,7 +3535,7 @@
int exclude_mod = 0;
int found = 0;
int ret;
- int clear_filter;
+ int clear_filter = 0;
if (func) {
func_g.type = filter_parse_regex(func, len, &func_g.search,
@@ -3677,23 +3677,24 @@
ftrace_probe_registered = 1;
}
-static void __disable_ftrace_function_probe(void)
+static bool __disable_ftrace_function_probe(void)
{
int i;
if (!ftrace_probe_registered)
- return;
+ return false;
for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
struct hlist_head *hhd = &ftrace_func_hash[i];
if (hhd->first)
- return;
+ return false;
}
/* no more funcs left */
ftrace_shutdown(&trace_probe_ops, 0);
ftrace_probe_registered = 0;
+ return true;
}
@@ -3820,6 +3821,7 @@
__unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
void *data, int flags)
{
+ struct ftrace_ops_hash old_hash_ops;
struct ftrace_func_entry *rec_entry;
struct ftrace_func_probe *entry;
struct ftrace_func_probe *p;
@@ -3831,6 +3833,7 @@
struct hlist_node *tmp;
char str[KSYM_SYMBOL_LEN];
int i, ret;
+ bool disabled;
if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
func_g.search = NULL;
@@ -3849,6 +3852,10 @@
mutex_lock(&trace_probe_ops.func_hash->regex_lock);
+ old_hash_ops.filter_hash = old_hash;
+ /* Probes only have filters */
+ old_hash_ops.notrace_hash = NULL;
+
hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
if (!hash)
/* Hmm, should report this somehow */
@@ -3886,12 +3893,17 @@
}
}
mutex_lock(&ftrace_lock);
- __disable_ftrace_function_probe();
+ disabled = __disable_ftrace_function_probe();
/*
* Remove after the disable is called. Otherwise, if the last
* probe is removed, a null hash means *all enabled*.
*/
ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
+
+ /* still need to update the function call sites */
+ if (ftrace_enabled && !disabled)
+ ftrace_run_modify_code(&trace_probe_ops, FTRACE_UPDATE_CALLS,
+ &old_hash_ops);
synchronize_sched();
if (!ret)
free_ftrace_hash_rcu(old_hash);
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 2a7607f..ce30406 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -3439,11 +3439,23 @@
int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
{
struct ring_buffer_per_cpu *cpu_buffer;
+ struct buffer_page *reader;
+ struct buffer_page *head_page;
+ struct buffer_page *commit_page;
+ unsigned commit;
cpu_buffer = iter->cpu_buffer;
- return iter->head_page == cpu_buffer->commit_page &&
- iter->head == rb_commit_index(cpu_buffer);
+ /* Remember, trace recording is off when iterator is in use */
+ reader = cpu_buffer->reader_page;
+ head_page = cpu_buffer->head_page;
+ commit_page = cpu_buffer->commit_page;
+ commit = rb_page_commit(commit_page);
+
+ return ((iter->head_page == commit_page && iter->head == commit) ||
+ (iter->head_page == reader && commit_page == head_page &&
+ head_page->read == commit &&
+ iter->head == rb_page_commit(cpu_buffer->reader_page)));
}
EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
@@ -4874,9 +4886,9 @@
rb_data[cpu].cnt = cpu;
rb_threads[cpu] = kthread_create(rb_test, &rb_data[cpu],
"rbtester/%d", cpu);
- if (WARN_ON(!rb_threads[cpu])) {
+ if (WARN_ON(IS_ERR(rb_threads[cpu]))) {
pr_cont("FAILED\n");
- ret = -1;
+ ret = PTR_ERR(rb_threads[cpu]);
goto out_free;
}
@@ -4886,9 +4898,9 @@
/* Now create the rb hammer! */
rb_hammer = kthread_run(rb_hammer_test, NULL, "rbhammer");
- if (WARN_ON(!rb_hammer)) {
+ if (WARN_ON(IS_ERR(rb_hammer))) {
pr_cont("FAILED\n");
- ret = -1;
+ ret = PTR_ERR(rb_hammer);
goto out_free;
}
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index e010de3..3611a5b 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -6174,11 +6174,13 @@
return ret;
out_reg:
+ ret = alloc_snapshot(&global_trace);
+ if (ret < 0)
+ goto out;
+
ret = register_ftrace_function_probe(glob, ops, count);
- if (ret >= 0)
- alloc_snapshot(&global_trace);
-
+ out:
return ret < 0 ? ret : 0;
}
@@ -6849,6 +6851,7 @@
}
kfree(tr->topts);
+ free_cpumask_var(tr->tracing_cpumask);
kfree(tr->name);
kfree(tr);
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 68163025..f0e5408 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -1979,6 +1979,10 @@
if (err && set_str)
append_filter_err(ps, filter);
}
+ if (err && !set_str) {
+ free_event_filter(filter);
+ filter = NULL;
+ }
create_filter_finish(ps);
*filterp = filter;
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index c995644..e9092a0 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -659,30 +659,25 @@
pr_info("Probe point is not specified.\n");
return -EINVAL;
}
- if (isdigit(argv[1][0])) {
- if (is_return) {
- pr_info("Return probe point must be a symbol.\n");
- return -EINVAL;
- }
- /* an address specified */
- ret = kstrtoul(&argv[1][0], 0, (unsigned long *)&addr);
- if (ret) {
- pr_info("Failed to parse address.\n");
- return ret;
- }
- } else {
+
+ /* Try to parse an address. If that fails, try to read the
+ * input as a symbol. */
+ if (kstrtoul(argv[1], 0, (unsigned long *)&addr)) {
/* a symbol specified */
symbol = argv[1];
/* TODO: support .init module functions */
ret = traceprobe_split_symbol_offset(symbol, &offset);
if (ret) {
- pr_info("Failed to parse symbol.\n");
+ pr_info("Failed to parse either an address or a symbol.\n");
return ret;
}
if (offset && is_return) {
pr_info("Return probe must be used without offset.\n");
return -EINVAL;
}
+ } else if (is_return) {
+ pr_info("Return probe point must be a symbol.\n");
+ return -EINVAL;
}
argc -= 2; argv += 2;
@@ -1471,6 +1466,11 @@
end:
release_all_trace_kprobes();
+ /*
+ * Wait for the optimizer work to finish. Otherwise it might fiddle
+ * with probes in already freed __init text.
+ */
+ wait_for_kprobe_optimizer();
if (warn)
pr_cont("NG: Some tests are failed. Please check them.\n");
else
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index c0ab232..8965f36 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -3671,8 +3671,12 @@
return -EINVAL;
/* creating multiple pwqs breaks ordering guarantee */
- if (WARN_ON((wq->flags & __WQ_ORDERED) && !list_empty(&wq->pwqs)))
- return -EINVAL;
+ if (!list_empty(&wq->pwqs)) {
+ if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
+ return -EINVAL;
+
+ wq->flags &= ~__WQ_ORDERED;
+ }
ctx = apply_wqattrs_prepare(wq, attrs);
@@ -3858,6 +3862,16 @@
struct workqueue_struct *wq;
struct pool_workqueue *pwq;
+ /*
+ * Unbound && max_active == 1 used to imply ordered, which is no
+ * longer the case on NUMA machines due to per-node pools. While
+ * alloc_ordered_workqueue() is the right way to create an ordered
+ * workqueue, keep the previous behavior to avoid subtle breakages
+ * on NUMA.
+ */
+ if ((flags & WQ_UNBOUND) && max_active == 1)
+ flags |= __WQ_ORDERED;
+
/* see the comment above the definition of WQ_POWER_EFFICIENT */
if ((flags & WQ_POWER_EFFICIENT) && wq_power_efficient)
flags |= WQ_UNBOUND;
@@ -4046,13 +4060,14 @@
struct pool_workqueue *pwq;
/* disallow meddling with max_active for ordered workqueues */
- if (WARN_ON(wq->flags & __WQ_ORDERED))
+ if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
return;
max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);
mutex_lock(&wq->mutex);
+ wq->flags &= ~__WQ_ORDERED;
wq->saved_max_active = max_active;
for_each_pwq(pwq, wq)
@@ -5180,7 +5195,7 @@
* attributes breaks ordering guarantee. Disallow exposing ordered
* workqueues.
*/
- if (WARN_ON(wq->flags & __WQ_ORDERED))
+ if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
return -EINVAL;
wq->wq_dev = wq_dev = kzalloc(sizeof(*wq_dev), GFP_KERNEL);
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 4251938..d9d855e 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -161,7 +161,7 @@
config DEBUG_INFO_SPLIT
bool "Produce split debuginfo in .dwo files"
- depends on DEBUG_INFO
+ depends on DEBUG_INFO && !FRV
help
Generate debug info into separate .dwo files. This significantly
reduces the build directory size for builds with DEBUG_INFO,
diff --git a/lib/cmdline.c b/lib/cmdline.c
index 8f13cf7..79069d7 100644
--- a/lib/cmdline.c
+++ b/lib/cmdline.c
@@ -22,14 +22,14 @@
* the values[M, M+1, ..., N] into the ints array in get_options.
*/
-static int get_range(char **str, int *pint)
+static int get_range(char **str, int *pint, int n)
{
int x, inc_counter, upper_range;
(*str)++;
upper_range = simple_strtol((*str), NULL, 0);
inc_counter = upper_range - *pint;
- for (x = *pint; x < upper_range; x++)
+ for (x = *pint; n && x < upper_range; x++, n--)
*pint++ = x;
return inc_counter;
}
@@ -96,7 +96,7 @@
break;
if (res == 3) {
int range_nums;
- range_nums = get_range((char **)&str, ints + i);
+ range_nums = get_range((char **)&str, ints + i, nints - i);
if (range_nums < 0)
break;
/*
diff --git a/lib/decompress_unlz4.c b/lib/decompress_unlz4.c
index 036fc88..1b0baf3 100644
--- a/lib/decompress_unlz4.c
+++ b/lib/decompress_unlz4.c
@@ -72,7 +72,7 @@
error("NULL input pointer and missing fill function");
goto exit_1;
} else {
- inp = large_malloc(lz4_compressbound(uncomp_chunksize));
+ inp = large_malloc(LZ4_compressBound(uncomp_chunksize));
if (!inp) {
error("Could not allocate input buffer");
goto exit_1;
@@ -136,7 +136,7 @@
inp += 4;
size -= 4;
} else {
- if (chunksize > lz4_compressbound(uncomp_chunksize)) {
+ if (chunksize > LZ4_compressBound(uncomp_chunksize)) {
error("chunk length is longer than allocated");
goto exit_2;
}
@@ -152,11 +152,14 @@
out_len -= dest_len;
} else
dest_len = out_len;
- ret = lz4_decompress(inp, &chunksize, outp, dest_len);
+
+ ret = LZ4_decompress_fast(inp, outp, dest_len);
+ chunksize = ret;
#else
dest_len = uncomp_chunksize;
- ret = lz4_decompress_unknownoutputsize(inp, chunksize, outp,
- &dest_len);
+
+ ret = LZ4_decompress_safe(inp, outp, chunksize, dest_len);
+ dest_len = ret;
#endif
if (ret < 0) {
error("Decoding failed");
diff --git a/lib/lz4/Makefile b/lib/lz4/Makefile
index 8085d04..f7b11327 100644
--- a/lib/lz4/Makefile
+++ b/lib/lz4/Makefile
@@ -1,3 +1,5 @@
+ccflags-y += -O3
+
obj-$(CONFIG_LZ4_COMPRESS) += lz4_compress.o
obj-$(CONFIG_LZ4HC_COMPRESS) += lz4hc_compress.o
obj-$(CONFIG_LZ4_DECOMPRESS) += lz4_decompress.o
diff --git a/lib/lz4/lz4_compress.c b/lib/lz4/lz4_compress.c
index 28321d8..668400b 100644
--- a/lib/lz4/lz4_compress.c
+++ b/lib/lz4/lz4_compress.c
@@ -1,19 +1,16 @@
/*
* LZ4 - Fast LZ compression algorithm
- * Copyright (C) 2011-2012, Yann Collet.
- * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
+ * Copyright (C) 2011 - 2016, Yann Collet.
+ * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
- *
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@@ -25,417 +22,939 @@
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
* You can contact the author at :
- * - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html
- * - LZ4 source repository : http://code.google.com/p/lz4/
+ * - LZ4 homepage : http://www.lz4.org
+ * - LZ4 source repository : https://github.com/lz4/lz4
*
- * Changed for kernel use by:
- * Chanho Min <chanho.min@lge.com>
+ * Changed for kernel usage by:
+ * Sven Schmidt <4sschmid@informatik.uni-hamburg.de>
*/
+/*-************************************
+ * Dependencies
+ **************************************/
+#include <linux/lz4.h>
+#include "lz4defs.h"
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/lz4.h>
#include <asm/unaligned.h>
-#include "lz4defs.h"
+
+static const int LZ4_minLength = (MFLIMIT + 1);
+static const int LZ4_64Klimit = ((64 * KB) + (MFLIMIT - 1));
+
+/*-******************************
+ * Compression functions
+ ********************************/
+static FORCE_INLINE U32 LZ4_hash4(
+ U32 sequence,
+ tableType_t const tableType)
+{
+ if (tableType == byU16)
+ return ((sequence * 2654435761U)
+ >> ((MINMATCH * 8) - (LZ4_HASHLOG + 1)));
+ else
+ return ((sequence * 2654435761U)
+ >> ((MINMATCH * 8) - LZ4_HASHLOG));
+}
+
+static FORCE_INLINE U32 LZ4_hash5(
+ U64 sequence,
+ tableType_t const tableType)
+{
+ const U32 hashLog = (tableType == byU16)
+ ? LZ4_HASHLOG + 1
+ : LZ4_HASHLOG;
+
+#if LZ4_LITTLE_ENDIAN
+ static const U64 prime5bytes = 889523592379ULL;
+
+ return (U32)(((sequence << 24) * prime5bytes) >> (64 - hashLog));
+#else
+ static const U64 prime8bytes = 11400714785074694791ULL;
+
+ return (U32)(((sequence >> 24) * prime8bytes) >> (64 - hashLog));
+#endif
+}
+
+static FORCE_INLINE U32 LZ4_hashPosition(
+ const void *p,
+ tableType_t const tableType)
+{
+#if LZ4_ARCH64
+ if (tableType == byU32)
+ return LZ4_hash5(LZ4_read_ARCH(p), tableType);
+#endif
+
+ return LZ4_hash4(LZ4_read32(p), tableType);
+}
+
+static void LZ4_putPositionOnHash(
+ const BYTE *p,
+ U32 h,
+ void *tableBase,
+ tableType_t const tableType,
+ const BYTE *srcBase)
+{
+ switch (tableType) {
+ case byPtr:
+ {
+ const BYTE **hashTable = (const BYTE **)tableBase;
+
+ hashTable[h] = p;
+ return;
+ }
+ case byU32:
+ {
+ U32 *hashTable = (U32 *) tableBase;
+
+ hashTable[h] = (U32)(p - srcBase);
+ return;
+ }
+ case byU16:
+ {
+ U16 *hashTable = (U16 *) tableBase;
+
+ hashTable[h] = (U16)(p - srcBase);
+ return;
+ }
+ }
+}
+
+static FORCE_INLINE void LZ4_putPosition(
+ const BYTE *p,
+ void *tableBase,
+ tableType_t tableType,
+ const BYTE *srcBase)
+{
+ U32 const h = LZ4_hashPosition(p, tableType);
+
+ LZ4_putPositionOnHash(p, h, tableBase, tableType, srcBase);
+}
+
+static const BYTE *LZ4_getPositionOnHash(
+ U32 h,
+ void *tableBase,
+ tableType_t tableType,
+ const BYTE *srcBase)
+{
+ if (tableType == byPtr) {
+ const BYTE **hashTable = (const BYTE **) tableBase;
+
+ return hashTable[h];
+ }
+
+ if (tableType == byU32) {
+ const U32 * const hashTable = (U32 *) tableBase;
+
+ return hashTable[h] + srcBase;
+ }
+
+ {
+ /* default, to ensure a return */
+ const U16 * const hashTable = (U16 *) tableBase;
+
+ return hashTable[h] + srcBase;
+ }
+}
+
+static FORCE_INLINE const BYTE *LZ4_getPosition(
+ const BYTE *p,
+ void *tableBase,
+ tableType_t tableType,
+ const BYTE *srcBase)
+{
+ U32 const h = LZ4_hashPosition(p, tableType);
+
+ return LZ4_getPositionOnHash(h, tableBase, tableType, srcBase);
+}
+
/*
- * LZ4_compressCtx :
- * -----------------
- * Compress 'isize' bytes from 'source' into an output buffer 'dest' of
- * maximum size 'maxOutputSize'. * If it cannot achieve it, compression
- * will stop, and result of the function will be zero.
- * return : the number of bytes written in buffer 'dest', or 0 if the
- * compression fails
+ * LZ4_compress_generic() :
+ * inlined, to ensure branches are decided at compilation time
*/
-static inline int lz4_compressctx(void *ctx,
- const char *source,
- char *dest,
- int isize,
- int maxoutputsize)
+static FORCE_INLINE int LZ4_compress_generic(
+ LZ4_stream_t_internal * const dictPtr,
+ const char * const source,
+ char * const dest,
+ const int inputSize,
+ const int maxOutputSize,
+ const limitedOutput_directive outputLimited,
+ const tableType_t tableType,
+ const dict_directive dict,
+ const dictIssue_directive dictIssue,
+ const U32 acceleration)
{
- HTYPE *hashtable = (HTYPE *)ctx;
- const u8 *ip = (u8 *)source;
+ const BYTE *ip = (const BYTE *) source;
+ const BYTE *base;
+ const BYTE *lowLimit;
+ const BYTE * const lowRefLimit = ip - dictPtr->dictSize;
+ const BYTE * const dictionary = dictPtr->dictionary;
+ const BYTE * const dictEnd = dictionary + dictPtr->dictSize;
+ const size_t dictDelta = dictEnd - (const BYTE *)source;
+ const BYTE *anchor = (const BYTE *) source;
+ const BYTE * const iend = ip + inputSize;
+ const BYTE * const mflimit = iend - MFLIMIT;
+ const BYTE * const matchlimit = iend - LASTLITERALS;
+
+ BYTE *op = (BYTE *) dest;
+ BYTE * const olimit = op + maxOutputSize;
+
+ U32 forwardH;
+ size_t refDelta = 0;
+
+ /* Init conditions */
+ if ((U32)inputSize > (U32)LZ4_MAX_INPUT_SIZE) {
+ /* Unsupported inputSize, too large (or negative) */
+ return 0;
+ }
+
+ switch (dict) {
+ case noDict:
+ default:
+ base = (const BYTE *)source;
+ lowLimit = (const BYTE *)source;
+ break;
+ case withPrefix64k:
+ base = (const BYTE *)source - dictPtr->currentOffset;
+ lowLimit = (const BYTE *)source - dictPtr->dictSize;
+ break;
+ case usingExtDict:
+ base = (const BYTE *)source - dictPtr->currentOffset;
+ lowLimit = (const BYTE *)source;
+ break;
+ }
+
+ if ((tableType == byU16)
+ && (inputSize >= LZ4_64Klimit)) {
+ /* Size too large (not within 64K limit) */
+ return 0;
+ }
+
+ if (inputSize < LZ4_minLength) {
+ /* Input too small, no compression (all literals) */
+ goto _last_literals;
+ }
+
+ /* First Byte */
+ LZ4_putPosition(ip, dictPtr->hashTable, tableType, base);
+ ip++;
+ forwardH = LZ4_hashPosition(ip, tableType);
+
+ /* Main Loop */
+ for ( ; ; ) {
+ const BYTE *match;
+ BYTE *token;
+
+ /* Find a match */
+ {
+ const BYTE *forwardIp = ip;
+ unsigned int step = 1;
+ unsigned int searchMatchNb = acceleration << LZ4_SKIPTRIGGER;
+
+ do {
+ U32 const h = forwardH;
+
+ ip = forwardIp;
+ forwardIp += step;
+ step = (searchMatchNb++ >> LZ4_SKIPTRIGGER);
+
+ if (unlikely(forwardIp > mflimit))
+ goto _last_literals;
+
+ match = LZ4_getPositionOnHash(h,
+ dictPtr->hashTable,
+ tableType, base);
+
+ if (dict == usingExtDict) {
+ if (match < (const BYTE *)source) {
+ refDelta = dictDelta;
+ lowLimit = dictionary;
+ } else {
+ refDelta = 0;
+ lowLimit = (const BYTE *)source;
+ } }
+
+ forwardH = LZ4_hashPosition(forwardIp,
+ tableType);
+
+ LZ4_putPositionOnHash(ip, h, dictPtr->hashTable,
+ tableType, base);
+ } while (((dictIssue == dictSmall)
+ ? (match < lowRefLimit)
+ : 0)
+ || ((tableType == byU16)
+ ? 0
+ : (match + MAX_DISTANCE < ip))
+ || (LZ4_read32(match + refDelta)
+ != LZ4_read32(ip)));
+ }
+
+ /* Catch up */
+ while (((ip > anchor) & (match + refDelta > lowLimit))
+ && (unlikely(ip[-1] == match[refDelta - 1]))) {
+ ip--;
+ match--;
+ }
+
+ /* Encode Literals */
+ {
+ unsigned const int litLength = (unsigned int)(ip - anchor);
+
+ token = op++;
+
+ if ((outputLimited) &&
+ /* Check output buffer overflow */
+ (unlikely(op + litLength +
+ (2 + 1 + LASTLITERALS) +
+ (litLength / 255) > olimit)))
+ return 0;
+
+ if (litLength >= RUN_MASK) {
+ int len = (int)litLength - RUN_MASK;
+
+ *token = (RUN_MASK << ML_BITS);
+
+ for (; len >= 255; len -= 255)
+ *op++ = 255;
+ *op++ = (BYTE)len;
+ } else
+ *token = (BYTE)(litLength << ML_BITS);
+
+ /* Copy Literals */
+ LZ4_wildCopy(op, anchor, op + litLength);
+ op += litLength;
+ }
+
+_next_match:
+ /* Encode Offset */
+ LZ4_writeLE16(op, (U16)(ip - match));
+ op += 2;
+
+ /* Encode MatchLength */
+ {
+ unsigned int matchCode;
+
+ if ((dict == usingExtDict)
+ && (lowLimit == dictionary)) {
+ const BYTE *limit;
+
+ match += refDelta;
+ limit = ip + (dictEnd - match);
+
+ if (limit > matchlimit)
+ limit = matchlimit;
+
+ matchCode = LZ4_count(ip + MINMATCH,
+ match + MINMATCH, limit);
+
+ ip += MINMATCH + matchCode;
+
+ if (ip == limit) {
+ unsigned const int more = LZ4_count(ip,
+ (const BYTE *)source,
+ matchlimit);
+
+ matchCode += more;
+ ip += more;
+ }
+ } else {
+ matchCode = LZ4_count(ip + MINMATCH,
+ match + MINMATCH, matchlimit);
+ ip += MINMATCH + matchCode;
+ }
+
+ if (outputLimited &&
+ /* Check output buffer overflow */
+ (unlikely(op +
+ (1 + LASTLITERALS) +
+ (matchCode >> 8) > olimit)))
+ return 0;
+
+ if (matchCode >= ML_MASK) {
+ *token += ML_MASK;
+ matchCode -= ML_MASK;
+ LZ4_write32(op, 0xFFFFFFFF);
+
+ while (matchCode >= 4 * 255) {
+ op += 4;
+ LZ4_write32(op, 0xFFFFFFFF);
+ matchCode -= 4 * 255;
+ }
+
+ op += matchCode / 255;
+ *op++ = (BYTE)(matchCode % 255);
+ } else
+ *token += (BYTE)(matchCode);
+ }
+
+ anchor = ip;
+
+ /* Test end of chunk */
+ if (ip > mflimit)
+ break;
+
+ /* Fill table */
+ LZ4_putPosition(ip - 2, dictPtr->hashTable, tableType, base);
+
+ /* Test next position */
+ match = LZ4_getPosition(ip, dictPtr->hashTable,
+ tableType, base);
+
+ if (dict == usingExtDict) {
+ if (match < (const BYTE *)source) {
+ refDelta = dictDelta;
+ lowLimit = dictionary;
+ } else {
+ refDelta = 0;
+ lowLimit = (const BYTE *)source;
+ }
+ }
+
+ LZ4_putPosition(ip, dictPtr->hashTable, tableType, base);
+
+ if (((dictIssue == dictSmall) ? (match >= lowRefLimit) : 1)
+ && (match + MAX_DISTANCE >= ip)
+ && (LZ4_read32(match + refDelta) == LZ4_read32(ip))) {
+ token = op++;
+ *token = 0;
+ goto _next_match;
+ }
+
+ /* Prepare next loop */
+ forwardH = LZ4_hashPosition(++ip, tableType);
+ }
+
+_last_literals:
+ /* Encode Last Literals */
+ {
+ size_t const lastRun = (size_t)(iend - anchor);
+
+ if ((outputLimited) &&
+ /* Check output buffer overflow */
+ ((op - (BYTE *)dest) + lastRun + 1 +
+ ((lastRun + 255 - RUN_MASK) / 255) > (U32)maxOutputSize))
+ return 0;
+
+ if (lastRun >= RUN_MASK) {
+ size_t accumulator = lastRun - RUN_MASK;
+ *op++ = RUN_MASK << ML_BITS;
+ for (; accumulator >= 255; accumulator -= 255)
+ *op++ = 255;
+ *op++ = (BYTE) accumulator;
+ } else {
+ *op++ = (BYTE)(lastRun << ML_BITS);
+ }
+
+ memcpy(op, anchor, lastRun);
+
+ op += lastRun;
+ }
+
+ /* End */
+ return (int) (((char *)op) - dest);
+}
+
+static int LZ4_compress_fast_extState(
+ void *state,
+ const char *source,
+ char *dest,
+ int inputSize,
+ int maxOutputSize,
+ int acceleration)
+{
+ LZ4_stream_t_internal *ctx = &((LZ4_stream_t *)state)->internal_donotuse;
#if LZ4_ARCH64
- const BYTE * const base = ip;
+ const tableType_t tableType = byU32;
#else
- const int base = 0;
+ const tableType_t tableType = byPtr;
#endif
- const u8 *anchor = ip;
- const u8 *const iend = ip + isize;
- const u8 *const mflimit = iend - MFLIMIT;
- #define MATCHLIMIT (iend - LASTLITERALS)
- u8 *op = (u8 *) dest;
- u8 *const oend = op + maxoutputsize;
- int length;
- const int skipstrength = SKIPSTRENGTH;
- u32 forwardh;
- int lastrun;
+ LZ4_resetStream((LZ4_stream_t *)state);
- /* Init */
- if (isize < MINLENGTH)
+ if (acceleration < 1)
+ acceleration = LZ4_ACCELERATION_DEFAULT;
+
+ if (maxOutputSize >= LZ4_COMPRESSBOUND(inputSize)) {
+ if (inputSize < LZ4_64Klimit)
+ return LZ4_compress_generic(ctx, source,
+ dest, inputSize, 0,
+ noLimit, byU16, noDict,
+ noDictIssue, acceleration);
+ else
+ return LZ4_compress_generic(ctx, source,
+ dest, inputSize, 0,
+ noLimit, tableType, noDict,
+ noDictIssue, acceleration);
+ } else {
+ if (inputSize < LZ4_64Klimit)
+ return LZ4_compress_generic(ctx, source,
+ dest, inputSize,
+ maxOutputSize, limitedOutput, byU16, noDict,
+ noDictIssue, acceleration);
+ else
+ return LZ4_compress_generic(ctx, source,
+ dest, inputSize,
+ maxOutputSize, limitedOutput, tableType, noDict,
+ noDictIssue, acceleration);
+ }
+}
+
+int LZ4_compress_fast(const char *source, char *dest, int inputSize,
+ int maxOutputSize, int acceleration, void *wrkmem)
+{
+ return LZ4_compress_fast_extState(wrkmem, source, dest, inputSize,
+ maxOutputSize, acceleration);
+}
+EXPORT_SYMBOL(LZ4_compress_fast);
+
+int LZ4_compress_default(const char *source, char *dest, int inputSize,
+ int maxOutputSize, void *wrkmem)
+{
+ return LZ4_compress_fast(source, dest, inputSize,
+ maxOutputSize, LZ4_ACCELERATION_DEFAULT, wrkmem);
+}
+EXPORT_SYMBOL(LZ4_compress_default);
+
+/*-******************************
+ * *_destSize() variant
+ ********************************/
+static int LZ4_compress_destSize_generic(
+ LZ4_stream_t_internal * const ctx,
+ const char * const src,
+ char * const dst,
+ int * const srcSizePtr,
+ const int targetDstSize,
+ const tableType_t tableType)
+{
+ const BYTE *ip = (const BYTE *) src;
+ const BYTE *base = (const BYTE *) src;
+ const BYTE *lowLimit = (const BYTE *) src;
+ const BYTE *anchor = ip;
+ const BYTE * const iend = ip + *srcSizePtr;
+ const BYTE * const mflimit = iend - MFLIMIT;
+ const BYTE * const matchlimit = iend - LASTLITERALS;
+
+ BYTE *op = (BYTE *) dst;
+ BYTE * const oend = op + targetDstSize;
+ BYTE * const oMaxLit = op + targetDstSize - 2 /* offset */
+ - 8 /* because 8 + MINMATCH == MFLIMIT */ - 1 /* token */;
+ BYTE * const oMaxMatch = op + targetDstSize
+ - (LASTLITERALS + 1 /* token */);
+ BYTE * const oMaxSeq = oMaxLit - 1 /* token */;
+
+ U32 forwardH;
+
+ /* Init conditions */
+ /* Impossible to store anything */
+ if (targetDstSize < 1)
+ return 0;
+ /* Unsupported input size, too large (or negative) */
+ if ((U32)*srcSizePtr > (U32)LZ4_MAX_INPUT_SIZE)
+ return 0;
+ /* Size too large (not within 64K limit) */
+ if ((tableType == byU16) && (*srcSizePtr >= LZ4_64Klimit))
+ return 0;
+ /* Input too small, no compression (all literals) */
+ if (*srcSizePtr < LZ4_minLength)
goto _last_literals;
- memset((void *)hashtable, 0, LZ4_MEM_COMPRESS);
-
/* First Byte */
- hashtable[LZ4_HASH_VALUE(ip)] = ip - base;
- ip++;
- forwardh = LZ4_HASH_VALUE(ip);
+ *srcSizePtr = 0;
+ LZ4_putPosition(ip, ctx->hashTable, tableType, base);
+ ip++; forwardH = LZ4_hashPosition(ip, tableType);
/* Main Loop */
- for (;;) {
- int findmatchattempts = (1U << skipstrength) + 3;
- const u8 *forwardip = ip;
- const u8 *ref;
- u8 *token;
+ for ( ; ; ) {
+ const BYTE *match;
+ BYTE *token;
/* Find a match */
- do {
- u32 h = forwardh;
- int step = findmatchattempts++ >> skipstrength;
- ip = forwardip;
- forwardip = ip + step;
+ {
+ const BYTE *forwardIp = ip;
+ unsigned int step = 1;
+ unsigned int searchMatchNb = 1 << LZ4_SKIPTRIGGER;
- if (unlikely(forwardip > mflimit))
- goto _last_literals;
+ do {
+ U32 h = forwardH;
- forwardh = LZ4_HASH_VALUE(forwardip);
- ref = base + hashtable[h];
- hashtable[h] = ip - base;
- } while ((ref < ip - MAX_DISTANCE) || (A32(ref) != A32(ip)));
+ ip = forwardIp;
+ forwardIp += step;
+ step = (searchMatchNb++ >> LZ4_SKIPTRIGGER);
+
+ if (unlikely(forwardIp > mflimit))
+ goto _last_literals;
+
+ match = LZ4_getPositionOnHash(h, ctx->hashTable,
+ tableType, base);
+ forwardH = LZ4_hashPosition(forwardIp,
+ tableType);
+ LZ4_putPositionOnHash(ip, h,
+ ctx->hashTable, tableType,
+ base);
+
+ } while (((tableType == byU16)
+ ? 0
+ : (match + MAX_DISTANCE < ip))
+ || (LZ4_read32(match) != LZ4_read32(ip)));
+ }
/* Catch up */
- while ((ip > anchor) && (ref > (u8 *)source) &&
- unlikely(ip[-1] == ref[-1])) {
+ while ((ip > anchor)
+ && (match > lowLimit)
+ && (unlikely(ip[-1] == match[-1]))) {
ip--;
- ref--;
+ match--;
}
/* Encode Literal length */
- length = (int)(ip - anchor);
- token = op++;
- /* check output limit */
- if (unlikely(op + length + (2 + 1 + LASTLITERALS) +
- (length >> 8) > oend))
- return 0;
+ {
+ unsigned int litLength = (unsigned int)(ip - anchor);
- if (length >= (int)RUN_MASK) {
- int len;
- *token = (RUN_MASK << ML_BITS);
- len = length - RUN_MASK;
- for (; len > 254 ; len -= 255)
- *op++ = 255;
- *op++ = (u8)len;
- } else
- *token = (length << ML_BITS);
+ token = op++;
+ if (op + ((litLength + 240) / 255)
+ + litLength > oMaxLit) {
+ /* Not enough space for a last match */
+ op--;
+ goto _last_literals;
+ }
+ if (litLength >= RUN_MASK) {
+ unsigned int len = litLength - RUN_MASK;
+ *token = (RUN_MASK<<ML_BITS);
+ for (; len >= 255; len -= 255)
+ *op++ = 255;
+ *op++ = (BYTE)len;
+ } else
+ *token = (BYTE)(litLength << ML_BITS);
- /* Copy Literals */
- LZ4_BLINDCOPY(anchor, op, length);
+ /* Copy Literals */
+ LZ4_wildCopy(op, anchor, op + litLength);
+ op += litLength;
+ }
+
_next_match:
/* Encode Offset */
- LZ4_WRITE_LITTLEENDIAN_16(op, (u16)(ip - ref));
+ LZ4_writeLE16(op, (U16)(ip - match)); op += 2;
- /* Start Counting */
- ip += MINMATCH;
- /* MinMatch verified */
- ref += MINMATCH;
- anchor = ip;
- while (likely(ip < MATCHLIMIT - (STEPSIZE - 1))) {
- #if LZ4_ARCH64
- u64 diff = A64(ref) ^ A64(ip);
- #else
- u32 diff = A32(ref) ^ A32(ip);
- #endif
- if (!diff) {
- ip += STEPSIZE;
- ref += STEPSIZE;
- continue;
- }
- ip += LZ4_NBCOMMONBYTES(diff);
- goto _endcount;
- }
- #if LZ4_ARCH64
- if ((ip < (MATCHLIMIT - 3)) && (A32(ref) == A32(ip))) {
- ip += 4;
- ref += 4;
- }
- #endif
- if ((ip < (MATCHLIMIT - 1)) && (A16(ref) == A16(ip))) {
- ip += 2;
- ref += 2;
- }
- if ((ip < MATCHLIMIT) && (*ref == *ip))
- ip++;
-_endcount:
/* Encode MatchLength */
- length = (int)(ip - anchor);
- /* Check output limit */
- if (unlikely(op + (1 + LASTLITERALS) + (length >> 8) > oend))
- return 0;
- if (length >= (int)ML_MASK) {
- *token += ML_MASK;
- length -= ML_MASK;
- for (; length > 509 ; length -= 510) {
- *op++ = 255;
- *op++ = 255;
- }
- if (length > 254) {
- length -= 255;
- *op++ = 255;
- }
- *op++ = (u8)length;
- } else
- *token += length;
+ {
+ size_t matchLength = LZ4_count(ip + MINMATCH,
+ match + MINMATCH, matchlimit);
- /* Test end of chunk */
- if (ip > mflimit) {
- anchor = ip;
- break;
+ if (op + ((matchLength + 240)/255) > oMaxMatch) {
+ /* Match description too long : reduce it */
+ matchLength = (15 - 1) + (oMaxMatch - op) * 255;
+ }
+ ip += MINMATCH + matchLength;
+
+ if (matchLength >= ML_MASK) {
+ *token += ML_MASK;
+ matchLength -= ML_MASK;
+ while (matchLength >= 255) {
+ matchLength -= 255;
+ *op++ = 255;
+ }
+ *op++ = (BYTE)matchLength;
+ } else
+ *token += (BYTE)(matchLength);
}
+ anchor = ip;
+
+ /* Test end of block */
+ if (ip > mflimit)
+ break;
+ if (op > oMaxSeq)
+ break;
+
/* Fill table */
- hashtable[LZ4_HASH_VALUE(ip-2)] = ip - 2 - base;
+ LZ4_putPosition(ip - 2, ctx->hashTable, tableType, base);
/* Test next position */
- ref = base + hashtable[LZ4_HASH_VALUE(ip)];
- hashtable[LZ4_HASH_VALUE(ip)] = ip - base;
- if ((ref > ip - (MAX_DISTANCE + 1)) && (A32(ref) == A32(ip))) {
- token = op++;
- *token = 0;
+ match = LZ4_getPosition(ip, ctx->hashTable, tableType, base);
+ LZ4_putPosition(ip, ctx->hashTable, tableType, base);
+
+ if ((match + MAX_DISTANCE >= ip)
+ && (LZ4_read32(match) == LZ4_read32(ip))) {
+ token = op++; *token = 0;
goto _next_match;
}
/* Prepare next loop */
- anchor = ip++;
- forwardh = LZ4_HASH_VALUE(ip);
+ forwardH = LZ4_hashPosition(++ip, tableType);
}
_last_literals:
/* Encode Last Literals */
- lastrun = (int)(iend - anchor);
- if (((char *)op - dest) + lastrun + 1
- + ((lastrun + 255 - RUN_MASK) / 255) > (u32)maxoutputsize)
- return 0;
+ {
+ size_t lastRunSize = (size_t)(iend - anchor);
- if (lastrun >= (int)RUN_MASK) {
- *op++ = (RUN_MASK << ML_BITS);
- lastrun -= RUN_MASK;
- for (; lastrun > 254 ; lastrun -= 255)
- *op++ = 255;
- *op++ = (u8)lastrun;
- } else
- *op++ = (lastrun << ML_BITS);
- memcpy(op, anchor, iend - anchor);
- op += iend - anchor;
-
- /* End */
- return (int)(((char *)op) - dest);
-}
-
-static inline int lz4_compress64kctx(void *ctx,
- const char *source,
- char *dest,
- int isize,
- int maxoutputsize)
-{
- u16 *hashtable = (u16 *)ctx;
- const u8 *ip = (u8 *) source;
- const u8 *anchor = ip;
- const u8 *const base = ip;
- const u8 *const iend = ip + isize;
- const u8 *const mflimit = iend - MFLIMIT;
- #define MATCHLIMIT (iend - LASTLITERALS)
-
- u8 *op = (u8 *) dest;
- u8 *const oend = op + maxoutputsize;
- int len, length;
- const int skipstrength = SKIPSTRENGTH;
- u32 forwardh;
- int lastrun;
-
- /* Init */
- if (isize < MINLENGTH)
- goto _last_literals;
-
- memset((void *)hashtable, 0, LZ4_MEM_COMPRESS);
-
- /* First Byte */
- ip++;
- forwardh = LZ4_HASH64K_VALUE(ip);
-
- /* Main Loop */
- for (;;) {
- int findmatchattempts = (1U << skipstrength) + 3;
- const u8 *forwardip = ip;
- const u8 *ref;
- u8 *token;
-
- /* Find a match */
- do {
- u32 h = forwardh;
- int step = findmatchattempts++ >> skipstrength;
- ip = forwardip;
- forwardip = ip + step;
-
- if (forwardip > mflimit)
- goto _last_literals;
-
- forwardh = LZ4_HASH64K_VALUE(forwardip);
- ref = base + hashtable[h];
- hashtable[h] = (u16)(ip - base);
- } while (A32(ref) != A32(ip));
-
- /* Catch up */
- while ((ip > anchor) && (ref > (u8 *)source)
- && (ip[-1] == ref[-1])) {
- ip--;
- ref--;
+ if (op + 1 /* token */
+ + ((lastRunSize + 240) / 255) /* litLength */
+ + lastRunSize /* literals */ > oend) {
+ /* adapt lastRunSize to fill 'dst' */
+ lastRunSize = (oend - op) - 1;
+ lastRunSize -= (lastRunSize + 240) / 255;
}
+ ip = anchor + lastRunSize;
- /* Encode Literal length */
- length = (int)(ip - anchor);
- token = op++;
- /* Check output limit */
- if (unlikely(op + length + (2 + 1 + LASTLITERALS)
- + (length >> 8) > oend))
- return 0;
- if (length >= (int)RUN_MASK) {
- *token = (RUN_MASK << ML_BITS);
- len = length - RUN_MASK;
- for (; len > 254 ; len -= 255)
+ if (lastRunSize >= RUN_MASK) {
+ size_t accumulator = lastRunSize - RUN_MASK;
+
+ *op++ = RUN_MASK << ML_BITS;
+ for (; accumulator >= 255; accumulator -= 255)
*op++ = 255;
- *op++ = (u8)len;
- } else
- *token = (length << ML_BITS);
-
- /* Copy Literals */
- LZ4_BLINDCOPY(anchor, op, length);
-
-_next_match:
- /* Encode Offset */
- LZ4_WRITE_LITTLEENDIAN_16(op, (u16)(ip - ref));
-
- /* Start Counting */
- ip += MINMATCH;
- /* MinMatch verified */
- ref += MINMATCH;
- anchor = ip;
-
- while (ip < MATCHLIMIT - (STEPSIZE - 1)) {
- #if LZ4_ARCH64
- u64 diff = A64(ref) ^ A64(ip);
- #else
- u32 diff = A32(ref) ^ A32(ip);
- #endif
-
- if (!diff) {
- ip += STEPSIZE;
- ref += STEPSIZE;
- continue;
- }
- ip += LZ4_NBCOMMONBYTES(diff);
- goto _endcount;
+ *op++ = (BYTE) accumulator;
+ } else {
+ *op++ = (BYTE)(lastRunSize<<ML_BITS);
}
- #if LZ4_ARCH64
- if ((ip < (MATCHLIMIT - 3)) && (A32(ref) == A32(ip))) {
- ip += 4;
- ref += 4;
- }
- #endif
- if ((ip < (MATCHLIMIT - 1)) && (A16(ref) == A16(ip))) {
- ip += 2;
- ref += 2;
- }
- if ((ip < MATCHLIMIT) && (*ref == *ip))
- ip++;
-_endcount:
-
- /* Encode MatchLength */
- len = (int)(ip - anchor);
- /* Check output limit */
- if (unlikely(op + (1 + LASTLITERALS) + (len >> 8) > oend))
- return 0;
- if (len >= (int)ML_MASK) {
- *token += ML_MASK;
- len -= ML_MASK;
- for (; len > 509 ; len -= 510) {
- *op++ = 255;
- *op++ = 255;
- }
- if (len > 254) {
- len -= 255;
- *op++ = 255;
- }
- *op++ = (u8)len;
- } else
- *token += len;
-
- /* Test end of chunk */
- if (ip > mflimit) {
- anchor = ip;
- break;
- }
-
- /* Fill table */
- hashtable[LZ4_HASH64K_VALUE(ip-2)] = (u16)(ip - 2 - base);
-
- /* Test next position */
- ref = base + hashtable[LZ4_HASH64K_VALUE(ip)];
- hashtable[LZ4_HASH64K_VALUE(ip)] = (u16)(ip - base);
- if (A32(ref) == A32(ip)) {
- token = op++;
- *token = 0;
- goto _next_match;
- }
-
- /* Prepare next loop */
- anchor = ip++;
- forwardh = LZ4_HASH64K_VALUE(ip);
+ memcpy(op, anchor, lastRunSize);
+ op += lastRunSize;
}
-_last_literals:
- /* Encode Last Literals */
- lastrun = (int)(iend - anchor);
- if (op + lastrun + 1 + (lastrun - RUN_MASK + 255) / 255 > oend)
- return 0;
- if (lastrun >= (int)RUN_MASK) {
- *op++ = (RUN_MASK << ML_BITS);
- lastrun -= RUN_MASK;
- for (; lastrun > 254 ; lastrun -= 255)
- *op++ = 255;
- *op++ = (u8)lastrun;
- } else
- *op++ = (lastrun << ML_BITS);
- memcpy(op, anchor, iend - anchor);
- op += iend - anchor;
/* End */
- return (int)(((char *)op) - dest);
+ *srcSizePtr = (int) (((const char *)ip) - src);
+ return (int) (((char *)op) - dst);
}
-int lz4_compress(const unsigned char *src, size_t src_len,
- unsigned char *dst, size_t *dst_len, void *wrkmem)
+static int LZ4_compress_destSize_extState(
+ LZ4_stream_t *state,
+ const char *src,
+ char *dst,
+ int *srcSizePtr,
+ int targetDstSize)
{
- int ret = -1;
- int out_len = 0;
+#if LZ4_ARCH64
+ const tableType_t tableType = byU32;
+#else
+ const tableType_t tableType = byPtr;
+#endif
- if (src_len < LZ4_64KLIMIT)
- out_len = lz4_compress64kctx(wrkmem, src, dst, src_len,
- lz4_compressbound(src_len));
+ LZ4_resetStream(state);
+
+ if (targetDstSize >= LZ4_COMPRESSBOUND(*srcSizePtr)) {
+ /* compression success is guaranteed */
+ return LZ4_compress_fast_extState(
+ state, src, dst, *srcSizePtr,
+ targetDstSize, 1);
+ } else {
+ if (*srcSizePtr < LZ4_64Klimit)
+ return LZ4_compress_destSize_generic(
+ &state->internal_donotuse,
+ src, dst, srcSizePtr,
+ targetDstSize, byU16);
+ else
+ return LZ4_compress_destSize_generic(
+ &state->internal_donotuse,
+ src, dst, srcSizePtr,
+ targetDstSize, tableType);
+ }
+}
+
+
+int LZ4_compress_destSize(
+ const char *src,
+ char *dst,
+ int *srcSizePtr,
+ int targetDstSize,
+ void *wrkmem)
+{
+ return LZ4_compress_destSize_extState(wrkmem, src, dst, srcSizePtr,
+ targetDstSize);
+}
+EXPORT_SYMBOL(LZ4_compress_destSize);
+
+/*-******************************
+ * Streaming functions
+ ********************************/
+void LZ4_resetStream(LZ4_stream_t *LZ4_stream)
+{
+ memset(LZ4_stream, 0, sizeof(LZ4_stream_t));
+}
+
+int LZ4_loadDict(LZ4_stream_t *LZ4_dict,
+ const char *dictionary, int dictSize)
+{
+ LZ4_stream_t_internal *dict = &LZ4_dict->internal_donotuse;
+ const BYTE *p = (const BYTE *)dictionary;
+ const BYTE * const dictEnd = p + dictSize;
+ const BYTE *base;
+
+ if ((dict->initCheck)
+ || (dict->currentOffset > 1 * GB)) {
+ /* Uninitialized structure, or reuse overflow */
+ LZ4_resetStream(LZ4_dict);
+ }
+
+ if (dictSize < (int)HASH_UNIT) {
+ dict->dictionary = NULL;
+ dict->dictSize = 0;
+ return 0;
+ }
+
+ if ((dictEnd - p) > 64 * KB)
+ p = dictEnd - 64 * KB;
+ dict->currentOffset += 64 * KB;
+ base = p - dict->currentOffset;
+ dict->dictionary = p;
+ dict->dictSize = (U32)(dictEnd - p);
+ dict->currentOffset += dict->dictSize;
+
+ while (p <= dictEnd - HASH_UNIT) {
+ LZ4_putPosition(p, dict->hashTable, byU32, base);
+ p += 3;
+ }
+
+ return dict->dictSize;
+}
+EXPORT_SYMBOL(LZ4_loadDict);
+
+static void LZ4_renormDictT(LZ4_stream_t_internal *LZ4_dict,
+ const BYTE *src)
+{
+ if ((LZ4_dict->currentOffset > 0x80000000) ||
+ ((uptrval)LZ4_dict->currentOffset > (uptrval)src)) {
+ /* address space overflow */
+ /* rescale hash table */
+ U32 const delta = LZ4_dict->currentOffset - 64 * KB;
+ const BYTE *dictEnd = LZ4_dict->dictionary + LZ4_dict->dictSize;
+ int i;
+
+ for (i = 0; i < LZ4_HASH_SIZE_U32; i++) {
+ if (LZ4_dict->hashTable[i] < delta)
+ LZ4_dict->hashTable[i] = 0;
+ else
+ LZ4_dict->hashTable[i] -= delta;
+ }
+ LZ4_dict->currentOffset = 64 * KB;
+ if (LZ4_dict->dictSize > 64 * KB)
+ LZ4_dict->dictSize = 64 * KB;
+ LZ4_dict->dictionary = dictEnd - LZ4_dict->dictSize;
+ }
+}
+
+int LZ4_saveDict(LZ4_stream_t *LZ4_dict, char *safeBuffer, int dictSize)
+{
+ LZ4_stream_t_internal * const dict = &LZ4_dict->internal_donotuse;
+ const BYTE * const previousDictEnd = dict->dictionary + dict->dictSize;
+
+ if ((U32)dictSize > 64 * KB) {
+ /* useless to define a dictionary > 64 * KB */
+ dictSize = 64 * KB;
+ }
+ if ((U32)dictSize > dict->dictSize)
+ dictSize = dict->dictSize;
+
+ memmove(safeBuffer, previousDictEnd - dictSize, dictSize);
+
+ dict->dictionary = (const BYTE *)safeBuffer;
+ dict->dictSize = (U32)dictSize;
+
+ return dictSize;
+}
+EXPORT_SYMBOL(LZ4_saveDict);
+
+int LZ4_compress_fast_continue(LZ4_stream_t *LZ4_stream, const char *source,
+ char *dest, int inputSize, int maxOutputSize, int acceleration)
+{
+ LZ4_stream_t_internal *streamPtr = &LZ4_stream->internal_donotuse;
+ const BYTE * const dictEnd = streamPtr->dictionary
+ + streamPtr->dictSize;
+
+ const BYTE *smallest = (const BYTE *) source;
+
+ if (streamPtr->initCheck) {
+ /* Uninitialized structure detected */
+ return 0;
+ }
+
+ if ((streamPtr->dictSize > 0) && (smallest > dictEnd))
+ smallest = dictEnd;
+
+ LZ4_renormDictT(streamPtr, smallest);
+
+ if (acceleration < 1)
+ acceleration = LZ4_ACCELERATION_DEFAULT;
+
+ /* Check overlapping input/dictionary space */
+ {
+ const BYTE *sourceEnd = (const BYTE *) source + inputSize;
+
+ if ((sourceEnd > streamPtr->dictionary)
+ && (sourceEnd < dictEnd)) {
+ streamPtr->dictSize = (U32)(dictEnd - sourceEnd);
+ if (streamPtr->dictSize > 64 * KB)
+ streamPtr->dictSize = 64 * KB;
+ if (streamPtr->dictSize < 4)
+ streamPtr->dictSize = 0;
+ streamPtr->dictionary = dictEnd - streamPtr->dictSize;
+ }
+ }
+
+ /* prefix mode : source data follows dictionary */
+ if (dictEnd == (const BYTE *)source) {
+ int result;
+
+ if ((streamPtr->dictSize < 64 * KB) &&
+ (streamPtr->dictSize < streamPtr->currentOffset)) {
+ result = LZ4_compress_generic(
+ streamPtr, source, dest, inputSize,
+ maxOutputSize, limitedOutput, byU32,
+ withPrefix64k, dictSmall, acceleration);
+ } else {
+ result = LZ4_compress_generic(
+ streamPtr, source, dest, inputSize,
+ maxOutputSize, limitedOutput, byU32,
+ withPrefix64k, noDictIssue, acceleration);
+ }
+ streamPtr->dictSize += (U32)inputSize;
+ streamPtr->currentOffset += (U32)inputSize;
+ return result;
+ }
+
+ /* external dictionary mode */
+ {
+ int result;
+
+ if ((streamPtr->dictSize < 64 * KB) &&
+ (streamPtr->dictSize < streamPtr->currentOffset)) {
+ result = LZ4_compress_generic(
+ streamPtr, source, dest, inputSize,
+ maxOutputSize, limitedOutput, byU32,
+ usingExtDict, dictSmall, acceleration);
+ } else {
+ result = LZ4_compress_generic(
+ streamPtr, source, dest, inputSize,
+ maxOutputSize, limitedOutput, byU32,
+ usingExtDict, noDictIssue, acceleration);
+ }
+ streamPtr->dictionary = (const BYTE *)source;
+ streamPtr->dictSize = (U32)inputSize;
+ streamPtr->currentOffset += (U32)inputSize;
+ return result;
+ }
+}
+EXPORT_SYMBOL(LZ4_compress_fast_continue);
+
+/*-******************************
+ * For backwards compatibility
+ ********************************/
+int lz4_compress(const unsigned char *src, size_t src_len, unsigned char *dst,
+ size_t *dst_len, void *wrkmem) {
+ *dst_len = LZ4_compress_default(src, dst, src_len,
+ LZ4_COMPRESSBOUND(src_len), wrkmem);
+
+ /*
+ * Prior lz4_compress will return -1 in case of error
+ * and 0 on success
+ * while new LZ4_compress_fast/default
+ * returns 0 in case of error
+ * and the output length on success
+ */
+ if (!*dst_len)
+ return -1;
else
- out_len = lz4_compressctx(wrkmem, src, dst, src_len,
- lz4_compressbound(src_len));
-
- if (out_len < 0)
- goto exit;
-
- *dst_len = out_len;
-
- return 0;
-exit:
- return ret;
+ return 0;
}
EXPORT_SYMBOL(lz4_compress);
diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c
index 6d940c7..b5e00a1 100644
--- a/lib/lz4/lz4_decompress.c
+++ b/lib/lz4/lz4_decompress.c
@@ -1,25 +1,16 @@
/*
- * LZ4 Decompressor for Linux kernel
- *
- * Copyright (C) 2013, LG Electronics, Kyungsik Lee <kyungsik.lee@lge.com>
- *
- * Based on LZ4 implementation by Yann Collet.
- *
* LZ4 - Fast LZ compression algorithm
- * Copyright (C) 2011-2012, Yann Collet.
- * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
- *
+ * Copyright (C) 2011 - 2016, Yann Collet.
+ * BSD 2 - Clause License (http://www.opensource.org/licenses/bsd - license.php)
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
- *
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@@ -31,313 +22,529 @@
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * You can contact the author at :
+ * - LZ4 homepage : http://www.lz4.org
+ * - LZ4 source repository : https://github.com/lz4/lz4
*
- * You can contact the author at :
- * - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html
- * - LZ4 source repository : http://code.google.com/p/lz4/
+ * Changed for kernel usage by:
+ * Sven Schmidt <4sschmid@informatik.uni-hamburg.de>
*/
-#ifndef STATIC
+/*-************************************
+ * Dependencies
+ **************************************/
+#include <linux/lz4.h>
+#include "lz4defs.h"
+#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
-#endif
-#include <linux/lz4.h>
-
#include <asm/unaligned.h>
-#include "lz4defs.h"
-
-static const int dec32table[] = {0, 3, 2, 3, 0, 0, 0, 0};
-#if LZ4_ARCH64
-static const int dec64table[] = {0, 0, 0, -1, 0, 1, 2, 3};
-#endif
-
-static int lz4_uncompress(const char *source, char *dest, int osize)
+/*-*****************************
+ * Decompression functions
+ *******************************/
+/* LZ4_decompress_generic() :
+ * This generic decompression function cover all use cases.
+ * It shall be instantiated several times, using different sets of directives
+ * Note that it is important this generic function is really inlined,
+ * in order to remove useless branches during compilation optimization.
+ */
+static FORCE_INLINE int LZ4_decompress_generic(
+ const char * const source,
+ char * const dest,
+ int inputSize,
+ /*
+ * If endOnInput == endOnInputSize,
+ * this value is the max size of Output Buffer.
+ */
+ int outputSize,
+ /* endOnOutputSize, endOnInputSize */
+ int endOnInput,
+ /* full, partial */
+ int partialDecoding,
+ /* only used if partialDecoding == partial */
+ int targetOutputSize,
+ /* noDict, withPrefix64k, usingExtDict */
+ int dict,
+ /* == dest when no prefix */
+ const BYTE * const lowPrefix,
+ /* only if dict == usingExtDict */
+ const BYTE * const dictStart,
+ /* note : = 0 if noDict */
+ const size_t dictSize
+ )
{
+ /* Local Variables */
const BYTE *ip = (const BYTE *) source;
- const BYTE *ref;
+ const BYTE * const iend = ip + inputSize;
+
BYTE *op = (BYTE *) dest;
- BYTE * const oend = op + osize;
+ BYTE * const oend = op + outputSize;
BYTE *cpy;
- unsigned token;
- size_t length;
+ BYTE *oexit = op + targetOutputSize;
+ const BYTE * const lowLimit = lowPrefix - dictSize;
+ const BYTE * const dictEnd = (const BYTE *)dictStart + dictSize;
+ const unsigned int dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 };
+ const int dec64table[] = { 0, 0, 0, -1, 0, 1, 2, 3 };
+
+ const int safeDecode = (endOnInput == endOnInputSize);
+ const int checkOffset = ((safeDecode) && (dictSize < (int)(64 * KB)));
+
+ /* Special cases */
+ /* targetOutputSize too high => decode everything */
+ if ((partialDecoding) && (oexit > oend - MFLIMIT))
+ oexit = oend - MFLIMIT;
+
+ /* Empty output buffer */
+ if ((endOnInput) && (unlikely(outputSize == 0)))
+ return ((inputSize == 1) && (*ip == 0)) ? 0 : -1;
+
+ if ((!endOnInput) && (unlikely(outputSize == 0)))
+ return (*ip == 0 ? 1 : -1);
+
+ /* Main Loop : decode sequences */
while (1) {
+ size_t length;
+ const BYTE *match;
+ size_t offset;
- /* get runlength */
- token = *ip++;
- length = (token >> ML_BITS);
+ /* get literal length */
+ unsigned int const token = *ip++;
+
+ length = token>>ML_BITS;
+
if (length == RUN_MASK) {
- size_t len;
+ unsigned int s;
- len = *ip++;
- for (; len == 255; length += 255)
- len = *ip++;
- if (unlikely(length > (size_t)(length + len)))
+ do {
+ s = *ip++;
+ length += s;
+ } while (likely(endOnInput
+ ? ip < iend - RUN_MASK
+ : 1) & (s == 255));
+
+ if ((safeDecode)
+ && unlikely(
+ (size_t)(op + length) < (size_t)(op))) {
+ /* overflow detection */
goto _output_error;
- length += len;
+ }
+ if ((safeDecode)
+ && unlikely(
+ (size_t)(ip + length) < (size_t)(ip))) {
+ /* overflow detection */
+ goto _output_error;
+ }
}
/* copy literals */
cpy = op + length;
- if (unlikely(cpy > oend - COPYLENGTH)) {
- /*
- * Error: not enough place for another match
- * (min 4) + 5 literals
- */
- if (cpy != oend)
- goto _output_error;
+ if (((endOnInput) && ((cpy > (partialDecoding ? oexit : oend - MFLIMIT))
+ || (ip + length > iend - (2 + 1 + LASTLITERALS))))
+ || ((!endOnInput) && (cpy > oend - WILDCOPYLENGTH))) {
+ if (partialDecoding) {
+ if (cpy > oend) {
+ /*
+ * Error :
+ * write attempt beyond end of output buffer
+ */
+ goto _output_error;
+ }
+ if ((endOnInput)
+ && (ip + length > iend)) {
+ /*
+ * Error :
+ * read attempt beyond
+ * end of input buffer
+ */
+ goto _output_error;
+ }
+ } else {
+ if ((!endOnInput)
+ && (cpy != oend)) {
+ /*
+ * Error :
+ * block decoding must
+ * stop exactly there
+ */
+ goto _output_error;
+ }
+ if ((endOnInput)
+ && ((ip + length != iend)
+ || (cpy > oend))) {
+ /*
+ * Error :
+ * input must be consumed
+ */
+ goto _output_error;
+ }
+ }
memcpy(op, ip, length);
ip += length;
- break; /* EOF */
+ op += length;
+ /* Necessarily EOF, due to parsing restrictions */
+ break;
}
- LZ4_WILDCOPY(ip, op, cpy);
- ip -= (op - cpy);
+
+ LZ4_wildCopy(op, ip, cpy);
+ ip += length;
op = cpy;
/* get offset */
- LZ4_READ_LITTLEENDIAN_16(ref, cpy, ip);
+ offset = LZ4_readLE16(ip);
ip += 2;
+ match = op - offset;
- /* Error: offset create reference outside destination buffer */
- if (unlikely(ref < (BYTE *const) dest))
+ if ((checkOffset) && (unlikely(match < lowLimit))) {
+ /* Error : offset outside buffers */
goto _output_error;
+ }
+
+ /* costs ~1%; silence an msan warning when offset == 0 */
+ LZ4_write32(op, (U32)offset);
/* get matchlength */
length = token & ML_MASK;
if (length == ML_MASK) {
- for (; *ip == 255; length += 255)
- ip++;
- if (unlikely(length > (size_t)(length + *ip)))
+ unsigned int s;
+
+ do {
+ s = *ip++;
+
+ if ((endOnInput) && (ip > iend - LASTLITERALS))
+ goto _output_error;
+
+ length += s;
+ } while (s == 255);
+
+ if ((safeDecode)
+ && unlikely(
+ (size_t)(op + length) < (size_t)op)) {
+ /* overflow detection */
goto _output_error;
- length += *ip++;
+ }
}
- /* copy repeated sequence */
- if (unlikely((op - ref) < STEPSIZE)) {
-#if LZ4_ARCH64
- int dec64 = dec64table[op - ref];
-#else
- const int dec64 = 0;
-#endif
- op[0] = ref[0];
- op[1] = ref[1];
- op[2] = ref[2];
- op[3] = ref[3];
- op += 4;
- ref += 4;
- ref -= dec32table[op-ref];
- PUT4(ref, op);
- op += STEPSIZE - 4;
- ref -= dec64;
- } else {
- LZ4_COPYSTEP(ref, op);
- }
- cpy = op + length - (STEPSIZE - 4);
- if (cpy > (oend - COPYLENGTH)) {
+ length += MINMATCH;
- /* Error: request to write beyond destination buffer */
- if (cpy > oend)
+ /* check external dictionary */
+ if ((dict == usingExtDict) && (match < lowPrefix)) {
+ if (unlikely(op + length > oend - LASTLITERALS)) {
+ /* doesn't respect parsing restriction */
goto _output_error;
-#if LZ4_ARCH64
- if ((ref + COPYLENGTH) > oend)
-#else
- if ((ref + COPYLENGTH) > oend ||
- (op + COPYLENGTH) > oend)
-#endif
- goto _output_error;
- LZ4_SECURECOPY(ref, op, (oend - COPYLENGTH));
- while (op < cpy)
- *op++ = *ref++;
- op = cpy;
- /*
- * Check EOF (should never happen, since last 5 bytes
- * are supposed to be literals)
- */
- if (op == oend)
- goto _output_error;
+ }
+
+ if (length <= (size_t)(lowPrefix - match)) {
+ /*
+ * match can be copied as a single segment
+ * from external dictionary
+ */
+ memmove(op, dictEnd - (lowPrefix - match),
+ length);
+ op += length;
+ } else {
+ /*
+ * match encompass external
+ * dictionary and current block
+ */
+ size_t const copySize = (size_t)(lowPrefix - match);
+ size_t const restSize = length - copySize;
+
+ memcpy(op, dictEnd - copySize, copySize);
+ op += copySize;
+
+ if (restSize > (size_t)(op - lowPrefix)) {
+ /* overlap copy */
+ BYTE * const endOfMatch = op + restSize;
+ const BYTE *copyFrom = lowPrefix;
+
+ while (op < endOfMatch)
+ *op++ = *copyFrom++;
+ } else {
+ memcpy(op, lowPrefix, restSize);
+ op += restSize;
+ }
+ }
+
continue;
}
- LZ4_SECURECOPY(ref, op, cpy);
+
+ /* copy match within block */
+ cpy = op + length;
+
+ if (unlikely(offset < 8)) {
+ const int dec64 = dec64table[offset];
+
+ op[0] = match[0];
+ op[1] = match[1];
+ op[2] = match[2];
+ op[3] = match[3];
+ match += dec32table[offset];
+ memcpy(op + 4, match, 4);
+ match -= dec64;
+ } else {
+ LZ4_copy8(op, match);
+ match += 8;
+ }
+
+ op += 8;
+
+ if (unlikely(cpy > oend - 12)) {
+ BYTE * const oCopyLimit = oend - (WILDCOPYLENGTH - 1);
+
+ if (cpy > oend - LASTLITERALS) {
+ /*
+ * Error : last LASTLITERALS bytes
+ * must be literals (uncompressed)
+ */
+ goto _output_error;
+ }
+
+ if (op < oCopyLimit) {
+ LZ4_wildCopy(op, match, oCopyLimit);
+ match += oCopyLimit - op;
+ op = oCopyLimit;
+ }
+
+ while (op < cpy)
+ *op++ = *match++;
+ } else {
+ LZ4_copy8(op, match);
+
+ if (length > 16)
+ LZ4_wildCopy(op + 8, match + 8, cpy);
+ }
+
op = cpy; /* correction */
}
- /* end of decoding */
- return (int) (((char *)ip) - source);
- /* write overflow error detected */
+ /* end of decoding */
+ if (endOnInput) {
+ /* Nb of output bytes decoded */
+ return (int) (((char *)op) - dest);
+ } else {
+ /* Nb of input bytes read */
+ return (int) (((const char *)ip) - source);
+ }
+
+ /* Overflow error detected */
_output_error:
return -1;
}
-static int lz4_uncompress_unknownoutputsize(const char *source, char *dest,
- int isize, size_t maxoutputsize)
+int LZ4_decompress_safe(const char *source, char *dest,
+ int compressedSize, int maxDecompressedSize)
{
- const BYTE *ip = (const BYTE *) source;
- const BYTE *const iend = ip + isize;
- const BYTE *ref;
+ return LZ4_decompress_generic(source, dest, compressedSize,
+ maxDecompressedSize, endOnInputSize, full, 0,
+ noDict, (BYTE *)dest, NULL, 0);
+}
+int LZ4_decompress_safe_partial(const char *source, char *dest,
+ int compressedSize, int targetOutputSize, int maxDecompressedSize)
+{
+ return LZ4_decompress_generic(source, dest, compressedSize,
+ maxDecompressedSize, endOnInputSize, partial,
+ targetOutputSize, noDict, (BYTE *)dest, NULL, 0);
+}
- BYTE *op = (BYTE *) dest;
- BYTE * const oend = op + maxoutputsize;
- BYTE *cpy;
+int LZ4_decompress_fast(const char *source, char *dest, int originalSize)
+{
+ return LZ4_decompress_generic(source, dest, 0, originalSize,
+ endOnOutputSize, full, 0, withPrefix64k,
+ (BYTE *)(dest - 64 * KB), NULL, 64 * KB);
+}
- /* Main Loop */
- while (ip < iend) {
+int LZ4_setStreamDecode(LZ4_streamDecode_t *LZ4_streamDecode,
+ const char *dictionary, int dictSize)
+{
+ LZ4_streamDecode_t_internal *lz4sd = (LZ4_streamDecode_t_internal *) LZ4_streamDecode;
- unsigned token;
- size_t length;
+ lz4sd->prefixSize = (size_t) dictSize;
+ lz4sd->prefixEnd = (const BYTE *) dictionary + dictSize;
+ lz4sd->externalDict = NULL;
+ lz4sd->extDictSize = 0;
+ return 1;
+}
- /* get runlength */
- token = *ip++;
- length = (token >> ML_BITS);
- if (length == RUN_MASK) {
- int s = 255;
- while ((ip < iend) && (s == 255)) {
- s = *ip++;
- if (unlikely(length > (size_t)(length + s)))
- goto _output_error;
- length += s;
- }
- }
- /* copy literals */
- cpy = op + length;
- if ((cpy > oend - COPYLENGTH) ||
- (ip + length > iend - COPYLENGTH)) {
+/*
+ * *_continue() :
+ * These decoding functions allow decompression of multiple blocks
+ * in "streaming" mode.
+ * Previously decoded blocks must still be available at the memory
+ * position where they were decoded.
+ * If it's not possible, save the relevant part of
+ * decoded data into a safe buffer,
+ * and indicate where it stands using LZ4_setStreamDecode()
+ */
+int LZ4_decompress_safe_continue(LZ4_streamDecode_t *LZ4_streamDecode,
+ const char *source, char *dest, int compressedSize, int maxOutputSize)
+{
+ LZ4_streamDecode_t_internal *lz4sd = &LZ4_streamDecode->internal_donotuse;
+ int result;
- if (cpy > oend)
- goto _output_error;/* writes beyond buffer */
+ if (lz4sd->prefixEnd == (BYTE *)dest) {
+ result = LZ4_decompress_generic(source, dest,
+ compressedSize,
+ maxOutputSize,
+ endOnInputSize, full, 0,
+ usingExtDict, lz4sd->prefixEnd - lz4sd->prefixSize,
+ lz4sd->externalDict,
+ lz4sd->extDictSize);
- if (ip + length != iend)
- goto _output_error;/*
- * Error: LZ4 format requires
- * to consume all input
- * at this stage
- */
- memcpy(op, ip, length);
- op += length;
- break;/* Necessarily EOF, due to parsing restrictions */
- }
- LZ4_WILDCOPY(ip, op, cpy);
- ip -= (op - cpy);
- op = cpy;
+ if (result <= 0)
+ return result;
- /* get offset */
- LZ4_READ_LITTLEENDIAN_16(ref, cpy, ip);
- ip += 2;
- if (ref < (BYTE * const) dest)
- goto _output_error;
- /*
- * Error : offset creates reference
- * outside of destination buffer
- */
-
- /* get matchlength */
- length = (token & ML_MASK);
- if (length == ML_MASK) {
- while (ip < iend) {
- int s = *ip++;
- if (unlikely(length > (size_t)(length + s)))
- goto _output_error;
- length += s;
- if (s == 255)
- continue;
- break;
- }
- }
-
- /* copy repeated sequence */
- if (unlikely((op - ref) < STEPSIZE)) {
-#if LZ4_ARCH64
- int dec64 = dec64table[op - ref];
-#else
- const int dec64 = 0;
-#endif
- op[0] = ref[0];
- op[1] = ref[1];
- op[2] = ref[2];
- op[3] = ref[3];
- op += 4;
- ref += 4;
- ref -= dec32table[op - ref];
- PUT4(ref, op);
- op += STEPSIZE - 4;
- ref -= dec64;
- } else {
- LZ4_COPYSTEP(ref, op);
- }
- cpy = op + length - (STEPSIZE-4);
- if (cpy > oend - COPYLENGTH) {
- if (cpy > oend)
- goto _output_error; /* write outside of buf */
-#if LZ4_ARCH64
- if ((ref + COPYLENGTH) > oend)
-#else
- if ((ref + COPYLENGTH) > oend ||
- (op + COPYLENGTH) > oend)
-#endif
- goto _output_error;
- LZ4_SECURECOPY(ref, op, (oend - COPYLENGTH));
- while (op < cpy)
- *op++ = *ref++;
- op = cpy;
- /*
- * Check EOF (should never happen, since last 5 bytes
- * are supposed to be literals)
- */
- if (op == oend)
- goto _output_error;
- continue;
- }
- LZ4_SECURECOPY(ref, op, cpy);
- op = cpy; /* correction */
+ lz4sd->prefixSize += result;
+ lz4sd->prefixEnd += result;
+ } else {
+ lz4sd->extDictSize = lz4sd->prefixSize;
+ lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
+ result = LZ4_decompress_generic(source, dest,
+ compressedSize, maxOutputSize,
+ endOnInputSize, full, 0,
+ usingExtDict, (BYTE *)dest,
+ lz4sd->externalDict, lz4sd->extDictSize);
+ if (result <= 0)
+ return result;
+ lz4sd->prefixSize = result;
+ lz4sd->prefixEnd = (BYTE *)dest + result;
}
- /* end of decoding */
- return (int) (((char *) op) - dest);
- /* write overflow error detected */
-_output_error:
- return -1;
+ return result;
+}
+
+int LZ4_decompress_fast_continue(LZ4_streamDecode_t *LZ4_streamDecode,
+ const char *source, char *dest, int originalSize)
+{
+ LZ4_streamDecode_t_internal *lz4sd = &LZ4_streamDecode->internal_donotuse;
+ int result;
+
+ if (lz4sd->prefixEnd == (BYTE *)dest) {
+ result = LZ4_decompress_generic(source, dest, 0, originalSize,
+ endOnOutputSize, full, 0,
+ usingExtDict,
+ lz4sd->prefixEnd - lz4sd->prefixSize,
+ lz4sd->externalDict, lz4sd->extDictSize);
+
+ if (result <= 0)
+ return result;
+
+ lz4sd->prefixSize += originalSize;
+ lz4sd->prefixEnd += originalSize;
+ } else {
+ lz4sd->extDictSize = lz4sd->prefixSize;
+ lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
+ result = LZ4_decompress_generic(source, dest, 0, originalSize,
+ endOnOutputSize, full, 0,
+ usingExtDict, (BYTE *)dest,
+ lz4sd->externalDict, lz4sd->extDictSize);
+ if (result <= 0)
+ return result;
+ lz4sd->prefixSize = originalSize;
+ lz4sd->prefixEnd = (BYTE *)dest + originalSize;
+ }
+
+ return result;
+}
+
+/*
+ * Advanced decoding functions :
+ * *_usingDict() :
+ * These decoding functions work the same as "_continue" ones,
+ * the dictionary must be explicitly provided within parameters
+ */
+static FORCE_INLINE int LZ4_decompress_usingDict_generic(const char *source,
+ char *dest, int compressedSize, int maxOutputSize, int safe,
+ const char *dictStart, int dictSize)
+{
+ if (dictSize == 0)
+ return LZ4_decompress_generic(source, dest,
+ compressedSize, maxOutputSize, safe, full, 0,
+ noDict, (BYTE *)dest, NULL, 0);
+ if (dictStart + dictSize == dest) {
+ if (dictSize >= (int)(64 * KB - 1))
+ return LZ4_decompress_generic(source, dest,
+ compressedSize, maxOutputSize, safe, full, 0,
+ withPrefix64k, (BYTE *)dest - 64 * KB, NULL, 0);
+ return LZ4_decompress_generic(source, dest, compressedSize,
+ maxOutputSize, safe, full, 0, noDict,
+ (BYTE *)dest - dictSize, NULL, 0);
+ }
+ return LZ4_decompress_generic(source, dest, compressedSize,
+ maxOutputSize, safe, full, 0, usingExtDict,
+ (BYTE *)dest, (const BYTE *)dictStart, dictSize);
+}
+
+int LZ4_decompress_safe_usingDict(const char *source, char *dest,
+ int compressedSize, int maxOutputSize,
+ const char *dictStart, int dictSize)
+{
+ return LZ4_decompress_usingDict_generic(source, dest,
+ compressedSize, maxOutputSize, 1, dictStart, dictSize);
+}
+
+int LZ4_decompress_fast_usingDict(const char *source, char *dest,
+ int originalSize, const char *dictStart, int dictSize)
+{
+ return LZ4_decompress_usingDict_generic(source, dest, 0,
+ originalSize, 0, dictStart, dictSize);
+}
+
+/*-******************************
+ * For backwards compatibility
+ ********************************/
+int lz4_decompress_unknownoutputsize(const unsigned char *src,
+ size_t src_len, unsigned char *dest, size_t *dest_len) {
+ *dest_len = LZ4_decompress_safe(src, dest,
+ src_len, *dest_len);
+
+ /*
+ * Prior lz4_decompress_unknownoutputsize will return
+ * 0 for success and a negative result for error
+ * new LZ4_decompress_safe returns
+ * - the length of data read on success
+ * - and also a negative result on error
+ * meaning when result > 0, we just return 0 here
+ */
+	if (*dest_len > 0)
+ return 0;
+ else
+ return -1;
}
int lz4_decompress(const unsigned char *src, size_t *src_len,
- unsigned char *dest, size_t actual_dest_len)
-{
- int ret = -1;
- int input_len = 0;
+ unsigned char *dest, size_t actual_dest_len) {
+ *src_len = LZ4_decompress_fast(src, dest, actual_dest_len);
- input_len = lz4_uncompress(src, dest, actual_dest_len);
- if (input_len < 0)
- goto exit_0;
- *src_len = input_len;
-
- return 0;
-exit_0:
- return ret;
+ /*
+ * Prior lz4_decompress will return
+ * 0 for success and a negative result for error
+ * new LZ4_decompress_fast returns
+ * - the length of data read on success
+ * - and also a negative result on error
+ * meaning when result > 0, we just return 0 here
+ */
+ if (*src_len > 0)
+ return 0;
+ else
+ return -1;
}
+
#ifndef STATIC
-EXPORT_SYMBOL(lz4_decompress);
-#endif
-
-int lz4_decompress_unknownoutputsize(const unsigned char *src, size_t src_len,
- unsigned char *dest, size_t *dest_len)
-{
- int ret = -1;
- int out_len = 0;
-
- out_len = lz4_uncompress_unknownoutputsize(src, dest, src_len,
- *dest_len);
- if (out_len < 0)
- goto exit_0;
- *dest_len = out_len;
-
- return 0;
-exit_0:
- return ret;
-}
-#ifndef STATIC
+EXPORT_SYMBOL(LZ4_decompress_safe);
+EXPORT_SYMBOL(LZ4_decompress_safe_partial);
+EXPORT_SYMBOL(LZ4_decompress_fast);
+EXPORT_SYMBOL(LZ4_setStreamDecode);
+EXPORT_SYMBOL(LZ4_decompress_safe_continue);
+EXPORT_SYMBOL(LZ4_decompress_fast_continue);
+EXPORT_SYMBOL(LZ4_decompress_safe_usingDict);
+EXPORT_SYMBOL(LZ4_decompress_fast_usingDict);
EXPORT_SYMBOL(lz4_decompress_unknownoutputsize);
+EXPORT_SYMBOL(lz4_decompress);
MODULE_LICENSE("Dual BSD/GPL");
-MODULE_DESCRIPTION("LZ4 Decompressor");
+MODULE_DESCRIPTION("LZ4 decompressor");
#endif
diff --git a/lib/lz4/lz4defs.h b/lib/lz4/lz4defs.h
index 0710a62..00a0b58 100644
--- a/lib/lz4/lz4defs.h
+++ b/lib/lz4/lz4defs.h
@@ -1,159 +1,227 @@
-/*
- * lz4defs.h -- architecture specific defines
- *
- * Copyright (C) 2013, LG Electronics, Kyungsik Lee <kyungsik.lee@lge.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
+#ifndef __LZ4DEFS_H__
+#define __LZ4DEFS_H__
/*
- * Detects 64 bits mode
+ * lz4defs.h -- common and architecture specific defines for the kernel usage
+
+ * LZ4 - Fast LZ compression algorithm
+ * Copyright (C) 2011-2016, Yann Collet.
+ * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * You can contact the author at :
+ * - LZ4 homepage : http://www.lz4.org
+ * - LZ4 source repository : https://github.com/lz4/lz4
+ *
+ * Changed for kernel usage by:
+ * Sven Schmidt <4sschmid@informatik.uni-hamburg.de>
*/
+
+#include <asm/unaligned.h>
+#include <linux/string.h> /* memset, memcpy */
+
+#define FORCE_INLINE __always_inline
+
+/*-************************************
+ * Basic Types
+ **************************************/
+#include <linux/types.h>
+
+typedef uint8_t BYTE;
+typedef uint16_t U16;
+typedef uint32_t U32;
+typedef int32_t S32;
+typedef uint64_t U64;
+typedef uintptr_t uptrval;
+
+/*-************************************
+ * Architecture specifics
+ **************************************/
#if defined(CONFIG_64BIT)
#define LZ4_ARCH64 1
#else
#define LZ4_ARCH64 0
#endif
-/*
- * Architecture-specific macros
- */
-#define BYTE u8
-typedef struct _U16_S { u16 v; } U16_S;
-typedef struct _U32_S { u32 v; } U32_S;
-typedef struct _U64_S { u64 v; } U64_S;
-#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) \
- || defined(CONFIG_ARM) && __LINUX_ARM_ARCH__ >= 6 \
- && defined(ARM_EFFICIENT_UNALIGNED_ACCESS)
-
-#define A16(x) (((U16_S *)(x))->v)
-#define A32(x) (((U32_S *)(x))->v)
-#define A64(x) (((U64_S *)(x))->v)
-
-#define PUT4(s, d) (A32(d) = A32(s))
-#define PUT8(s, d) (A64(d) = A64(s))
-
-#define LZ4_READ_LITTLEENDIAN_16(d, s, p) \
- (d = s - A16(p))
-
-#define LZ4_WRITE_LITTLEENDIAN_16(p, v) \
- do { \
- A16(p) = v; \
- p += 2; \
- } while (0)
-#else /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */
-
-#define A64(x) get_unaligned((u64 *)&(((U16_S *)(x))->v))
-#define A32(x) get_unaligned((u32 *)&(((U16_S *)(x))->v))
-#define A16(x) get_unaligned((u16 *)&(((U16_S *)(x))->v))
-
-#define PUT4(s, d) \
- put_unaligned(get_unaligned((const u32 *) s), (u32 *) d)
-#define PUT8(s, d) \
- put_unaligned(get_unaligned((const u64 *) s), (u64 *) d)
-
-#define LZ4_READ_LITTLEENDIAN_16(d, s, p) \
- (d = s - get_unaligned_le16(p))
-
-#define LZ4_WRITE_LITTLEENDIAN_16(p, v) \
- do { \
- put_unaligned_le16(v, (u16 *)(p)); \
- p += 2; \
- } while (0)
+#if defined(__LITTLE_ENDIAN)
+#define LZ4_LITTLE_ENDIAN 1
+#else
+#define LZ4_LITTLE_ENDIAN 0
#endif
-#define COPYLENGTH 8
-#define ML_BITS 4
-#define ML_MASK ((1U << ML_BITS) - 1)
+/*-************************************
+ * Constants
+ **************************************/
+#define MINMATCH 4
+
+#define WILDCOPYLENGTH 8
+#define LASTLITERALS 5
+#define MFLIMIT (WILDCOPYLENGTH + MINMATCH)
+
+/* Increase this value ==> compression run slower on incompressible data */
+#define LZ4_SKIPTRIGGER 6
+
+#define HASH_UNIT sizeof(size_t)
+
+#define KB (1 << 10)
+#define MB (1 << 20)
+#define GB (1U << 30)
+
+#define MAXD_LOG 16
+#define MAX_DISTANCE ((1 << MAXD_LOG) - 1)
+#define STEPSIZE sizeof(size_t)
+
+#define ML_BITS 4
+#define ML_MASK ((1U << ML_BITS) - 1)
#define RUN_BITS (8 - ML_BITS)
#define RUN_MASK ((1U << RUN_BITS) - 1)
-#define MEMORY_USAGE 14
-#define MINMATCH 4
-#define SKIPSTRENGTH 6
-#define LASTLITERALS 5
-#define MFLIMIT (COPYLENGTH + MINMATCH)
-#define MINLENGTH (MFLIMIT + 1)
-#define MAXD_LOG 16
-#define MAXD (1 << MAXD_LOG)
-#define MAXD_MASK (u32)(MAXD - 1)
-#define MAX_DISTANCE (MAXD - 1)
-#define HASH_LOG (MAXD_LOG - 1)
-#define HASHTABLESIZE (1 << HASH_LOG)
-#define MAX_NB_ATTEMPTS 256
-#define OPTIMAL_ML (int)((ML_MASK-1)+MINMATCH)
-#define LZ4_64KLIMIT ((1<<16) + (MFLIMIT - 1))
-#define HASHLOG64K ((MEMORY_USAGE - 2) + 1)
-#define HASH64KTABLESIZE (1U << HASHLOG64K)
-#define LZ4_HASH_VALUE(p) (((A32(p)) * 2654435761U) >> \
- ((MINMATCH * 8) - (MEMORY_USAGE-2)))
-#define LZ4_HASH64K_VALUE(p) (((A32(p)) * 2654435761U) >> \
- ((MINMATCH * 8) - HASHLOG64K))
-#define HASH_VALUE(p) (((A32(p)) * 2654435761U) >> \
- ((MINMATCH * 8) - HASH_LOG))
-#if LZ4_ARCH64/* 64-bit */
-#define STEPSIZE 8
+/*-************************************
+ * Reading and writing into memory
+ **************************************/
+static FORCE_INLINE U16 LZ4_read16(const void *ptr)
+{
+ return get_unaligned((const U16 *)ptr);
+}
-#define LZ4_COPYSTEP(s, d) \
- do { \
- PUT8(s, d); \
- d += 8; \
- s += 8; \
- } while (0)
+static FORCE_INLINE U32 LZ4_read32(const void *ptr)
+{
+ return get_unaligned((const U32 *)ptr);
+}
-#define LZ4_COPYPACKET(s, d) LZ4_COPYSTEP(s, d)
+static FORCE_INLINE size_t LZ4_read_ARCH(const void *ptr)
+{
+ return get_unaligned((const size_t *)ptr);
+}
-#define LZ4_SECURECOPY(s, d, e) \
- do { \
- if (d < e) { \
- LZ4_WILDCOPY(s, d, e); \
- } \
- } while (0)
-#define HTYPE u32
+static FORCE_INLINE void LZ4_write16(void *memPtr, U16 value)
+{
+ put_unaligned(value, (U16 *)memPtr);
+}
-#ifdef __BIG_ENDIAN
-#define LZ4_NBCOMMONBYTES(val) (__builtin_clzll(val) >> 3)
+static FORCE_INLINE void LZ4_write32(void *memPtr, U32 value)
+{
+ put_unaligned(value, (U32 *)memPtr);
+}
+
+static FORCE_INLINE U16 LZ4_readLE16(const void *memPtr)
+{
+ return get_unaligned_le16(memPtr);
+}
+
+static FORCE_INLINE void LZ4_writeLE16(void *memPtr, U16 value)
+{
+ return put_unaligned_le16(value, memPtr);
+}
+
+static FORCE_INLINE void LZ4_copy8(void *dst, const void *src)
+{
+#if LZ4_ARCH64
+ U64 a = get_unaligned((const U64 *)src);
+
+ put_unaligned(a, (U64 *)dst);
#else
-#define LZ4_NBCOMMONBYTES(val) (__builtin_ctzll(val) >> 3)
+ U32 a = get_unaligned((const U32 *)src);
+ U32 b = get_unaligned((const U32 *)src + 1);
+
+ put_unaligned(a, (U32 *)dst);
+ put_unaligned(b, (U32 *)dst + 1);
#endif
+}
-#else /* 32-bit */
-#define STEPSIZE 4
+/*
+ * customized variant of memcpy,
+ * which can overwrite up to 7 bytes beyond dstEnd
+ */
+static FORCE_INLINE void LZ4_wildCopy(void *dstPtr,
+ const void *srcPtr, void *dstEnd)
+{
+ BYTE *d = (BYTE *)dstPtr;
+ const BYTE *s = (const BYTE *)srcPtr;
+ BYTE *const e = (BYTE *)dstEnd;
-#define LZ4_COPYSTEP(s, d) \
- do { \
- PUT4(s, d); \
- d += 4; \
- s += 4; \
- } while (0)
+ do {
+ LZ4_copy8(d, s);
+ d += 8;
+ s += 8;
+ } while (d < e);
+}
-#define LZ4_COPYPACKET(s, d) \
- do { \
- LZ4_COPYSTEP(s, d); \
- LZ4_COPYSTEP(s, d); \
- } while (0)
-
-#define LZ4_SECURECOPY LZ4_WILDCOPY
-#define HTYPE const u8*
-
-#ifdef __BIG_ENDIAN
-#define LZ4_NBCOMMONBYTES(val) (__builtin_clz(val) >> 3)
+static FORCE_INLINE unsigned int LZ4_NbCommonBytes(register size_t val)
+{
+#if LZ4_LITTLE_ENDIAN
+ return __ffs(val) >> 3;
#else
-#define LZ4_NBCOMMONBYTES(val) (__builtin_ctz(val) >> 3)
+ return (BITS_PER_LONG - 1 - __fls(val)) >> 3;
+#endif
+}
+
+static FORCE_INLINE unsigned int LZ4_count(
+ const BYTE *pIn,
+ const BYTE *pMatch,
+ const BYTE *pInLimit)
+{
+ const BYTE *const pStart = pIn;
+
+ while (likely(pIn < pInLimit - (STEPSIZE - 1))) {
+ size_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);
+
+ if (!diff) {
+ pIn += STEPSIZE;
+ pMatch += STEPSIZE;
+ continue;
+ }
+
+ pIn += LZ4_NbCommonBytes(diff);
+
+ return (unsigned int)(pIn - pStart);
+ }
+
+#if LZ4_ARCH64
+ if ((pIn < (pInLimit - 3))
+ && (LZ4_read32(pMatch) == LZ4_read32(pIn))) {
+ pIn += 4;
+ pMatch += 4;
+ }
#endif
+ if ((pIn < (pInLimit - 1))
+ && (LZ4_read16(pMatch) == LZ4_read16(pIn))) {
+ pIn += 2;
+ pMatch += 2;
+ }
+
+ if ((pIn < pInLimit) && (*pMatch == *pIn))
+ pIn++;
+
+ return (unsigned int)(pIn - pStart);
+}
+
+typedef enum { noLimit = 0, limitedOutput = 1 } limitedOutput_directive;
+typedef enum { byPtr, byU32, byU16 } tableType_t;
+
+typedef enum { noDict = 0, withPrefix64k, usingExtDict } dict_directive;
+typedef enum { noDictIssue = 0, dictSmall } dictIssue_directive;
+
+typedef enum { endOnOutputSize = 0, endOnInputSize = 1 } endCondition_directive;
+typedef enum { full = 0, partial = 1 } earlyEnd_directive;
+
#endif
-
-#define LZ4_WILDCOPY(s, d, e) \
- do { \
- LZ4_COPYPACKET(s, d); \
- } while (d < e)
-
-#define LZ4_BLINDCOPY(s, d, l) \
- do { \
- u8 *e = (d) + l; \
- LZ4_WILDCOPY(s, d, e); \
- d = e; \
- } while (0)
diff --git a/lib/lz4/lz4hc_compress.c b/lib/lz4/lz4hc_compress.c
index f344f76b..e1fbf15 100644
--- a/lib/lz4/lz4hc_compress.c
+++ b/lib/lz4/lz4hc_compress.c
@@ -1,19 +1,17 @@
/*
* LZ4 HC - High Compression Mode of LZ4
- * Copyright (C) 2011-2012, Yann Collet.
- * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+ * Copyright (C) 2011-2015, Yann Collet.
*
+ * BSD 2 - Clause License (http://www.opensource.org/licenses/bsd - license.php)
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following disclaimer
* in the documentation and/or other materials provided with the
* distribution.
- *
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@@ -25,323 +23,361 @@
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
* You can contact the author at :
- * - LZ4 homepage : http://fastcompression.blogspot.com/p/lz4.html
- * - LZ4 source repository : http://code.google.com/p/lz4/
+ * - LZ4 homepage : http://www.lz4.org
+ * - LZ4 source repository : https://github.com/lz4/lz4
*
- * Changed for kernel use by:
- * Chanho Min <chanho.min@lge.com>
+ * Changed for kernel usage by:
+ * Sven Schmidt <4sschmid@informatik.uni-hamburg.de>
*/
+/*-************************************
+ * Dependencies
+ **************************************/
+#include <linux/lz4.h>
+#include "lz4defs.h"
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/lz4.h>
-#include <asm/unaligned.h>
-#include "lz4defs.h"
+#include <linux/string.h> /* memset */
-struct lz4hc_data {
- const u8 *base;
- HTYPE hashtable[HASHTABLESIZE];
- u16 chaintable[MAXD];
- const u8 *nexttoupdate;
-} __attribute__((__packed__));
+/* *************************************
+ * Local Constants and types
+ ***************************************/
-static inline int lz4hc_init(struct lz4hc_data *hc4, const u8 *base)
+#define OPTIMAL_ML (int)((ML_MASK - 1) + MINMATCH)
+
+#define HASH_FUNCTION(i) (((i) * 2654435761U) \
+ >> ((MINMATCH*8) - LZ4HC_HASH_LOG))
+#define DELTANEXTU16(p) chainTable[(U16)(p)] /* faster */
+
+static U32 LZ4HC_hashPtr(const void *ptr)
{
- memset((void *)hc4->hashtable, 0, sizeof(hc4->hashtable));
- memset(hc4->chaintable, 0xFF, sizeof(hc4->chaintable));
+ return HASH_FUNCTION(LZ4_read32(ptr));
+}
-#if LZ4_ARCH64
- hc4->nexttoupdate = base + 1;
-#else
- hc4->nexttoupdate = base;
-#endif
- hc4->base = base;
- return 1;
+/**************************************
+ * HC Compression
+ **************************************/
+static void LZ4HC_init(LZ4HC_CCtx_internal *hc4, const BYTE *start)
+{
+ memset((void *)hc4->hashTable, 0, sizeof(hc4->hashTable));
+ memset(hc4->chainTable, 0xFF, sizeof(hc4->chainTable));
+ hc4->nextToUpdate = 64 * KB;
+ hc4->base = start - 64 * KB;
+ hc4->end = start;
+ hc4->dictBase = start - 64 * KB;
+ hc4->dictLimit = 64 * KB;
+ hc4->lowLimit = 64 * KB;
}
/* Update chains up to ip (excluded) */
-static inline void lz4hc_insert(struct lz4hc_data *hc4, const u8 *ip)
+static FORCE_INLINE void LZ4HC_Insert(LZ4HC_CCtx_internal *hc4,
+ const BYTE *ip)
{
- u16 *chaintable = hc4->chaintable;
- HTYPE *hashtable = hc4->hashtable;
-#if LZ4_ARCH64
+ U16 * const chainTable = hc4->chainTable;
+ U32 * const hashTable = hc4->hashTable;
const BYTE * const base = hc4->base;
-#else
- const int base = 0;
-#endif
+ U32 const target = (U32)(ip - base);
+ U32 idx = hc4->nextToUpdate;
- while (hc4->nexttoupdate < ip) {
- const u8 *p = hc4->nexttoupdate;
- size_t delta = p - (hashtable[HASH_VALUE(p)] + base);
+ while (idx < target) {
+ U32 const h = LZ4HC_hashPtr(base + idx);
+ size_t delta = idx - hashTable[h];
+
if (delta > MAX_DISTANCE)
delta = MAX_DISTANCE;
- chaintable[(size_t)(p) & MAXD_MASK] = (u16)delta;
- hashtable[HASH_VALUE(p)] = (p) - base;
- hc4->nexttoupdate++;
+
+ DELTANEXTU16(idx) = (U16)delta;
+
+ hashTable[h] = idx;
+ idx++;
}
+
+ hc4->nextToUpdate = target;
}
-static inline size_t lz4hc_commonlength(const u8 *p1, const u8 *p2,
- const u8 *const matchlimit)
+static FORCE_INLINE int LZ4HC_InsertAndFindBestMatch(
+ LZ4HC_CCtx_internal *hc4, /* Index table will be updated */
+ const BYTE *ip,
+ const BYTE * const iLimit,
+ const BYTE **matchpos,
+ const int maxNbAttempts)
{
- const u8 *p1t = p1;
-
- while (p1t < matchlimit - (STEPSIZE - 1)) {
-#if LZ4_ARCH64
- u64 diff = A64(p2) ^ A64(p1t);
-#else
- u32 diff = A32(p2) ^ A32(p1t);
-#endif
- if (!diff) {
- p1t += STEPSIZE;
- p2 += STEPSIZE;
- continue;
- }
- p1t += LZ4_NBCOMMONBYTES(diff);
- return p1t - p1;
- }
-#if LZ4_ARCH64
- if ((p1t < (matchlimit-3)) && (A32(p2) == A32(p1t))) {
- p1t += 4;
- p2 += 4;
- }
-#endif
-
- if ((p1t < (matchlimit - 1)) && (A16(p2) == A16(p1t))) {
- p1t += 2;
- p2 += 2;
- }
- if ((p1t < matchlimit) && (*p2 == *p1t))
- p1t++;
- return p1t - p1;
-}
-
-static inline int lz4hc_insertandfindbestmatch(struct lz4hc_data *hc4,
- const u8 *ip, const u8 *const matchlimit, const u8 **matchpos)
-{
- u16 *const chaintable = hc4->chaintable;
- HTYPE *const hashtable = hc4->hashtable;
- const u8 *ref;
-#if LZ4_ARCH64
+ U16 * const chainTable = hc4->chainTable;
+ U32 * const HashTable = hc4->hashTable;
const BYTE * const base = hc4->base;
-#else
- const int base = 0;
-#endif
- int nbattempts = MAX_NB_ATTEMPTS;
- size_t repl = 0, ml = 0;
- u16 delta;
+ const BYTE * const dictBase = hc4->dictBase;
+ const U32 dictLimit = hc4->dictLimit;
+ const U32 lowLimit = (hc4->lowLimit + 64 * KB > (U32)(ip - base))
+ ? hc4->lowLimit
+ : (U32)(ip - base) - (64 * KB - 1);
+ U32 matchIndex;
+ int nbAttempts = maxNbAttempts;
+ size_t ml = 0;
/* HC4 match finder */
- lz4hc_insert(hc4, ip);
- ref = hashtable[HASH_VALUE(ip)] + base;
+ LZ4HC_Insert(hc4, ip);
+ matchIndex = HashTable[LZ4HC_hashPtr(ip)];
- /* potential repetition */
- if (ref >= ip-4) {
- /* confirmed */
- if (A32(ref) == A32(ip)) {
- delta = (u16)(ip-ref);
- repl = ml = lz4hc_commonlength(ip + MINMATCH,
- ref + MINMATCH, matchlimit) + MINMATCH;
- *matchpos = ref;
- }
- ref -= (size_t)chaintable[(size_t)(ref) & MAXD_MASK];
- }
+ while ((matchIndex >= lowLimit)
+ && (nbAttempts)) {
+ nbAttempts--;
+ if (matchIndex >= dictLimit) {
+ const BYTE * const match = base + matchIndex;
- while ((ref >= ip - MAX_DISTANCE) && nbattempts) {
- nbattempts--;
- if (*(ref + ml) == *(ip + ml)) {
- if (A32(ref) == A32(ip)) {
- size_t mlt =
- lz4hc_commonlength(ip + MINMATCH,
- ref + MINMATCH, matchlimit) + MINMATCH;
+ if (*(match + ml) == *(ip + ml)
+ && (LZ4_read32(match) == LZ4_read32(ip))) {
+ size_t const mlt = LZ4_count(ip + MINMATCH,
+ match + MINMATCH, iLimit) + MINMATCH;
+
if (mlt > ml) {
ml = mlt;
- *matchpos = ref;
+ *matchpos = match;
+ }
+ }
+ } else {
+ const BYTE * const match = dictBase + matchIndex;
+
+ if (LZ4_read32(match) == LZ4_read32(ip)) {
+ size_t mlt;
+ const BYTE *vLimit = ip
+ + (dictLimit - matchIndex);
+
+ if (vLimit > iLimit)
+ vLimit = iLimit;
+ mlt = LZ4_count(ip + MINMATCH,
+ match + MINMATCH, vLimit) + MINMATCH;
+ if ((ip + mlt == vLimit)
+ && (vLimit < iLimit))
+ mlt += LZ4_count(ip + mlt,
+ base + dictLimit,
+ iLimit);
+ if (mlt > ml) {
+ /* virtual matchpos */
+ ml = mlt;
+ *matchpos = base + matchIndex;
}
}
}
- ref -= (size_t)chaintable[(size_t)(ref) & MAXD_MASK];
- }
-
- /* Complete table */
- if (repl) {
- const BYTE *ptr = ip;
- const BYTE *end;
- end = ip + repl - (MINMATCH-1);
- /* Pre-Load */
- while (ptr < end - delta) {
- chaintable[(size_t)(ptr) & MAXD_MASK] = delta;
- ptr++;
- }
- do {
- chaintable[(size_t)(ptr) & MAXD_MASK] = delta;
- /* Head of chain */
- hashtable[HASH_VALUE(ptr)] = (ptr) - base;
- ptr++;
- } while (ptr < end);
- hc4->nexttoupdate = end;
+ matchIndex -= DELTANEXTU16(matchIndex);
}
return (int)ml;
}
-static inline int lz4hc_insertandgetwidermatch(struct lz4hc_data *hc4,
- const u8 *ip, const u8 *startlimit, const u8 *matchlimit, int longest,
- const u8 **matchpos, const u8 **startpos)
+static FORCE_INLINE int LZ4HC_InsertAndGetWiderMatch(
+ LZ4HC_CCtx_internal *hc4,
+ const BYTE * const ip,
+ const BYTE * const iLowLimit,
+ const BYTE * const iHighLimit,
+ int longest,
+ const BYTE **matchpos,
+ const BYTE **startpos,
+ const int maxNbAttempts)
{
- u16 *const chaintable = hc4->chaintable;
- HTYPE *const hashtable = hc4->hashtable;
-#if LZ4_ARCH64
+ U16 * const chainTable = hc4->chainTable;
+ U32 * const HashTable = hc4->hashTable;
const BYTE * const base = hc4->base;
-#else
- const int base = 0;
-#endif
- const u8 *ref;
- int nbattempts = MAX_NB_ATTEMPTS;
- int delta = (int)(ip - startlimit);
+ const U32 dictLimit = hc4->dictLimit;
+ const BYTE * const lowPrefixPtr = base + dictLimit;
+ const U32 lowLimit = (hc4->lowLimit + 64 * KB > (U32)(ip - base))
+ ? hc4->lowLimit
+ : (U32)(ip - base) - (64 * KB - 1);
+ const BYTE * const dictBase = hc4->dictBase;
+ U32 matchIndex;
+ int nbAttempts = maxNbAttempts;
+ int delta = (int)(ip - iLowLimit);
/* First Match */
- lz4hc_insert(hc4, ip);
- ref = hashtable[HASH_VALUE(ip)] + base;
+ LZ4HC_Insert(hc4, ip);
+ matchIndex = HashTable[LZ4HC_hashPtr(ip)];
- while ((ref >= ip - MAX_DISTANCE) && (ref >= hc4->base)
- && (nbattempts)) {
- nbattempts--;
- if (*(startlimit + longest) == *(ref - delta + longest)) {
- if (A32(ref) == A32(ip)) {
- const u8 *reft = ref + MINMATCH;
- const u8 *ipt = ip + MINMATCH;
- const u8 *startt = ip;
+ while ((matchIndex >= lowLimit)
+ && (nbAttempts)) {
+ nbAttempts--;
+ if (matchIndex >= dictLimit) {
+ const BYTE *matchPtr = base + matchIndex;
- while (ipt < matchlimit-(STEPSIZE - 1)) {
- #if LZ4_ARCH64
- u64 diff = A64(reft) ^ A64(ipt);
- #else
- u32 diff = A32(reft) ^ A32(ipt);
- #endif
+ if (*(iLowLimit + longest)
+ == *(matchPtr - delta + longest)) {
+ if (LZ4_read32(matchPtr) == LZ4_read32(ip)) {
+ int mlt = MINMATCH + LZ4_count(
+ ip + MINMATCH,
+ matchPtr + MINMATCH,
+ iHighLimit);
+ int back = 0;
- if (!diff) {
- ipt += STEPSIZE;
- reft += STEPSIZE;
- continue;
+ while ((ip + back > iLowLimit)
+ && (matchPtr + back > lowPrefixPtr)
+ && (ip[back - 1] == matchPtr[back - 1]))
+ back--;
+
+ mlt -= back;
+
+ if (mlt > longest) {
+ longest = (int)mlt;
+ *matchpos = matchPtr + back;
+ *startpos = ip + back;
}
- ipt += LZ4_NBCOMMONBYTES(diff);
- goto _endcount;
}
- #if LZ4_ARCH64
- if ((ipt < (matchlimit - 3))
- && (A32(reft) == A32(ipt))) {
- ipt += 4;
- reft += 4;
- }
- ipt += 2;
- #endif
- if ((ipt < (matchlimit - 1))
- && (A16(reft) == A16(ipt))) {
- reft += 2;
- }
- if ((ipt < matchlimit) && (*reft == *ipt))
- ipt++;
-_endcount:
- reft = ref;
+ }
+ } else {
+ const BYTE * const matchPtr = dictBase + matchIndex;
- while ((startt > startlimit)
- && (reft > hc4->base)
- && (startt[-1] == reft[-1])) {
- startt--;
- reft--;
- }
+ if (LZ4_read32(matchPtr) == LZ4_read32(ip)) {
+ size_t mlt;
+ int back = 0;
+ const BYTE *vLimit = ip + (dictLimit - matchIndex);
- if ((ipt - startt) > longest) {
- longest = (int)(ipt - startt);
- *matchpos = reft;
- *startpos = startt;
+ if (vLimit > iHighLimit)
+ vLimit = iHighLimit;
+
+ mlt = LZ4_count(ip + MINMATCH,
+ matchPtr + MINMATCH, vLimit) + MINMATCH;
+
+ if ((ip + mlt == vLimit) && (vLimit < iHighLimit))
+ mlt += LZ4_count(ip + mlt, base + dictLimit,
+ iHighLimit);
+ while ((ip + back > iLowLimit)
+ && (matchIndex + back > lowLimit)
+ && (ip[back - 1] == matchPtr[back - 1]))
+ back--;
+
+ mlt -= back;
+
+ if ((int)mlt > longest) {
+ longest = (int)mlt;
+ *matchpos = base + matchIndex + back;
+ *startpos = ip + back;
}
}
}
- ref -= (size_t)chaintable[(size_t)(ref) & MAXD_MASK];
+
+ matchIndex -= DELTANEXTU16(matchIndex);
}
+
return longest;
}
-static inline int lz4_encodesequence(const u8 **ip, u8 **op, const u8 **anchor,
- int ml, const u8 *ref)
+static FORCE_INLINE int LZ4HC_encodeSequence(
+ const BYTE **ip,
+ BYTE **op,
+ const BYTE **anchor,
+ int matchLength,
+ const BYTE * const match,
+ limitedOutput_directive limitedOutputBuffer,
+ BYTE *oend)
{
- int length, len;
- u8 *token;
+ int length;
+ BYTE *token;
/* Encode Literal length */
length = (int)(*ip - *anchor);
token = (*op)++;
+
+ if ((limitedOutputBuffer)
+ && ((*op + (length>>8)
+ + length + (2 + 1 + LASTLITERALS)) > oend)) {
+ /* Check output limit */
+ return 1;
+ }
if (length >= (int)RUN_MASK) {
- *token = (RUN_MASK << ML_BITS);
+ int len;
+
+ *token = (RUN_MASK<<ML_BITS);
len = length - RUN_MASK;
for (; len > 254 ; len -= 255)
*(*op)++ = 255;
- *(*op)++ = (u8)len;
+ *(*op)++ = (BYTE)len;
} else
- *token = (length << ML_BITS);
+ *token = (BYTE)(length<<ML_BITS);
/* Copy Literals */
- LZ4_BLINDCOPY(*anchor, *op, length);
+ LZ4_wildCopy(*op, *anchor, (*op) + length);
+ *op += length;
/* Encode Offset */
- LZ4_WRITE_LITTLEENDIAN_16(*op, (u16)(*ip - ref));
+ LZ4_writeLE16(*op, (U16)(*ip - match));
+ *op += 2;
/* Encode MatchLength */
- len = (int)(ml - MINMATCH);
- if (len >= (int)ML_MASK) {
+ length = (int)(matchLength - MINMATCH);
+
+ if ((limitedOutputBuffer)
+ && (*op + (length>>8)
+ + (1 + LASTLITERALS) > oend)) {
+ /* Check output limit */
+ return 1;
+ }
+
+ if (length >= (int)ML_MASK) {
*token += ML_MASK;
- len -= ML_MASK;
- for (; len > 509 ; len -= 510) {
+ length -= ML_MASK;
+
+ for (; length > 509 ; length -= 510) {
*(*op)++ = 255;
*(*op)++ = 255;
}
- if (len > 254) {
- len -= 255;
+
+ if (length > 254) {
+ length -= 255;
*(*op)++ = 255;
}
- *(*op)++ = (u8)len;
+
+ *(*op)++ = (BYTE)length;
} else
- *token += len;
+ *token += (BYTE)(length);
/* Prepare next loop */
- *ip += ml;
+ *ip += matchLength;
*anchor = *ip;
return 0;
}
-static int lz4_compresshcctx(struct lz4hc_data *ctx,
- const char *source,
- char *dest,
- int isize)
+static int LZ4HC_compress_generic(
+ LZ4HC_CCtx_internal *const ctx,
+ const char * const source,
+ char * const dest,
+ int const inputSize,
+ int const maxOutputSize,
+ int compressionLevel,
+ limitedOutput_directive limit
+ )
{
- const u8 *ip = (const u8 *)source;
- const u8 *anchor = ip;
- const u8 *const iend = ip + isize;
- const u8 *const mflimit = iend - MFLIMIT;
- const u8 *const matchlimit = (iend - LASTLITERALS);
+ const BYTE *ip = (const BYTE *) source;
+ const BYTE *anchor = ip;
+ const BYTE * const iend = ip + inputSize;
+ const BYTE * const mflimit = iend - MFLIMIT;
+ const BYTE * const matchlimit = (iend - LASTLITERALS);
- u8 *op = (u8 *)dest;
+ BYTE *op = (BYTE *) dest;
+ BYTE * const oend = op + maxOutputSize;
+ unsigned int maxNbAttempts;
int ml, ml2, ml3, ml0;
- const u8 *ref = NULL;
- const u8 *start2 = NULL;
- const u8 *ref2 = NULL;
- const u8 *start3 = NULL;
- const u8 *ref3 = NULL;
- const u8 *start0;
- const u8 *ref0;
- int lastrun;
+ const BYTE *ref = NULL;
+ const BYTE *start2 = NULL;
+ const BYTE *ref2 = NULL;
+ const BYTE *start3 = NULL;
+ const BYTE *ref3 = NULL;
+ const BYTE *start0;
+ const BYTE *ref0;
+
+ /* init */
+ if (compressionLevel > LZ4HC_MAX_CLEVEL)
+ compressionLevel = LZ4HC_MAX_CLEVEL;
+ if (compressionLevel < 1)
+ compressionLevel = LZ4HC_DEFAULT_CLEVEL;
+ maxNbAttempts = 1 << (compressionLevel - 1);
+ ctx->end += inputSize;
ip++;
/* Main Loop */
while (ip < mflimit) {
- ml = lz4hc_insertandfindbestmatch(ctx, ip, matchlimit, (&ref));
+ ml = LZ4HC_InsertAndFindBestMatch(ctx, ip,
+ matchlimit, (&ref), maxNbAttempts);
if (!ml) {
ip++;
continue;
@@ -351,51 +387,59 @@
start0 = ip;
ref0 = ref;
ml0 = ml;
-_search2:
- if (ip+ml < mflimit)
- ml2 = lz4hc_insertandgetwidermatch(ctx, ip + ml - 2,
- ip + 1, matchlimit, ml, &ref2, &start2);
+
+_Search2:
+ if (ip + ml < mflimit)
+ ml2 = LZ4HC_InsertAndGetWiderMatch(ctx,
+ ip + ml - 2, ip + 0,
+ matchlimit, ml, &ref2,
+ &start2, maxNbAttempts);
else
ml2 = ml;
- /* No better match */
+
if (ml2 == ml) {
- lz4_encodesequence(&ip, &op, &anchor, ml, ref);
+ /* No better match */
+ if (LZ4HC_encodeSequence(&ip, &op,
+ &anchor, ml, ref, limit, oend))
+ return 0;
continue;
}
if (start0 < ip) {
- /* empirical */
if (start2 < ip + ml0) {
+ /* empirical */
ip = start0;
ref = ref0;
ml = ml0;
}
}
- /*
- * Here, start0==ip
- * First Match too small : removed
- */
+
+ /* Here, start0 == ip */
if ((start2 - ip) < 3) {
+ /* First Match too small : removed */
ml = ml2;
ip = start2;
ref = ref2;
- goto _search2;
+ goto _Search2;
}
-_search3:
+_Search3:
/*
- * Currently we have :
- * ml2 > ml1, and
- * ip1+3 <= ip2 (usually < ip1+ml1)
- */
+ * Currently we have :
+ * ml2 > ml1, and
+ * ip1 + 3 <= ip2 (usually < ip1 + ml1)
+ */
if ((start2 - ip) < OPTIMAL_ML) {
int correction;
int new_ml = ml;
+
if (new_ml > OPTIMAL_ML)
new_ml = OPTIMAL_ML;
if (ip + new_ml > start2 + ml2 - MINMATCH)
new_ml = (int)(start2 - ip) + ml2 - MINMATCH;
+
correction = new_ml - (int)(start2 - ip);
+
if (correction > 0) {
start2 += correction;
ref2 += correction;
@@ -403,39 +447,44 @@
}
}
/*
- * Now, we have start2 = ip+new_ml,
- * with new_ml=min(ml, OPTIMAL_ML=18)
+ * Now, we have start2 = ip + new_ml,
+ * with new_ml = min(ml, OPTIMAL_ML = 18)
*/
+
if (start2 + ml2 < mflimit)
- ml3 = lz4hc_insertandgetwidermatch(ctx,
- start2 + ml2 - 3, start2, matchlimit,
- ml2, &ref3, &start3);
+ ml3 = LZ4HC_InsertAndGetWiderMatch(ctx,
+ start2 + ml2 - 3, start2,
+ matchlimit, ml2, &ref3, &start3,
+ maxNbAttempts);
else
ml3 = ml2;
- /* No better match : 2 sequences to encode */
if (ml3 == ml2) {
+ /* No better match : 2 sequences to encode */
/* ip & ref are known; Now for ml */
- if (start2 < ip+ml)
+ if (start2 < ip + ml)
ml = (int)(start2 - ip);
-
/* Now, encode 2 sequences */
- lz4_encodesequence(&ip, &op, &anchor, ml, ref);
+ if (LZ4HC_encodeSequence(&ip, &op, &anchor,
+ ml, ref, limit, oend))
+ return 0;
ip = start2;
- lz4_encodesequence(&ip, &op, &anchor, ml2, ref2);
+ if (LZ4HC_encodeSequence(&ip, &op, &anchor,
+ ml2, ref2, limit, oend))
+ return 0;
continue;
}
- /* Not enough space for match 2 : remove it */
if (start3 < ip + ml + 3) {
- /*
- * can write Seq1 immediately ==> Seq2 is removed,
- * so Seq3 becomes Seq1
- */
+ /* Not enough space for match 2 : remove it */
if (start3 >= (ip + ml)) {
+ /* can write Seq1 immediately
+ * ==> Seq2 is removed,
+ * so Seq3 becomes Seq1
+ */
if (start2 < ip + ml) {
- int correction =
- (int)(ip + ml - start2);
+ int correction = (int)(ip + ml - start2);
+
start2 += correction;
ref2 += correction;
ml2 -= correction;
@@ -446,35 +495,38 @@
}
}
- lz4_encodesequence(&ip, &op, &anchor, ml, ref);
- ip = start3;
+ if (LZ4HC_encodeSequence(&ip, &op, &anchor,
+ ml, ref, limit, oend))
+ return 0;
+ ip = start3;
ref = ref3;
- ml = ml3;
+ ml = ml3;
start0 = start2;
ref0 = ref2;
ml0 = ml2;
- goto _search2;
+ goto _Search2;
}
start2 = start3;
ref2 = ref3;
ml2 = ml3;
- goto _search3;
+ goto _Search3;
}
/*
- * OK, now we have 3 ascending matches; let's write at least
- * the first one ip & ref are known; Now for ml
- */
+ * OK, now we have 3 ascending matches;
+ * let's write at least the first one
+ * ip & ref are known; Now for ml
+ */
if (start2 < ip + ml) {
if ((start2 - ip) < (int)ML_MASK) {
int correction;
+
if (ml > OPTIMAL_ML)
ml = OPTIMAL_ML;
if (ip + ml > start2 + ml2 - MINMATCH)
- ml = (int)(start2 - ip) + ml2
- - MINMATCH;
+ ml = (int)(start2 - ip) + ml2 - MINMATCH;
correction = ml - (int)(start2 - ip);
if (correction > 0) {
start2 += correction;
@@ -484,7 +536,9 @@
} else
ml = (int)(start2 - ip);
}
- lz4_encodesequence(&ip, &op, &anchor, ml, ref);
+ if (LZ4HC_encodeSequence(&ip, &op, &anchor, ml,
+ ref, limit, oend))
+ return 0;
ip = start2;
ref = ref2;
@@ -494,46 +548,245 @@
ref2 = ref3;
ml2 = ml3;
- goto _search3;
+ goto _Search3;
}
/* Encode Last Literals */
- lastrun = (int)(iend - anchor);
- if (lastrun >= (int)RUN_MASK) {
- *op++ = (RUN_MASK << ML_BITS);
- lastrun -= RUN_MASK;
- for (; lastrun > 254 ; lastrun -= 255)
- *op++ = 255;
- *op++ = (u8) lastrun;
- } else
- *op++ = (lastrun << ML_BITS);
- memcpy(op, anchor, iend - anchor);
- op += iend - anchor;
+ {
+ int lastRun = (int)(iend - anchor);
+
+ if ((limit)
+ && (((char *)op - dest) + lastRun + 1
+ + ((lastRun + 255 - RUN_MASK)/255)
+ > (U32)maxOutputSize)) {
+ /* Check output limit */
+ return 0;
+ }
+ if (lastRun >= (int)RUN_MASK) {
+ *op++ = (RUN_MASK<<ML_BITS);
+ lastRun -= RUN_MASK;
+ for (; lastRun > 254 ; lastRun -= 255)
+ *op++ = 255;
+ *op++ = (BYTE) lastRun;
+ } else
+ *op++ = (BYTE)(lastRun<<ML_BITS);
+ memcpy(op, anchor, iend - anchor);
+ op += iend - anchor;
+ }
+
/* End */
return (int) (((char *)op) - dest);
}
-int lz4hc_compress(const unsigned char *src, size_t src_len,
- unsigned char *dst, size_t *dst_len, void *wrkmem)
+static int LZ4_compress_HC_extStateHC(
+ void *state,
+ const char *src,
+ char *dst,
+ int srcSize,
+ int maxDstSize,
+ int compressionLevel)
{
- int ret = -1;
- int out_len = 0;
+ LZ4HC_CCtx_internal *ctx = &((LZ4_streamHC_t *)state)->internal_donotuse;
- struct lz4hc_data *hc4 = (struct lz4hc_data *)wrkmem;
- lz4hc_init(hc4, (const u8 *)src);
- out_len = lz4_compresshcctx((struct lz4hc_data *)hc4, (const u8 *)src,
- (char *)dst, (int)src_len);
+ if (((size_t)(state)&(sizeof(void *) - 1)) != 0) {
+ /* Error : state is not aligned
+ * for pointers (32 or 64 bits)
+ */
+ return 0;
+ }
- if (out_len < 0)
- goto exit;
+ LZ4HC_init(ctx, (const BYTE *)src);
- *dst_len = out_len;
- return 0;
+ if (maxDstSize < LZ4_compressBound(srcSize))
+ return LZ4HC_compress_generic(ctx, src, dst,
+ srcSize, maxDstSize, compressionLevel, limitedOutput);
+ else
+ return LZ4HC_compress_generic(ctx, src, dst,
+ srcSize, maxDstSize, compressionLevel, noLimit);
+}
-exit:
- return ret;
+int LZ4_compress_HC(const char *src, char *dst, int srcSize,
+ int maxDstSize, int compressionLevel, void *wrkmem)
+{
+ return LZ4_compress_HC_extStateHC(wrkmem, src, dst,
+ srcSize, maxDstSize, compressionLevel);
+}
+EXPORT_SYMBOL(LZ4_compress_HC);
+
+/**************************************
+ * Streaming Functions
+ **************************************/
+void LZ4_resetStreamHC(LZ4_streamHC_t *LZ4_streamHCPtr, int compressionLevel)
+{
+ LZ4_streamHCPtr->internal_donotuse.base = NULL;
+ LZ4_streamHCPtr->internal_donotuse.compressionLevel = (unsigned int)compressionLevel;
+}
+
+int LZ4_loadDictHC(LZ4_streamHC_t *LZ4_streamHCPtr,
+ const char *dictionary,
+ int dictSize)
+{
+ LZ4HC_CCtx_internal *ctxPtr = &LZ4_streamHCPtr->internal_donotuse;
+
+ if (dictSize > 64 * KB) {
+ dictionary += dictSize - 64 * KB;
+ dictSize = 64 * KB;
+ }
+ LZ4HC_init(ctxPtr, (const BYTE *)dictionary);
+ if (dictSize >= 4)
+ LZ4HC_Insert(ctxPtr, (const BYTE *)dictionary + (dictSize - 3));
+ ctxPtr->end = (const BYTE *)dictionary + dictSize;
+ return dictSize;
+}
+EXPORT_SYMBOL(LZ4_loadDictHC);
+
+/* compression */
+
+static void LZ4HC_setExternalDict(
+ LZ4HC_CCtx_internal *ctxPtr,
+ const BYTE *newBlock)
+{
+ if (ctxPtr->end >= ctxPtr->base + 4) {
+ /* Referencing remaining dictionary content */
+ LZ4HC_Insert(ctxPtr, ctxPtr->end - 3);
+ }
+
+ /*
+ * Only one memory segment for extDict,
+ * so any previous extDict is lost at this stage
+ */
+ ctxPtr->lowLimit = ctxPtr->dictLimit;
+ ctxPtr->dictLimit = (U32)(ctxPtr->end - ctxPtr->base);
+ ctxPtr->dictBase = ctxPtr->base;
+ ctxPtr->base = newBlock - ctxPtr->dictLimit;
+ ctxPtr->end = newBlock;
+ /* match referencing will resume from there */
+ ctxPtr->nextToUpdate = ctxPtr->dictLimit;
+}
+EXPORT_SYMBOL(LZ4HC_setExternalDict);
+
+static int LZ4_compressHC_continue_generic(
+ LZ4_streamHC_t *LZ4_streamHCPtr,
+ const char *source,
+ char *dest,
+ int inputSize,
+ int maxOutputSize,
+ limitedOutput_directive limit)
+{
+ LZ4HC_CCtx_internal *ctxPtr = &LZ4_streamHCPtr->internal_donotuse;
+
+ /* auto - init if forgotten */
+ if (ctxPtr->base == NULL)
+ LZ4HC_init(ctxPtr, (const BYTE *) source);
+
+ /* Check overflow */
+ if ((size_t)(ctxPtr->end - ctxPtr->base) > 2 * GB) {
+ size_t dictSize = (size_t)(ctxPtr->end - ctxPtr->base)
+ - ctxPtr->dictLimit;
+ if (dictSize > 64 * KB)
+ dictSize = 64 * KB;
+ LZ4_loadDictHC(LZ4_streamHCPtr,
+ (const char *)(ctxPtr->end) - dictSize, (int)dictSize);
+ }
+
+ /* Check if blocks follow each other */
+ if ((const BYTE *)source != ctxPtr->end)
+ LZ4HC_setExternalDict(ctxPtr, (const BYTE *)source);
+
+ /* Check overlapping input/dictionary space */
+ {
+ const BYTE *sourceEnd = (const BYTE *) source + inputSize;
+ const BYTE * const dictBegin = ctxPtr->dictBase + ctxPtr->lowLimit;
+ const BYTE * const dictEnd = ctxPtr->dictBase + ctxPtr->dictLimit;
+
+ if ((sourceEnd > dictBegin)
+ && ((const BYTE *)source < dictEnd)) {
+ if (sourceEnd > dictEnd)
+ sourceEnd = dictEnd;
+ ctxPtr->lowLimit = (U32)(sourceEnd - ctxPtr->dictBase);
+
+ if (ctxPtr->dictLimit - ctxPtr->lowLimit < 4)
+ ctxPtr->lowLimit = ctxPtr->dictLimit;
+ }
+ }
+
+ return LZ4HC_compress_generic(ctxPtr, source, dest,
+ inputSize, maxOutputSize, ctxPtr->compressionLevel, limit);
+}
+
+int LZ4_compress_HC_continue(
+ LZ4_streamHC_t *LZ4_streamHCPtr,
+ const char *source,
+ char *dest,
+ int inputSize,
+ int maxOutputSize)
+{
+ if (maxOutputSize < LZ4_compressBound(inputSize))
+ return LZ4_compressHC_continue_generic(LZ4_streamHCPtr,
+ source, dest, inputSize, maxOutputSize, limitedOutput);
+ else
+ return LZ4_compressHC_continue_generic(LZ4_streamHCPtr,
+ source, dest, inputSize, maxOutputSize, noLimit);
+}
+EXPORT_SYMBOL(LZ4_compress_HC_continue);
+
+/* dictionary saving */
+
+int LZ4_saveDictHC(
+ LZ4_streamHC_t *LZ4_streamHCPtr,
+ char *safeBuffer,
+ int dictSize)
+{
+ LZ4HC_CCtx_internal *const streamPtr = &LZ4_streamHCPtr->internal_donotuse;
+ int const prefixSize = (int)(streamPtr->end
+ - (streamPtr->base + streamPtr->dictLimit));
+
+ if (dictSize > 64 * KB)
+ dictSize = 64 * KB;
+ if (dictSize < 4)
+ dictSize = 0;
+ if (dictSize > prefixSize)
+ dictSize = prefixSize;
+
+ memmove(safeBuffer, streamPtr->end - dictSize, dictSize);
+
+ {
+ U32 const endIndex = (U32)(streamPtr->end - streamPtr->base);
+
+ streamPtr->end = (const BYTE *)safeBuffer + dictSize;
+ streamPtr->base = streamPtr->end - endIndex;
+ streamPtr->dictLimit = endIndex - dictSize;
+ streamPtr->lowLimit = endIndex - dictSize;
+
+ if (streamPtr->nextToUpdate < streamPtr->dictLimit)
+ streamPtr->nextToUpdate = streamPtr->dictLimit;
+ }
+ return dictSize;
+}
+EXPORT_SYMBOL(LZ4_saveDictHC);
+
+/*-******************************
+ * For backwards compatibility
+ ********************************/
+int lz4hc_compress(const unsigned char *src, size_t src_len,
+ unsigned char *dst, size_t *dst_len, void *wrkmem)
+{
+ *dst_len = LZ4_compress_HC(src, dst, src_len,
+ *dst_len, LZ4HC_DEFAULT_CLEVEL, wrkmem);
+
+ /*
+ * Prior lz4hc_compress will return -1 in case of error
+ * and 0 on success
+ * while new LZ4_compress_HC
+ * returns 0 in case of error
+ * and the output length on success
+ */
+ if (!*dst_len)
+ return -1;
+ else
+ return 0;
}
EXPORT_SYMBOL(lz4hc_compress);
MODULE_LICENSE("Dual BSD/GPL");
-MODULE_DESCRIPTION("LZ4HC compressor");
+MODULE_DESCRIPTION("LZ4 HC compressor");
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 76f29ec..771234d 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -452,11 +452,11 @@
: 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);
/*
- * For mappings greater than a page, we limit the stride (and
- * hence alignment) to a page size.
+ * For mappings greater than or equal to a page, we limit the stride
+ * (and hence alignment) to a page size.
*/
nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
- if (size > PAGE_SIZE)
+ if (size >= PAGE_SIZE)
stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
else
stride = 1;
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 10cd186..7e26aea 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -4315,6 +4315,51 @@
{ },
{ { 0, 1 } },
},
+ {
+ /* Mainly testing JIT + imm64 here. */
+ "JMP_JGE_X: ldimm64 test 1",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, 3),
+ BPF_LD_IMM64(R2, 2),
+ BPF_JMP_REG(BPF_JGE, R1, R2, 2),
+ BPF_LD_IMM64(R0, 0xffffffffffffffffUL),
+ BPF_LD_IMM64(R0, 0xeeeeeeeeeeeeeeeeUL),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xeeeeeeeeU } },
+ },
+ {
+ "JMP_JGE_X: ldimm64 test 2",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 0),
+ BPF_LD_IMM64(R1, 3),
+ BPF_LD_IMM64(R2, 2),
+ BPF_JMP_REG(BPF_JGE, R1, R2, 0),
+ BPF_LD_IMM64(R0, 0xffffffffffffffffUL),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 0xffffffffU } },
+ },
+ {
+ "JMP_JGE_X: ldimm64 test 3",
+ .u.insns_int = {
+ BPF_ALU32_IMM(BPF_MOV, R0, 1),
+ BPF_LD_IMM64(R1, 3),
+ BPF_LD_IMM64(R2, 2),
+ BPF_JMP_REG(BPF_JGE, R1, R2, 4),
+ BPF_LD_IMM64(R0, 0xffffffffffffffffUL),
+ BPF_LD_IMM64(R0, 0xeeeeeeeeeeeeeeeeUL),
+ BPF_EXIT_INSN(),
+ },
+ INTERNAL,
+ { },
+ { { 0, 1 } },
+ },
/* BPF_JMP | BPF_JNE | BPF_X */
{
"JMP_JNE_X: if (3 != 2) return 1",
diff --git a/lib/test_user_copy.c b/lib/test_user_copy.c
index 0ecef3e..5e6db6b 100644
--- a/lib/test_user_copy.c
+++ b/lib/test_user_copy.c
@@ -58,7 +58,9 @@
usermem = (char __user *)user_addr;
bad_usermem = (char *)user_addr;
- /* Legitimate usage: none of these should fail. */
+ /*
+ * Legitimate usage: none of these copies should fail.
+ */
ret |= test(copy_from_user(kmem, usermem, PAGE_SIZE),
"legitimate copy_from_user failed");
ret |= test(copy_to_user(usermem, kmem, PAGE_SIZE),
@@ -68,19 +70,33 @@
ret |= test(put_user(value, (unsigned long __user *)usermem),
"legitimate put_user failed");
- /* Invalid usage: none of these should succeed. */
+ /*
+ * Invalid usage: none of these copies should succeed.
+ */
+
+ /* Reject kernel-to-kernel copies through copy_from_user(). */
ret |= test(!copy_from_user(kmem, (char __user *)(kmem + PAGE_SIZE),
PAGE_SIZE),
"illegal all-kernel copy_from_user passed");
+
+#if 0
+ /*
+ * When running with SMAP/PAN/etc, this will Oops the kernel
+ * due to the zeroing of userspace memory on failure. This needs
+ * to be tested in LKDTM instead, since this test module does not
+ * expect to explode.
+ */
ret |= test(!copy_from_user(bad_usermem, (char __user *)kmem,
PAGE_SIZE),
"illegal reversed copy_from_user passed");
+#endif
ret |= test(!copy_to_user((char __user *)kmem, kmem + PAGE_SIZE,
PAGE_SIZE),
"illegal all-kernel copy_to_user passed");
ret |= test(!copy_to_user((char __user *)kmem, bad_usermem,
PAGE_SIZE),
"illegal reversed copy_to_user passed");
+
ret |= test(!get_user(value, (unsigned long __user *)kmem),
"illegal get_user passed");
ret |= test(!put_user(value, (unsigned long __user *)kmem),
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 530e642..6c6f5cc 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1269,6 +1269,16 @@
return ret;
}
+/*
+ * FOLL_FORCE can write to even unwritable pmd's, but only
+ * after we've gone through a COW cycle and they are dirty.
+ */
+static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags)
+{
+ return pmd_write(pmd) ||
+ ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd));
+}
+
struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
unsigned long addr,
pmd_t *pmd,
@@ -1279,7 +1289,7 @@
assert_spin_locked(pmd_lockptr(mm, pmd));
- if (flags & FOLL_WRITE && !pmd_write(*pmd))
+ if (flags & FOLL_WRITE && !can_follow_write_pmd(*pmd, flags))
goto out;
/* Avoid dumping huge zero page */
@@ -1353,8 +1363,11 @@
*/
if (unlikely(pmd_trans_migrating(*pmdp))) {
page = pmd_page(*pmdp);
+ if (!get_page_unless_zero(page))
+ goto out_unlock;
spin_unlock(ptl);
wait_on_page_locked(page);
+ put_page(page);
goto out;
}
@@ -1386,8 +1399,11 @@
/* Migration could have started since the pmd_trans_migrating check */
if (!page_locked) {
+ if (!get_page_unless_zero(page))
+ goto out_unlock;
spin_unlock(ptl);
wait_on_page_locked(page);
+ put_page(page);
page_nid = -1;
goto out;
}
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index ea11123..7294301 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -4362,6 +4362,7 @@
{
struct page *page = NULL;
spinlock_t *ptl;
+ pte_t pte;
retry:
ptl = pmd_lockptr(mm, pmd);
spin_lock(ptl);
@@ -4371,12 +4372,13 @@
*/
if (!pmd_huge(*pmd))
goto out;
- if (pmd_present(*pmd)) {
+ pte = huge_ptep_get((pte_t *)pmd);
+ if (pte_present(pte)) {
page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
if (flags & FOLL_GET)
get_page(page);
} else {
- if (is_hugetlb_entry_migration(huge_ptep_get((pte_t *)pmd))) {
+ if (is_hugetlb_entry_migration(pte)) {
spin_unlock(ptl);
__migration_entry_wait(mm, (pte_t *)pmd, ptl);
goto retry;
diff --git a/mm/internal.h b/mm/internal.h
index c08dd95..600486e 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -452,6 +452,7 @@
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
void try_to_unmap_flush(void);
void try_to_unmap_flush_dirty(void);
+void flush_tlb_batched_pending(struct mm_struct *mm);
#else
static inline void try_to_unmap_flush(void)
{
@@ -459,6 +460,8 @@
static inline void try_to_unmap_flush_dirty(void)
{
}
-
+static inline void flush_tlb_batched_pending(struct mm_struct *mm)
+{
+}
#endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
#endif /* __MM_INTERNAL_H */
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index 7b1ff9d..2270122 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -13,6 +13,7 @@
*
*/
+#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/printk.h>
@@ -296,6 +297,8 @@
if (likely(!kasan_report_enabled()))
return;
+ disable_trace_on_warning();
+
info.access_addr = (void *)addr;
info.access_size = size;
info.is_write = is_write;
diff --git a/mm/list_lru.c b/mm/list_lru.c
index 5d8dffd..786176b 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -117,6 +117,7 @@
l = list_lru_from_kmem(nlru, item);
list_add_tail(item, &l->list);
l->nr_items++;
+ nlru->nr_items++;
spin_unlock(&nlru->lock);
return true;
}
@@ -136,6 +137,7 @@
l = list_lru_from_kmem(nlru, item);
list_del_init(item);
l->nr_items--;
+ nlru->nr_items--;
spin_unlock(&nlru->lock);
return true;
}
@@ -183,15 +185,10 @@
unsigned long list_lru_count_node(struct list_lru *lru, int nid)
{
- long count = 0;
- int memcg_idx;
+ struct list_lru_node *nlru;
- count += __list_lru_count_one(lru, nid, -1);
- if (list_lru_memcg_aware(lru)) {
- for_each_memcg_cache_index(memcg_idx)
- count += __list_lru_count_one(lru, nid, memcg_idx);
- }
- return count;
+ nlru = &lru->node[nid];
+ return nlru->nr_items;
}
EXPORT_SYMBOL_GPL(list_lru_count_node);
@@ -226,6 +223,7 @@
assert_spin_locked(&nlru->lock);
case LRU_REMOVED:
isolated++;
+ nlru->nr_items--;
/*
* If the lru lock has been dropped, our list
* traversal is now invalid and so we have to
diff --git a/mm/memblock.c b/mm/memblock.c
index 89e74fa..351a484 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -1688,6 +1688,30 @@
}
}
+extern unsigned long __init_memblock
+memblock_reserved_memory_within(phys_addr_t start_addr, phys_addr_t end_addr)
+{
+ struct memblock_type *type = &memblock.reserved;
+ unsigned long size = 0;
+ int idx;
+
+ for (idx = 0; idx < type->cnt; idx++) {
+ struct memblock_region *rgn = &type->regions[idx];
+ phys_addr_t start, end;
+
+ if (rgn->base + rgn->size < start_addr)
+ continue;
+ if (rgn->base > end_addr)
+ continue;
+
+ start = rgn->base;
+ end = start + rgn->size;
+ size += end - start;
+ }
+
+ return size;
+}
+
void __init_memblock __memblock_dump_all(void)
{
pr_info("MEMBLOCK configuration:\n");
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index b3becd9..14c1c5c 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1208,7 +1208,10 @@
* page_remove_rmap() in try_to_unmap_one(). So to determine page status
* correctly, we save a copy of the page flags at this time.
*/
- page_flags = p->flags;
+ if (PageHuge(p))
+ page_flags = hpage->flags;
+ else
+ page_flags = p->flags;
/*
* unpoison always clear PG_hwpoison inside page lock
@@ -1619,12 +1622,8 @@
if (ret) {
pr_info("soft offline: %#lx: migration failed %d, type %lx\n",
pfn, ret, page->flags);
- /*
- * We know that soft_offline_huge_page() tries to migrate
- * only one hugepage pointed to by hpage, so we need not
- * run through the pagelist here.
- */
- putback_active_hugepage(hpage);
+ if (!list_empty(&pagelist))
+ putback_movable_pages(&pagelist);
if (ret > 0)
ret = -EIO;
} else {
diff --git a/mm/memory.c b/mm/memory.c
index 8689df9..d6e10c8 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1127,6 +1127,7 @@
init_rss_vec(rss);
start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
pte = start_pte;
+ flush_tlb_batched_pending(mm);
arch_enter_lazy_mmu_mode();
do {
pte_t ptent = *pte;
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index d20d838..5acc1f9 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -895,11 +895,6 @@
*policy |= (pol->flags & MPOL_MODE_FLAGS);
}
- if (vma) {
- up_read(¤t->mm->mmap_sem);
- vma = NULL;
- }
-
err = 0;
if (nmask) {
if (mpol_store_user_nodemask(pol)) {
diff --git a/mm/migrate.c b/mm/migrate.c
index 9a5ccfc7..d600252 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -39,6 +39,7 @@
#include <linux/balloon_compaction.h>
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
+#include <linux/ptrace.h>
#include <asm/tlbflush.h>
@@ -1642,7 +1643,6 @@
const int __user *, nodes,
int __user *, status, int, flags)
{
- const struct cred *cred = current_cred(), *tcred;
struct task_struct *task;
struct mm_struct *mm;
int err;
@@ -1666,14 +1666,9 @@
/*
* Check if this process has the right to modify the specified
- * process. The right exists if the process has administrative
- * capabilities, superuser privileges or the same
- * userid as the target process.
+ * process. Use the regular "ptrace_may_access()" checks.
*/
- tcred = __task_cred(task);
- if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
- !capable(CAP_SYS_NICE)) {
+ if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
rcu_read_unlock();
err = -EPERM;
goto out;
diff --git a/mm/mlock.c b/mm/mlock.c
index d843bc9..206e86b 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -277,7 +277,7 @@
{
int i;
int nr = pagevec_count(pvec);
- int delta_munlocked;
+ int delta_munlocked = -nr;
struct pagevec pvec_putback;
int pgrescued = 0;
@@ -297,6 +297,8 @@
continue;
else
__munlock_isolation_failed(page);
+ } else {
+ delta_munlocked++;
}
/*
@@ -308,7 +310,6 @@
pagevec_add(&pvec_putback, pvec->pages[i]);
pvec->pages[i] = NULL;
}
- delta_munlocked = -nr + pagevec_count(&pvec_putback);
__mod_zone_page_state(zone, NR_MLOCK, delta_munlocked);
spin_unlock_irq(&zone->lru_lock);
diff --git a/mm/mmap.c b/mm/mmap.c
index 787f179..4318734 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2195,11 +2195,25 @@
if (!(vma->vm_flags & VM_GROWSUP))
return -EFAULT;
- /* Guard against wrapping around to address 0. */
+ /* Guard against exceeding limits of the address space. */
address &= PAGE_MASK;
- address += PAGE_SIZE;
- if (!address)
+ if (address >= (TASK_SIZE & PAGE_MASK))
return -ENOMEM;
+ address += PAGE_SIZE;
+
+ /* Enforce stack_guard_gap */
+ gap_addr = address + stack_guard_gap;
+
+ /* Guard against overflow */
+ if (gap_addr < address || gap_addr > TASK_SIZE)
+ gap_addr = TASK_SIZE;
+
+ next = vma->vm_next;
+ if (next && next->vm_start < gap_addr) {
+ if (!(next->vm_flags & VM_GROWSUP))
+ return -ENOMEM;
+ /* Check that both stack segments have the same anon_vma? */
+ }
/* Enforce stack_guard_gap */
gap_addr = address + stack_guard_gap;
diff --git a/mm/mprotect.c b/mm/mprotect.c
index bddb2c7..b8849a3 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -72,6 +72,7 @@
if (!pte)
return 0;
+ flush_tlb_batched_pending(vma->vm_mm);
arch_enter_lazy_mmu_mode();
do {
oldpte = *pte;
diff --git a/mm/mremap.c b/mm/mremap.c
index c25bc62..fe7b7f6 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -135,6 +135,7 @@
new_ptl = pte_lockptr(mm, new_pmd);
if (new_ptl != old_ptl)
spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
+ flush_tlb_batched_pending(vma->vm_mm);
arch_enter_lazy_mmu_mode();
for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index e98076f..872bd18 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -274,6 +274,26 @@
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
static inline void reset_deferred_meminit(pg_data_t *pgdat)
{
+ unsigned long max_initialise;
+ unsigned long reserved_lowmem;
+
+ /*
+ * Initialise at least 2G of a node but also take into account that
+	 * two large system hashes can take up 1GB for 0.25TB/node.
+ */
+ max_initialise = max(2UL << (30 - PAGE_SHIFT),
+ (pgdat->node_spanned_pages >> 8));
+
+ /*
+	 * Compensate for all the memblock reservations (e.g. crash kernel)
+ * from the initial estimation to make sure we will initialize enough
+ * memory to boot.
+ */
+ reserved_lowmem = memblock_reserved_memory_within(pgdat->node_start_pfn,
+ pgdat->node_start_pfn + max_initialise);
+ max_initialise += reserved_lowmem;
+
+ pgdat->static_init_size = min(max_initialise, pgdat->node_spanned_pages);
pgdat->first_deferred_pfn = ULONG_MAX;
}
@@ -307,10 +327,9 @@
/* Always populate low zones for address-contrained allocations */
if (zone_end < pgdat_end_pfn(pgdat))
return true;
-
/* Initialise at least 2G of the highest zone */
(*nr_initialised)++;
- if (*nr_initialised > (2UL << (30 - PAGE_SHIFT)) &&
+ if ((*nr_initialised > pgdat->static_init_size) &&
(pfn & (PAGES_PER_SECTION - 1)) == 0) {
pgdat->first_deferred_pfn = pfn;
return false;
@@ -1526,14 +1545,14 @@
#endif
for (page = start_page; page <= end_page;) {
- /* Make sure we are not inadvertently changing nodes */
- VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
-
if (!pfn_valid_within(page_to_pfn(page))) {
page++;
continue;
}
+ /* Make sure we are not inadvertently changing nodes */
+ VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
+
if (!PageBuddy(page)) {
page++;
continue;
@@ -5455,7 +5474,6 @@
/* pg_data_t should be reset to zero when it's allocated */
WARN_ON(pgdat->nr_zones || pgdat->classzone_idx);
- reset_deferred_meminit(pgdat);
pgdat->node_id = nid;
pgdat->node_start_pfn = node_start_pfn;
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
@@ -5474,6 +5492,7 @@
(unsigned long)pgdat->node_mem_map);
#endif
+ reset_deferred_meminit(pgdat);
free_area_init_core(pgdat);
}
@@ -5940,8 +5959,8 @@
}
if (pages && s)
- pr_info("Freeing %s memory: %ldK (%p - %p)\n",
- s, pages << (PAGE_SHIFT - 10), start, end);
+ pr_info("Freeing %s memory: %ldK\n",
+ s, pages << (PAGE_SHIFT - 10));
return pages;
}
@@ -6897,7 +6916,7 @@
/* Make sure the range is really isolated. */
if (test_pages_isolated(outer_start, end, false)) {
- pr_info("%s: [%lx, %lx) PFNs busy\n",
+ pr_info_ratelimited("%s: [%lx, %lx) PFNs busy\n",
__func__, outer_start, end);
ret = -EBUSY;
goto done;
diff --git a/mm/percpu.c b/mm/percpu.c
index 1f376bc..ef6353f 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1012,8 +1012,11 @@
mutex_unlock(&pcpu_alloc_mutex);
}
- if (chunk != pcpu_reserved_chunk)
+ if (chunk != pcpu_reserved_chunk) {
+ spin_lock_irqsave(&pcpu_lock, flags);
pcpu_nr_empty_pop_pages -= occ_pages;
+ spin_unlock_irqrestore(&pcpu_lock, flags);
+ }
if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_LOW)
pcpu_schedule_balance_work();
diff --git a/mm/rmap.c b/mm/rmap.c
index 59489b3..cbaf273 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -649,6 +649,13 @@
tlb_ubc->flush_required = true;
/*
+ * Ensure compiler does not re-order the setting of tlb_flush_batched
+ * before the PTE is cleared.
+ */
+ barrier();
+ mm->tlb_flush_batched = true;
+
+ /*
* If the PTE was dirty then it's best to assume it's writable. The
* caller must use try_to_unmap_flush_dirty() or try_to_unmap_flush()
* before the page is queued for IO.
@@ -675,6 +682,35 @@
return should_defer;
}
+
+/*
+ * Reclaim unmaps pages under the PTL but does not flush the TLB prior to
+ * releasing the PTL if TLB flushes are batched. It's possible for a parallel
+ * operation such as mprotect or munmap to race between reclaim unmapping
+ * the page and flushing the page. If this race occurs, it potentially allows
+ * access to data via a stale TLB entry. Tracking all mm's that have TLB
+ * batching in flight would be expensive during reclaim so instead track
+ * whether TLB batching occurred in the past and if so then do a flush here
+ * if required. This will cost one additional flush per reclaim cycle paid
+ * by the first operation at risk such as mprotect and munmap.
+ *
+ * This must be called under the PTL so that an access to tlb_flush_batched
+ * that is potentially a "reclaim vs mprotect/munmap/etc" race will synchronise
+ * via the PTL.
+ */
+void flush_tlb_batched_pending(struct mm_struct *mm)
+{
+ if (mm->tlb_flush_batched) {
+ flush_tlb_mm(mm);
+
+ /*
+ * Do not allow the compiler to re-order the clearing of
+ * tlb_flush_batched before the tlb is flushed.
+ */
+ barrier();
+ mm->tlb_flush_batched = false;
+ }
+}
#else
static void set_tlb_ubc_flush_pending(struct mm_struct *mm,
struct page *page, bool writable)
diff --git a/mm/slub.c b/mm/slub.c
index 7683bb3..683a692 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -5452,6 +5452,7 @@
char mbuf[64];
char *buf;
struct slab_attribute *attr = to_slab_attr(slab_attrs[i]);
+ ssize_t len;
if (!attr || !attr->store || !attr->show)
continue;
@@ -5476,8 +5477,9 @@
buf = buffer;
}
- attr->show(root_cache, buf);
- attr->store(s, buf, strlen(buf));
+ len = attr->show(root_cache, buf);
+ if (len > 0)
+ attr->store(s, buf, len);
}
if (buffer)
diff --git a/mm/swap_cgroup.c b/mm/swap_cgroup.c
index b5f7f24..09f733b 100644
--- a/mm/swap_cgroup.c
+++ b/mm/swap_cgroup.c
@@ -48,6 +48,9 @@
if (!page)
goto not_enough_page;
ctrl->map[idx] = page;
+
+ if (!(idx % SWAP_CLUSTER_MAX))
+ cond_resched();
}
return 0;
not_enough_page:
@@ -202,6 +205,8 @@
struct page *page = map[i];
if (page)
__free_page(page);
+ if (!(i % SWAP_CLUSTER_MAX))
+ cond_resched();
}
vfree(map);
}
diff --git a/mm/truncate.c b/mm/truncate.c
index 30c2f05..8ca20a9 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -799,7 +799,7 @@
*/
void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to)
{
- int bsize = 1 << inode->i_blkbits;
+ int bsize = i_blocksize(inode);
loff_t rounded_from;
struct page *page;
pgoff_t index;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 94fecac..5f6e29f 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2676,7 +2676,7 @@
if (!populated_zone(zone))
continue;
- classzone_idx = requested_highidx;
+ classzone_idx = gfp_zone(sc->gfp_mask);
while (!populated_zone(zone->zone_pgdat->node_zones +
classzone_idx))
classzone_idx--;
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index ad8d6e6..5e4199d 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -278,7 +278,8 @@
return 0;
out_free_newdev:
- free_netdev(new_dev);
+ if (new_dev->reg_state == NETREG_UNINITIALIZED)
+ free_netdev(new_dev);
return err;
}
@@ -291,6 +292,10 @@
if (ether_addr_equal(vlan->real_dev_addr, dev->dev_addr))
return;
+ /* vlan continues to inherit address of lower device */
+ if (vlan_dev_inherit_address(vlandev, dev))
+ goto out;
+
/* vlan address was different from the old address and is equal to
* the new address */
if (!ether_addr_equal(vlandev->dev_addr, vlan->real_dev_addr) &&
@@ -303,6 +308,7 @@
!ether_addr_equal(vlandev->dev_addr, dev->dev_addr))
dev_uc_add(dev, vlandev->dev_addr);
+out:
ether_addr_copy(vlan->real_dev_addr, dev->dev_addr);
}
diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h
index 9d010a09..cc15579 100644
--- a/net/8021q/vlan.h
+++ b/net/8021q/vlan.h
@@ -109,6 +109,8 @@
void vlan_setup(struct net_device *dev);
int register_vlan_dev(struct net_device *dev);
void unregister_vlan_dev(struct net_device *dev, struct list_head *head);
+bool vlan_dev_inherit_address(struct net_device *dev,
+ struct net_device *real_dev);
static inline u32 vlan_get_ingress_priority(struct net_device *dev,
u16 vlan_tci)
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index fded865..ca4dc90 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -244,6 +244,17 @@
strncpy(result, vlan_dev_priv(dev)->real_dev->name, 23);
}
+bool vlan_dev_inherit_address(struct net_device *dev,
+ struct net_device *real_dev)
+{
+ if (dev->addr_assign_type != NET_ADDR_STOLEN)
+ return false;
+
+ ether_addr_copy(dev->dev_addr, real_dev->dev_addr);
+ call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
+ return true;
+}
+
static int vlan_dev_open(struct net_device *dev)
{
struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
@@ -254,7 +265,8 @@
!(vlan->flags & VLAN_FLAG_LOOSE_BINDING))
return -ENETDOWN;
- if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr)) {
+ if (!ether_addr_equal(dev->dev_addr, real_dev->dev_addr) &&
+ !vlan_dev_inherit_address(dev, real_dev)) {
err = dev_uc_add(real_dev, dev->dev_addr);
if (err < 0)
goto out;
@@ -558,8 +570,10 @@
/* ipv6 shared card related stuff */
dev->dev_id = real_dev->dev_id;
- if (is_zero_ether_addr(dev->dev_addr))
- eth_hw_addr_inherit(dev, real_dev);
+ if (is_zero_ether_addr(dev->dev_addr)) {
+ ether_addr_copy(dev->dev_addr, real_dev->dev_addr);
+ dev->addr_assign_type = NET_ADDR_STOLEN;
+ }
if (is_zero_ether_addr(dev->broadcast))
memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len);
diff --git a/net/9p/client.c b/net/9p/client.c
index ea79ee9..f5feac4 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -2101,6 +2101,10 @@
trace_9p_protocol_dump(clnt, req->rc);
goto free_and_error;
}
+ if (rsize < count) {
+ pr_err("bogus RREADDIR count (%d > %d)\n", count, rsize);
+ count = rsize;
+ }
p9_debug(P9_DEBUG_9P, "<<< RREADDIR count %d\n", count);
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index abeccb56..b08d36c 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -484,16 +484,16 @@
struct net_device *dev = s->dev;
struct sock *sk = s->sock->sk;
struct sk_buff *skb;
- wait_queue_t wait;
+ DEFINE_WAIT_FUNC(wait, woken_wake_function);
BT_DBG("");
set_user_nice(current, -15);
- init_waitqueue_entry(&wait, current);
add_wait_queue(sk_sleep(sk), &wait);
while (1) {
- set_current_state(TASK_INTERRUPTIBLE);
+ /* Ensure session->terminate is updated */
+ smp_mb__before_atomic();
if (atomic_read(&s->terminate))
break;
@@ -515,9 +515,8 @@
break;
netif_wake_queue(dev);
- schedule();
+ wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}
- __set_current_state(TASK_RUNNING);
remove_wait_queue(sk_sleep(sk), &wait);
/* Cleanup session */
@@ -663,7 +662,7 @@
s = __bnep_get_session(req->dst);
if (s) {
atomic_inc(&s->terminate);
- wake_up_process(s->task);
+ wake_up_interruptible(sk_sleep(s->sock->sk));
} else
err = -ENOENT;
diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c
index 0117473..77f73bf 100644
--- a/net/bluetooth/cmtp/core.c
+++ b/net/bluetooth/cmtp/core.c
@@ -281,16 +281,16 @@
struct cmtp_session *session = arg;
struct sock *sk = session->sock->sk;
struct sk_buff *skb;
- wait_queue_t wait;
+ DEFINE_WAIT_FUNC(wait, woken_wake_function);
BT_DBG("session %pK", session);
set_user_nice(current, -15);
- init_waitqueue_entry(&wait, current);
add_wait_queue(sk_sleep(sk), &wait);
while (1) {
- set_current_state(TASK_INTERRUPTIBLE);
+ /* Ensure session->terminate is updated */
+ smp_mb__before_atomic();
if (atomic_read(&session->terminate))
break;
@@ -307,9 +307,8 @@
cmtp_process_transmit(session);
- schedule();
+ wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}
- __set_current_state(TASK_RUNNING);
remove_wait_queue(sk_sleep(sk), &wait);
down_write(&cmtp_session_sem);
@@ -394,7 +393,7 @@
err = cmtp_attach_device(session);
if (err < 0) {
atomic_inc(&session->terminate);
- wake_up_process(session->task);
+ wake_up_interruptible(sk_sleep(session->sock->sk));
up_write(&cmtp_session_sem);
return err;
}
@@ -432,7 +431,11 @@
/* Stop session thread */
atomic_inc(&session->terminate);
- wake_up_process(session->task);
+
+ /* Ensure session->terminate is updated */
+ smp_mb__after_atomic();
+
+ wake_up_interruptible(sk_sleep(session->sock->sk));
} else
err = -ENOENT;
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 87fd1a0..8402c34 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -1164,7 +1164,8 @@
if (msg->msg_flags & MSG_OOB)
return -EOPNOTSUPP;
- if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE))
+ if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE|
+ MSG_CMSG_COMPAT))
return -EINVAL;
if (len < 4 || len > HCI_MAX_FRAME_SIZE)
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index f02ffe5..f64de56 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -36,6 +36,7 @@
#define VERSION "1.2"
static DECLARE_RWSEM(hidp_session_sem);
+static DECLARE_WAIT_QUEUE_HEAD(hidp_session_wq);
static LIST_HEAD(hidp_session_list);
static unsigned char hidp_keycode[256] = {
@@ -1069,12 +1070,12 @@
* Wake up session thread and notify it to stop. This is asynchronous and
* returns immediately. Call this whenever a runtime error occurs and you want
* the session to stop.
- * Note: wake_up_process() performs any necessary memory-barriers for us.
+ * Note: wake_up_interruptible() performs any necessary memory-barriers for us.
*/
static void hidp_session_terminate(struct hidp_session *session)
{
atomic_inc(&session->terminate);
- wake_up_process(session->task);
+ wake_up_interruptible(&hidp_session_wq);
}
/*
@@ -1181,7 +1182,9 @@
struct sock *ctrl_sk = session->ctrl_sock->sk;
struct sock *intr_sk = session->intr_sock->sk;
struct sk_buff *skb;
+ DEFINE_WAIT_FUNC(wait, woken_wake_function);
+ add_wait_queue(&hidp_session_wq, &wait);
for (;;) {
/*
* This thread can be woken up two ways:
@@ -1189,12 +1192,10 @@
* session->terminate flag and wakes this thread up.
* - Via modifying the socket state of ctrl/intr_sock. This
* thread is woken up by ->sk_state_changed().
- *
- * Note: set_current_state() performs any necessary
- * memory-barriers for us.
*/
- set_current_state(TASK_INTERRUPTIBLE);
+ /* Ensure session->terminate is updated */
+ smp_mb__before_atomic();
if (atomic_read(&session->terminate))
break;
@@ -1228,11 +1229,22 @@
hidp_process_transmit(session, &session->ctrl_transmit,
session->ctrl_sock);
- schedule();
+ wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}
+ remove_wait_queue(&hidp_session_wq, &wait);
atomic_inc(&session->terminate);
- set_current_state(TASK_RUNNING);
+
+ /* Ensure session->terminate is updated */
+ smp_mb__after_atomic();
+}
+
+static int hidp_session_wake_function(wait_queue_t *wait,
+ unsigned int mode,
+ int sync, void *key)
+{
+ wake_up_interruptible(&hidp_session_wq);
+ return false;
}
/*
@@ -1245,7 +1257,8 @@
static int hidp_session_thread(void *arg)
{
struct hidp_session *session = arg;
- wait_queue_t ctrl_wait, intr_wait;
+ DEFINE_WAIT_FUNC(ctrl_wait, hidp_session_wake_function);
+ DEFINE_WAIT_FUNC(intr_wait, hidp_session_wake_function);
BT_DBG("session %pK", session);
@@ -1255,8 +1268,6 @@
set_user_nice(current, -15);
hidp_set_timer(session);
- init_waitqueue_entry(&ctrl_wait, current);
- init_waitqueue_entry(&intr_wait, current);
add_wait_queue(sk_sleep(session->ctrl_sock->sk), &ctrl_wait);
add_wait_queue(sk_sleep(session->intr_sock->sk), &intr_wait);
/* This memory barrier is paired with wq_has_sleeper(). See
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index bd5937d..f703d46 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -57,7 +57,7 @@
u8 code, u8 ident, u16 dlen, void *data);
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
void *data);
-static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
+static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
@@ -1462,7 +1462,7 @@
set_bit(CONF_REQ_SENT, &chan->conf_state);
l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
- l2cap_build_conf_req(chan, buf), buf);
+ l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
chan->num_conf_req++;
}
@@ -2966,12 +2966,15 @@
return len;
}
-static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
+static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
{
struct l2cap_conf_opt *opt = *ptr;
BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
+ if (size < L2CAP_CONF_OPT_SIZE + len)
+ return;
+
opt->type = type;
opt->len = len;
@@ -2996,7 +2999,7 @@
*ptr += L2CAP_CONF_OPT_SIZE + len;
}
-static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
+static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
{
struct l2cap_conf_efs efs;
@@ -3024,7 +3027,7 @@
}
l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
- (unsigned long) &efs);
+ (unsigned long) &efs, size);
}
static void l2cap_ack_timeout(struct work_struct *work)
@@ -3170,11 +3173,12 @@
chan->ack_win = chan->tx_win;
}
-static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
+static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
{
struct l2cap_conf_req *req = data;
struct l2cap_conf_rfc rfc = { .mode = chan->mode };
void *ptr = req->data;
+ void *endptr = data + data_size;
u16 size;
BT_DBG("chan %pK", chan);
@@ -3199,7 +3203,7 @@
done:
if (chan->imtu != L2CAP_DEFAULT_MTU)
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, endptr - ptr);
switch (chan->mode) {
case L2CAP_MODE_BASIC:
@@ -3218,7 +3222,7 @@
rfc.max_pdu_size = 0;
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
- (unsigned long) &rfc);
+ (unsigned long) &rfc, endptr - ptr);
break;
case L2CAP_MODE_ERTM:
@@ -3238,21 +3242,21 @@
L2CAP_DEFAULT_TX_WINDOW);
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
- (unsigned long) &rfc);
+ (unsigned long) &rfc, endptr - ptr);
if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
- l2cap_add_opt_efs(&ptr, chan);
+ l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
if (test_bit(FLAG_EXT_CTRL, &chan->flags))
l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
- chan->tx_win);
+ chan->tx_win, endptr - ptr);
if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
if (chan->fcs == L2CAP_FCS_NONE ||
test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
chan->fcs = L2CAP_FCS_NONE;
l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
- chan->fcs);
+ chan->fcs, endptr - ptr);
}
break;
@@ -3270,17 +3274,17 @@
rfc.max_pdu_size = cpu_to_le16(size);
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
- (unsigned long) &rfc);
+ (unsigned long) &rfc, endptr - ptr);
if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
- l2cap_add_opt_efs(&ptr, chan);
+ l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
if (chan->fcs == L2CAP_FCS_NONE ||
test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
chan->fcs = L2CAP_FCS_NONE;
l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
- chan->fcs);
+ chan->fcs, endptr - ptr);
}
break;
}
@@ -3291,10 +3295,11 @@
return ptr - data;
}
-static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
+static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
{
struct l2cap_conf_rsp *rsp = data;
void *ptr = rsp->data;
+ void *endptr = data + data_size;
void *req = chan->conf_req;
int len = chan->conf_len;
int type, hint, olen;
@@ -3396,7 +3401,7 @@
return -ECONNREFUSED;
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
- (unsigned long) &rfc);
+ (unsigned long) &rfc, endptr - ptr);
}
if (result == L2CAP_CONF_SUCCESS) {
@@ -3409,7 +3414,7 @@
chan->omtu = mtu;
set_bit(CONF_MTU_DONE, &chan->conf_state);
}
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);
if (remote_efs) {
if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
@@ -3423,7 +3428,7 @@
l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
sizeof(efs),
- (unsigned long) &efs);
+ (unsigned long) &efs, endptr - ptr);
} else {
/* Send PENDING Conf Rsp */
result = L2CAP_CONF_PENDING;
@@ -3456,7 +3461,7 @@
set_bit(CONF_MODE_DONE, &chan->conf_state);
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
- sizeof(rfc), (unsigned long) &rfc);
+ sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
chan->remote_id = efs.id;
@@ -3470,7 +3475,7 @@
le32_to_cpu(efs.sdu_itime);
l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
sizeof(efs),
- (unsigned long) &efs);
+ (unsigned long) &efs, endptr - ptr);
}
break;
@@ -3484,7 +3489,7 @@
set_bit(CONF_MODE_DONE, &chan->conf_state);
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
- (unsigned long) &rfc);
+ (unsigned long) &rfc, endptr - ptr);
break;
@@ -3506,10 +3511,11 @@
}
static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
- void *data, u16 *result)
+ void *data, size_t size, u16 *result)
{
struct l2cap_conf_req *req = data;
void *ptr = req->data;
+ void *endptr = data + size;
int type, olen;
unsigned long val;
struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
@@ -3527,13 +3533,13 @@
chan->imtu = L2CAP_DEFAULT_MIN_MTU;
} else
chan->imtu = val;
- l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
+ l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, endptr - ptr);
break;
case L2CAP_CONF_FLUSH_TO:
chan->flush_to = val;
l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
- 2, chan->flush_to);
+ 2, chan->flush_to, endptr - ptr);
break;
case L2CAP_CONF_RFC:
@@ -3547,13 +3553,13 @@
chan->fcs = 0;
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
- sizeof(rfc), (unsigned long) &rfc);
+ sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
break;
case L2CAP_CONF_EWS:
chan->ack_win = min_t(u16, val, chan->ack_win);
l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
- chan->tx_win);
+ chan->tx_win, endptr - ptr);
break;
case L2CAP_CONF_EFS:
@@ -3566,7 +3572,7 @@
return -ECONNREFUSED;
l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
- (unsigned long) &efs);
+ (unsigned long) &efs, endptr - ptr);
break;
case L2CAP_CONF_FCS:
@@ -3671,7 +3677,7 @@
return;
l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
- l2cap_build_conf_req(chan, buf), buf);
+ l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
chan->num_conf_req++;
}
@@ -3879,7 +3885,7 @@
u8 buf[128];
set_bit(CONF_REQ_SENT, &chan->conf_state);
l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
- l2cap_build_conf_req(chan, buf), buf);
+ l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
chan->num_conf_req++;
}
@@ -3957,7 +3963,7 @@
break;
l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
- l2cap_build_conf_req(chan, req), req);
+ l2cap_build_conf_req(chan, req, sizeof(req)), req);
chan->num_conf_req++;
break;
@@ -4069,7 +4075,7 @@
}
/* Complete config. */
- len = l2cap_parse_conf_req(chan, rsp);
+ len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
if (len < 0) {
l2cap_send_disconn_req(chan, ECONNRESET);
goto unlock;
@@ -4103,7 +4109,7 @@
if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
u8 buf[64];
l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
- l2cap_build_conf_req(chan, buf), buf);
+ l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
chan->num_conf_req++;
}
@@ -4163,7 +4169,7 @@
char buf[64];
len = l2cap_parse_conf_rsp(chan, rsp->data, len,
- buf, &result);
+ buf, sizeof(buf), &result);
if (len < 0) {
l2cap_send_disconn_req(chan, ECONNRESET);
goto done;
@@ -4193,7 +4199,7 @@
/* throw out any old stored conf requests */
result = L2CAP_CONF_SUCCESS;
len = l2cap_parse_conf_rsp(chan, rsp->data, len,
- req, &result);
+ req, sizeof(req), &result);
if (len < 0) {
l2cap_send_disconn_req(chan, ECONNRESET);
goto done;
@@ -4771,7 +4777,7 @@
set_bit(CONF_REQ_SENT, &chan->conf_state);
l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
L2CAP_CONF_REQ,
- l2cap_build_conf_req(chan, buf), buf);
+ l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
chan->num_conf_req++;
}
}
@@ -7443,7 +7449,7 @@
set_bit(CONF_REQ_SENT, &chan->conf_state);
l2cap_send_cmd(conn, l2cap_get_ident(conn),
L2CAP_CONF_REQ,
- l2cap_build_conf_req(chan, buf),
+ l2cap_build_conf_req(chan, buf, sizeof(buf)),
buf);
chan->num_conf_req++;
}
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 6f2c704..b166b3e 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -23,6 +23,7 @@
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
+#include <crypto/algapi.h>
#include <crypto/b128ops.h>
#include <net/bluetooth/bluetooth.h>
@@ -524,7 +525,7 @@
if (err)
return false;
- return !memcmp(bdaddr->b, hash, 3);
+ return !crypto_memneq(bdaddr->b, hash, 3);
}
int smp_generate_rpa(struct hci_dev *hdev, const u8 irk[16], bdaddr_t *rpa)
@@ -577,7 +578,7 @@
/* This is unlikely, but we need to check that
* we didn't accidentially generate a debug key.
*/
- if (memcmp(smp->local_sk, debug_sk, 32))
+ if (crypto_memneq(smp->local_sk, debug_sk, 32))
break;
}
smp->debug_key = false;
@@ -991,7 +992,7 @@
if (ret)
return SMP_UNSPECIFIED;
- if (memcmp(smp->pcnf, confirm, sizeof(smp->pcnf)) != 0) {
+ if (crypto_memneq(smp->pcnf, confirm, sizeof(smp->pcnf))) {
BT_ERR("Pairing failed (confirmation values mismatch)");
return SMP_CONFIRM_FAILED;
}
@@ -1491,7 +1492,7 @@
smp->rrnd, r, cfm))
return SMP_UNSPECIFIED;
- if (memcmp(smp->pcnf, cfm, 16))
+ if (crypto_memneq(smp->pcnf, cfm, 16))
return SMP_CONFIRM_FAILED;
smp->passkey_round++;
@@ -1875,7 +1876,7 @@
/* This is unlikely, but we need to check that
* we didn't accidentially generate a debug key.
*/
- if (memcmp(smp->local_sk, debug_sk, 32))
+ if (crypto_memneq(smp->local_sk, debug_sk, 32))
break;
}
}
@@ -2140,7 +2141,7 @@
if (err)
return SMP_UNSPECIFIED;
- if (memcmp(smp->pcnf, cfm, 16))
+ if (crypto_memneq(smp->pcnf, cfm, 16))
return SMP_CONFIRM_FAILED;
} else {
smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(smp->prnd),
@@ -2621,7 +2622,7 @@
if (err)
return SMP_UNSPECIFIED;
- if (memcmp(cfm.confirm_val, smp->pcnf, 16))
+ if (crypto_memneq(cfm.confirm_val, smp->pcnf, 16))
return SMP_CONFIRM_FAILED;
}
@@ -2654,7 +2655,7 @@
else
hcon->pending_sec_level = BT_SECURITY_FIPS;
- if (!memcmp(debug_pk, smp->remote_pk, 64))
+ if (!crypto_memneq(debug_pk, smp->remote_pk, 64))
set_bit(SMP_FLAG_DEBUG_KEY, &smp->flags);
if (smp->method == DSP_PASSKEY) {
@@ -2753,7 +2754,7 @@
if (err)
return SMP_UNSPECIFIED;
- if (memcmp(check->e, e, 16))
+ if (crypto_memneq(check->e, e, 16))
return SMP_DHKEY_CHECK_FAILED;
if (!hcon->out) {
@@ -3463,7 +3464,7 @@
if (err)
return err;
- if (memcmp(res, exp, 3))
+ if (crypto_memneq(res, exp, 3))
return -EINVAL;
return 0;
@@ -3493,7 +3494,7 @@
if (err)
return err;
- if (memcmp(res, exp, 16))
+ if (crypto_memneq(res, exp, 16))
return -EINVAL;
return 0;
@@ -3518,7 +3519,7 @@
if (err)
return err;
- if (memcmp(res, exp, 16))
+ if (crypto_memneq(res, exp, 16))
return -EINVAL;
return 0;
@@ -3550,7 +3551,7 @@
if (err)
return err;
- if (memcmp(res, exp, 16))
+ if (crypto_memneq(res, exp, 16))
return -EINVAL;
return 0;
@@ -3584,10 +3585,10 @@
if (err)
return err;
- if (memcmp(mackey, exp_mackey, 16))
+ if (crypto_memneq(mackey, exp_mackey, 16))
return -EINVAL;
- if (memcmp(ltk, exp_ltk, 16))
+ if (crypto_memneq(ltk, exp_ltk, 16))
return -EINVAL;
return 0;
@@ -3620,7 +3621,7 @@
if (err)
return err;
- if (memcmp(res, exp, 16))
+ if (crypto_memneq(res, exp, 16))
return -EINVAL;
return 0;
@@ -3674,7 +3675,7 @@
if (err)
return err;
- if (memcmp(res, exp, 16))
+ if (crypto_memneq(res, exp, 16))
return -EINVAL;
return 0;
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 413d18e..ff8bb41 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -768,6 +768,13 @@
return -EPROTONOSUPPORT;
}
}
+
+ if (data[IFLA_BR_VLAN_DEFAULT_PVID]) {
+ __u16 defpvid = nla_get_u16(data[IFLA_BR_VLAN_DEFAULT_PVID]);
+
+ if (defpvid >= VLAN_VID_MASK)
+ return -EINVAL;
+ }
#endif
return 0;
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index 8a7ada8..bcb4559 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -166,6 +166,8 @@
br_debug(br, "using kernel STP\n");
/* To start timers on any ports left in blocking */
+ if (br->dev->flags & IFF_UP)
+ mod_timer(&br->hello_timer, jiffies + br->hello_time);
br_port_state_selection(br);
}
diff --git a/net/bridge/br_stp_timer.c b/net/bridge/br_stp_timer.c
index 5f0f5af..7dbe6a5 100644
--- a/net/bridge/br_stp_timer.c
+++ b/net/bridge/br_stp_timer.c
@@ -40,7 +40,7 @@
if (br->dev->flags & IFF_UP) {
br_config_bpdu_generation(br);
- if (br->stp_enabled != BR_USER_STP)
+ if (br->stp_enabled == BR_KERNEL_STP)
mod_timer(&br->hello_timer,
round_jiffies(jiffies + br->hello_time));
}
diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c
index 59ce1fc..71b6ab2 100644
--- a/net/caif/cfpkt_skbuff.c
+++ b/net/caif/cfpkt_skbuff.c
@@ -81,11 +81,7 @@
{
struct sk_buff *skb;
- if (likely(in_interrupt()))
- skb = alloc_skb(len + pfx, GFP_ATOMIC);
- else
- skb = alloc_skb(len + pfx, GFP_KERNEL);
-
+ skb = alloc_skb(len + pfx, GFP_ATOMIC);
if (unlikely(skb == NULL))
return NULL;
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index b8d927c..a6b2f21 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -7,6 +7,7 @@
#include <linux/kthread.h>
#include <linux/net.h>
#include <linux/nsproxy.h>
+#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/string.h>
@@ -478,11 +479,16 @@
{
struct sockaddr_storage *paddr = &con->peer_addr.in_addr;
struct socket *sock;
+ unsigned int noio_flag;
int ret;
BUG_ON(con->sock);
+
+ /* sock_create_kern() allocates with GFP_KERNEL */
+ noio_flag = memalloc_noio_save();
ret = sock_create_kern(read_pnet(&con->msgr->net), paddr->ss_family,
SOCK_STREAM, IPPROTO_TCP, &sock);
+ memalloc_noio_restore(noio_flag);
if (ret)
return ret;
sock->sk->sk_allocation = GFP_NOFS;
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
index ddc3573..bc95e48 100644
--- a/net/ceph/osdmap.c
+++ b/net/ceph/osdmap.c
@@ -1265,7 +1265,6 @@
if ((map->osd_state[osd] & CEPH_OSD_EXISTS) &&
(xorstate & CEPH_OSD_EXISTS)) {
pr_info("osd%d does not exist\n", osd);
- map->osd_weight[osd] = CEPH_OSD_IN;
ret = set_primary_affinity(map, osd,
CEPH_OSD_DEFAULT_PRIMARY_AFFINITY);
if (ret)
diff --git a/net/core/dev.c b/net/core/dev.c
index 2587d7f..17d23ff 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -184,7 +184,7 @@
/* protects napi_hash addition/deletion and napi_gen_id */
static DEFINE_SPINLOCK(napi_hash_lock);
-static unsigned int napi_gen_id;
+static unsigned int napi_gen_id = NR_CPUS;
static DEFINE_HASHTABLE(napi_hash, 8);
static seqcount_t devnet_rename_seq;
@@ -1248,8 +1248,9 @@
if (!new_ifalias)
return -ENOMEM;
dev->ifalias = new_ifalias;
+ memcpy(dev->ifalias, alias, len);
+ dev->ifalias[len] = 0;
- strlcpy(dev->ifalias, alias, len+1);
return len;
}
@@ -2551,9 +2552,10 @@
static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
{
if (tx_path)
- return skb->ip_summed != CHECKSUM_PARTIAL;
- else
- return skb->ip_summed == CHECKSUM_NONE;
+ return skb->ip_summed != CHECKSUM_PARTIAL &&
+ skb->ip_summed != CHECKSUM_UNNECESSARY;
+
+ return skb->ip_summed == CHECKSUM_NONE;
}
/**
@@ -2572,11 +2574,12 @@
struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
netdev_features_t features, bool tx_path)
{
+ struct sk_buff *segs;
+
if (unlikely(skb_needs_check(skb, tx_path))) {
int err;
- skb_warn_bad_offload(skb);
-
+ /* We're going to init ->check field in TCP or UDP header */
err = skb_cow_head(skb, 0);
if (err < 0)
return ERR_PTR(err);
@@ -2591,7 +2594,12 @@
skb_reset_mac_header(skb);
skb_reset_mac_len(skb);
- return skb_mac_gso_segment(skb, features);
+ segs = skb_mac_gso_segment(skb, features);
+
+ if (unlikely(skb_needs_check(skb, tx_path)))
+ skb_warn_bad_offload(skb);
+
+ return segs;
}
EXPORT_SYMBOL(__skb_gso_segment);
@@ -3055,7 +3063,9 @@
int queue_index = 0;
#ifdef CONFIG_XPS
- if (skb->sender_cpu == 0)
+ u32 sender_cpu = skb->sender_cpu - 1;
+
+ if (sender_cpu >= (u32)NR_CPUS)
skb->sender_cpu = raw_smp_processor_id() + 1;
#endif
@@ -4380,6 +4390,12 @@
}
EXPORT_SYMBOL(gro_find_complete_by_type);
+static void napi_skb_free_stolen_head(struct sk_buff *skb)
+{
+ skb_dst_drop(skb);
+ kmem_cache_free(skbuff_head_cache, skb);
+}
+
static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
{
switch (ret) {
@@ -4393,12 +4409,10 @@
break;
case GRO_MERGED_FREE:
- if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) {
- skb_dst_drop(skb);
- kmem_cache_free(skbuff_head_cache, skb);
- } else {
+ if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
+ napi_skb_free_stolen_head(skb);
+ else
__kfree_skb(skb);
- }
break;
case GRO_HELD:
@@ -4464,10 +4478,16 @@
break;
case GRO_DROP:
- case GRO_MERGED_FREE:
napi_reuse_skb(napi, skb);
break;
+ case GRO_MERGED_FREE:
+ if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
+ napi_skb_free_stolen_head(skb);
+ else
+ napi_reuse_skb(napi, skb);
+ break;
+
case GRO_MERGED:
break;
}
@@ -4745,25 +4765,22 @@
void napi_hash_add(struct napi_struct *napi)
{
- if (!test_and_set_bit(NAPI_STATE_HASHED, &napi->state)) {
+ if (test_and_set_bit(NAPI_STATE_HASHED, &napi->state))
+ return;
- spin_lock(&napi_hash_lock);
+ spin_lock(&napi_hash_lock);
- /* 0 is not a valid id, we also skip an id that is taken
- * we expect both events to be extremely rare
- */
- napi->napi_id = 0;
- while (!napi->napi_id) {
- napi->napi_id = ++napi_gen_id;
- if (napi_by_id(napi->napi_id))
- napi->napi_id = 0;
- }
+ /* 0..NR_CPUS+1 range is reserved for sender_cpu use */
+ do {
+ if (unlikely(++napi_gen_id < NR_CPUS + 1))
+ napi_gen_id = NR_CPUS + 1;
+ } while (napi_by_id(napi_gen_id));
+ napi->napi_id = napi_gen_id;
- hlist_add_head_rcu(&napi->napi_hash_node,
- &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);
+ hlist_add_head_rcu(&napi->napi_hash_node,
+ &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);
- spin_unlock(&napi_hash_lock);
- }
+ spin_unlock(&napi_hash_lock);
}
EXPORT_SYMBOL_GPL(napi_hash_add);
@@ -7083,8 +7100,8 @@
} else {
netdev_stats_to_stats64(storage, &dev->stats);
}
- storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
- storage->tx_dropped += atomic_long_read(&dev->tx_dropped);
+ storage->rx_dropped += (unsigned long)atomic_long_read(&dev->rx_dropped);
+ storage->tx_dropped += (unsigned long)atomic_long_read(&dev->tx_dropped);
return storage;
}
EXPORT_SYMBOL(dev_get_stats);
diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
index b94b1d2..151e047 100644
--- a/net/core/dev_ioctl.c
+++ b/net/core/dev_ioctl.c
@@ -28,6 +28,7 @@
if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
return -EFAULT;
+ ifr.ifr_name[IFNAMSIZ-1] = 0;
error = netdev_get_name(net, ifr.ifr_name, ifr.ifr_ifindex);
if (error)
diff --git a/net/core/dst.c b/net/core/dst.c
index a1656e3..e72d706 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -151,13 +151,13 @@
}
EXPORT_SYMBOL(dst_discard_out);
-const u32 dst_default_metrics[RTAX_MAX + 1] = {
+const struct dst_metrics dst_default_metrics = {
/* This initializer is needed to force linker to place this variable
* into const section. Otherwise it might end into bss section.
* We really want to avoid false sharing on this variable, and catch
* any writes on it.
*/
- [RTAX_MAX] = 0xdeadbeef,
+ .refcnt = ATOMIC_INIT(1),
};
void dst_init(struct dst_entry *dst, struct dst_ops *ops,
@@ -169,7 +169,7 @@
if (dev)
dev_hold(dev);
dst->ops = ops;
- dst_init_metrics(dst, dst_default_metrics, true);
+ dst_init_metrics(dst, dst_default_metrics.metrics, true);
dst->expires = 0UL;
dst->path = dst;
dst->from = NULL;
@@ -315,25 +315,30 @@
u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old)
{
- u32 *p = kmalloc(sizeof(u32) * RTAX_MAX, GFP_ATOMIC);
+ struct dst_metrics *p = kmalloc(sizeof(*p), GFP_ATOMIC);
if (p) {
- u32 *old_p = __DST_METRICS_PTR(old);
+ struct dst_metrics *old_p = (struct dst_metrics *)__DST_METRICS_PTR(old);
unsigned long prev, new;
- memcpy(p, old_p, sizeof(u32) * RTAX_MAX);
+ atomic_set(&p->refcnt, 1);
+ memcpy(p->metrics, old_p->metrics, sizeof(p->metrics));
new = (unsigned long) p;
prev = cmpxchg(&dst->_metrics, old, new);
if (prev != old) {
kfree(p);
- p = __DST_METRICS_PTR(prev);
+ p = (struct dst_metrics *)__DST_METRICS_PTR(prev);
if (prev & DST_METRICS_READ_ONLY)
p = NULL;
+ } else if (prev & DST_METRICS_REFCOUNTED) {
+ if (atomic_dec_and_test(&old_p->refcnt))
+ kfree(old_p);
}
}
- return p;
+ BUILD_BUG_ON(offsetof(struct dst_metrics, metrics) != 0);
+ return (u32 *)p;
}
EXPORT_SYMBOL(dst_cow_metrics_generic);
@@ -342,7 +347,7 @@
{
unsigned long prev, new;
- new = ((unsigned long) dst_default_metrics) | DST_METRICS_READ_ONLY;
+ new = ((unsigned long) &dst_default_metrics) | DST_METRICS_READ_ONLY;
prev = cmpxchg(&dst->_metrics, old, new);
if (prev == old)
kfree(__DST_METRICS_PTR(old));
@@ -457,6 +462,20 @@
spin_lock_bh(&dst_garbage.lock);
dst = dst_garbage.list;
dst_garbage.list = NULL;
+ /* The code in dst_ifdown places a hold on the loopback device.
+ * If the gc entry processing is set to expire after a lengthy
+ * interval, this hold can cause netdev_wait_allrefs() to hang
+ * out and wait for a long time -- until the the loopback
+ * interface is released. If we're really unlucky, it'll emit
+ * pr_emerg messages to console too. Reset the interval here,
+ * so dst cleanups occur in a more timely fashion.
+ */
+ if (dst_garbage.timer_inc > DST_GC_INC) {
+ dst_garbage.timer_inc = DST_GC_INC;
+ dst_garbage.timer_expires = DST_GC_MIN;
+ mod_delayed_work(system_wq, &dst_gc_work,
+ dst_garbage.timer_expires);
+ }
spin_unlock_bh(&dst_garbage.lock);
if (last)
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 29edf74..b6bca62 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -886,9 +886,12 @@
if (regs.len > reglen)
regs.len = reglen;
- regbuf = vzalloc(reglen);
- if (reglen && !regbuf)
- return -ENOMEM;
+ regbuf = NULL;
+ if (reglen) {
+ regbuf = vzalloc(reglen);
+ if (!regbuf)
+ return -ENOMEM;
+ }
ops->get_regs(dev, ®s, regbuf);
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index b29250f..a57de3d 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -875,7 +875,8 @@
skb = skb_clone(skb, GFP_ATOMIC);
neigh_log_probe(neigh);
write_unlock(&neigh->lock);
- neigh->ops->solicit(neigh, skb);
+ if (neigh->ops->solicit)
+ neigh->ops->solicit(neigh, skb);
atomic_inc(&neigh->probes);
kfree_skb(skb);
}
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 94acfc8..440aa9f 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -105,15 +105,21 @@
while ((skb = skb_dequeue(&npinfo->txq))) {
struct net_device *dev = skb->dev;
struct netdev_queue *txq;
+ unsigned int q_index;
if (!netif_device_present(dev) || !netif_running(dev)) {
kfree_skb(skb);
continue;
}
- txq = skb_get_tx_queue(dev, skb);
-
local_irq_save(flags);
+ /* check if skb->queue_mapping is still valid */
+ q_index = skb_get_queue_mapping(skb);
+ if (unlikely(q_index >= dev->real_num_tx_queues)) {
+ q_index = q_index % dev->real_num_tx_queues;
+ skb_set_queue_mapping(skb, q_index);
+ }
+ txq = netdev_get_tx_queue(dev, q_index);
HARD_TX_LOCK(dev, txq, smp_processor_id());
if (netif_xmit_frozen_or_stopped(txq) ||
netpoll_start_xmit(skb, dev, txq) != NETDEV_TX_OK) {
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index b94e165..5b3d611 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -897,6 +897,7 @@
+ nla_total_size(1) /* IFLA_LINKMODE */
+ nla_total_size(4) /* IFLA_CARRIER_CHANGES */
+ nla_total_size(4) /* IFLA_LINK_NETNSID */
+ + nla_total_size(4) /* IFLA_GROUP */
+ nla_total_size(ext_filter_mask
& RTEXT_FILTER_VF ? 4 : 0) /* IFLA_NUM_VF */
+ rtnl_vfinfo_size(dev, ext_filter_mask) /* IFLA_VFINFO_LIST */
@@ -1018,7 +1019,7 @@
return err;
}
- if (nla_put(skb, IFLA_PHYS_PORT_NAME, strlen(name), name))
+ if (nla_put_string(skb, IFLA_PHYS_PORT_NAME, name))
return -EMSGSIZE;
return 0;
@@ -1089,6 +1090,8 @@
struct ifla_vf_mac vf_mac;
struct ifla_vf_info ivi;
+ memset(&ivi, 0, sizeof(ivi));
+
/* Not all SR-IOV capable drivers support the
* spoofcheck and "RSS query enable" query. Preset to
* -1 so the user space tool can detect that the driver
@@ -1097,7 +1100,6 @@
ivi.spoofchk = -1;
ivi.rss_query_en = -1;
ivi.trusted = -1;
- memset(ivi.mac, 0, sizeof(ivi.mac));
/* The default value for VF link state is "auto"
* IFLA_VF_LINK_STATE_AUTO which equals zero
*/
@@ -1370,6 +1372,7 @@
[IFLA_PHYS_SWITCH_ID] = { .type = NLA_BINARY, .len = MAX_PHYS_ITEM_ID_LEN },
[IFLA_LINK_NETNSID] = { .type = NLA_S32 },
[IFLA_PROTO_DOWN] = { .type = NLA_U8 },
+ [IFLA_GROUP] = { .type = NLA_U32 },
};
static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
@@ -1458,13 +1461,13 @@
cb->nlh->nlmsg_seq, 0,
NLM_F_MULTI,
ext_filter_mask);
- /* If we ran out of room on the first message,
- * we're in trouble
- */
- WARN_ON((err == -EMSGSIZE) && (skb->len == 0));
- if (err < 0)
- goto out;
+ if (err < 0) {
+ if (likely(skb->len))
+ goto out;
+
+ goto out_err;
+ }
nl_dump_check_consistent(cb, nlmsg_hdr(skb));
cont:
@@ -1472,10 +1475,12 @@
}
}
out:
+ err = skb->len;
+out_err:
cb->args[1] = idx;
cb->args[0] = h;
- return skb->len;
+ return err;
}
int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len)
@@ -1737,7 +1742,8 @@
struct sockaddr *sa;
int len;
- len = sizeof(sa_family_t) + dev->addr_len;
+ len = sizeof(sa_family_t) + max_t(size_t, dev->addr_len,
+ sizeof(*sa));
sa = kmalloc(len, GFP_KERNEL);
if (!sa) {
err = -ENOMEM;
@@ -3127,8 +3133,12 @@
err = br_dev->netdev_ops->ndo_bridge_getlink(
skb, portid, seq, dev,
filter_mask, NLM_F_MULTI);
- if (err < 0 && err != -EOPNOTSUPP)
- break;
+ if (err < 0 && err != -EOPNOTSUPP) {
+ if (likely(skb->len))
+ break;
+
+ goto out_err;
+ }
}
idx++;
}
@@ -3139,16 +3149,22 @@
seq, dev,
filter_mask,
NLM_F_MULTI);
- if (err < 0 && err != -EOPNOTSUPP)
- break;
+ if (err < 0 && err != -EOPNOTSUPP) {
+ if (likely(skb->len))
+ break;
+
+ goto out_err;
+ }
}
idx++;
}
}
+ err = skb->len;
+out_err:
rcu_read_unlock();
cb->args[0] = idx;
- return skb->len;
+ return err;
}
static inline size_t bridge_nlmsg_size(void)
diff --git a/net/core/sock.c b/net/core/sock.c
index a84a154..39e9ab7 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1463,6 +1463,11 @@
pr_debug("%s: optmem leakage (%d bytes) detected\n",
__func__, atomic_read(&sk->sk_omem_alloc));
+ if (sk->sk_frag.page) {
+ put_page(sk->sk_frag.page);
+ sk->sk_frag.page = NULL;
+ }
+
if (sk->sk_peer_cred)
put_cred(sk->sk_peer_cred);
put_pid(sk->sk_peer_pid);
@@ -1564,6 +1569,12 @@
is_charged = sk_filter_charge(newsk, filter);
if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk, sk))) {
+ /* We need to make sure that we don't uncharge the new
+ * socket if we couldn't charge it in the first place
+ * as otherwise we uncharge the parent's filter.
+ */
+ if (!is_charged)
+ RCU_INIT_POINTER(newsk->sk_filter, NULL);
/* It is still raw copy of parent, so invalidate
* destructor and make plain sk_free() */
newsk->sk_destruct = NULL;
@@ -1691,17 +1702,17 @@
void skb_orphan_partial(struct sk_buff *skb)
{
- /* TCP stack sets skb->ooo_okay based on sk_wmem_alloc,
- * so we do not completely orphan skb, but transfert all
- * accounted bytes but one, to avoid unexpected reorders.
- */
if (skb->destructor == sock_wfree
#ifdef CONFIG_INET
|| skb->destructor == tcp_wfree
#endif
) {
- atomic_sub(skb->truesize - 1, &skb->sk->sk_wmem_alloc);
- skb->truesize = 1;
+ struct sock *sk = skb->sk;
+
+ if (atomic_inc_not_zero(&sk->sk_refcnt)) {
+ atomic_sub(skb->truesize, &sk->sk_wmem_alloc);
+ skb->destructor = sock_efree;
+ }
} else {
skb_orphan(skb);
}
@@ -2706,11 +2717,6 @@
sk_refcnt_debug_release(sk);
- if (sk->sk_frag.page) {
- put_page(sk->sk_frag.page);
- sk->sk_frag.page = NULL;
- }
-
sock_put(sk);
}
EXPORT_SYMBOL(sk_common_release);
diff --git a/net/dccp/feat.c b/net/dccp/feat.c
index 1704948..f227f00 100644
--- a/net/dccp/feat.c
+++ b/net/dccp/feat.c
@@ -1471,9 +1471,12 @@
* singleton values (which always leads to failure).
* These settings can still (later) be overridden via sockopts.
*/
- if (ccid_get_builtin_ccids(&tx.val, &tx.len) ||
- ccid_get_builtin_ccids(&rx.val, &rx.len))
+ if (ccid_get_builtin_ccids(&tx.val, &tx.len))
return -ENOBUFS;
+ if (ccid_get_builtin_ccids(&rx.val, &rx.len)) {
+ kfree(tx.val);
+ return -ENOBUFS;
+ }
if (!dccp_feat_prefer(sysctl_dccp_tx_ccid, tx.val, tx.len) ||
!dccp_feat_prefer(sysctl_dccp_rx_ccid, rx.val, rx.len))
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 6467bf39..e217f179 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -635,6 +635,7 @@
goto drop_and_free;
inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
+ reqsk_put(req);
return 0;
drop_and_free:
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 8113ad5..09a9ab6 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -376,6 +376,7 @@
goto drop_and_free;
inet_csk_reqsk_queue_hash_add(sk, req, DCCP_TIMEOUT_INIT);
+ reqsk_put(req);
return 0;
drop_and_free:
@@ -422,6 +423,9 @@
newsk->sk_backlog_rcv = dccp_v4_do_rcv;
newnp->pktoptions = NULL;
newnp->opt = NULL;
+ newnp->ipv6_mc_list = NULL;
+ newnp->ipv6_ac_list = NULL;
+ newnp->ipv6_fl_list = NULL;
newnp->mcast_oif = inet6_iif(skb);
newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
@@ -486,6 +490,9 @@
/* Clone RX bits */
newnp->rxopt.all = np->rxopt.all;
+ newnp->ipv6_mc_list = NULL;
+ newnp->ipv6_ac_list = NULL;
+ newnp->ipv6_fl_list = NULL;
newnp->pktoptions = NULL;
newnp->opt = NULL;
newnp->mcast_oif = inet6_iif(skb);
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 9fe25bf..b68168f 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -24,6 +24,7 @@
#include <net/checksum.h>
#include <net/inet_sock.h>
+#include <net/inet_common.h>
#include <net/sock.h>
#include <net/xfrm.h>
@@ -170,6 +171,15 @@
EXPORT_SYMBOL_GPL(dccp_packet_name);
+static void dccp_sk_destruct(struct sock *sk)
+{
+ struct dccp_sock *dp = dccp_sk(sk);
+
+ ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
+ dp->dccps_hc_tx_ccid = NULL;
+ inet_sock_destruct(sk);
+}
+
int dccp_init_sock(struct sock *sk, const __u8 ctl_sock_initialized)
{
struct dccp_sock *dp = dccp_sk(sk);
@@ -179,6 +189,7 @@
icsk->icsk_syn_retries = sysctl_dccp_request_retries;
sk->sk_state = DCCP_CLOSED;
sk->sk_write_space = dccp_write_space;
+ sk->sk_destruct = dccp_sk_destruct;
icsk->icsk_sync_mss = dccp_sync_mss;
dp->dccps_mss_cache = 536;
dp->dccps_rate_last = jiffies;
@@ -201,10 +212,7 @@
{
struct dccp_sock *dp = dccp_sk(sk);
- /*
- * DCCP doesn't use sk_write_queue, just sk_send_head
- * for retransmissions
- */
+ __skb_queue_purge(&sk->sk_write_queue);
if (sk->sk_send_head != NULL) {
kfree_skb(sk->sk_send_head);
sk->sk_send_head = NULL;
@@ -222,8 +230,7 @@
dp->dccps_hc_rx_ackvec = NULL;
}
ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk);
- ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
- dp->dccps_hc_rx_ccid = dp->dccps_hc_tx_ccid = NULL;
+ dp->dccps_hc_rx_ccid = NULL;
/* clean up feature negotiation state */
dccp_feat_list_purge(&dp->dccps_featneg);
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index b1dc096..403593b 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -188,12 +188,6 @@
call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
}
-static inline void dnrt_drop(struct dn_route *rt)
-{
- dst_release(&rt->dst);
- call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
-}
-
static void dn_dst_check_expire(unsigned long dummy)
{
int i;
@@ -248,7 +242,7 @@
}
*rtp = rt->dst.dn_next;
rt->dst.dn_next = NULL;
- dnrt_drop(rt);
+ dnrt_free(rt);
break;
}
spin_unlock_bh(&dn_rt_hash_table[i].lock);
@@ -350,7 +344,7 @@
dst_use(&rth->dst, now);
spin_unlock_bh(&dn_rt_hash_table[hash].lock);
- dnrt_drop(rt);
+ dst_free(&rt->dst);
*rp = rth;
return 0;
}
@@ -380,7 +374,7 @@
for(; rt; rt = next) {
next = rcu_dereference_raw(rt->dst.dn_next);
RCU_INIT_POINTER(rt->dst.dn_next, NULL);
- dst_free((struct dst_entry *)rt);
+ dnrt_free(rt);
}
nothing_to_declare:
@@ -1187,7 +1181,7 @@
if (dev_out->flags & IFF_LOOPBACK)
flags |= RTCF_LOCAL;
- rt = dst_alloc(&dn_dst_ops, dev_out, 1, DST_OBSOLETE_NONE, DST_HOST);
+ rt = dst_alloc(&dn_dst_ops, dev_out, 0, DST_OBSOLETE_NONE, DST_HOST);
if (rt == NULL)
goto e_nobufs;
diff --git a/net/decnet/netfilter/dn_rtmsg.c b/net/decnet/netfilter/dn_rtmsg.c
index 85f2fdc..29246bc 100644
--- a/net/decnet/netfilter/dn_rtmsg.c
+++ b/net/decnet/netfilter/dn_rtmsg.c
@@ -102,7 +102,9 @@
{
struct nlmsghdr *nlh = nlmsg_hdr(skb);
- if (nlh->nlmsg_len < sizeof(*nlh) || skb->len < nlh->nlmsg_len)
+ if (skb->len < sizeof(*nlh) ||
+ nlh->nlmsg_len < sizeof(*nlh) ||
+ skb->len < nlh->nlmsg_len)
return;
if (!netlink_capable(skb, CAP_NET_ADMIN))
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 8dfe9fb..554c2a9 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -1006,10 +1006,8 @@
/* Use already configured phy mode */
if (p->phy_interface == PHY_INTERFACE_MODE_NA)
p->phy_interface = p->phy->interface;
- phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link,
- p->phy_interface);
-
- return 0;
+ return phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link,
+ p->phy_interface);
}
static int dsa_slave_phy_setup(struct dsa_slave_priv *p,
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index c9ef99f..aad274c 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1035,7 +1035,7 @@
.type = SOCK_DGRAM,
.protocol = IPPROTO_ICMP,
.prot = &ping_prot,
- .ops = &inet_dgram_ops,
+ .ops = &inet_sockraw_ops,
.flags = INET_PROTOSW_REUSE,
},
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 7025dc6..9ec2e39 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -624,8 +624,15 @@
}
EXPORT_SYMBOL(arp_create);
-static inline void arp_log(const struct sk_buff *skb,
- const struct ethhdr *ethhdr, int err) {
+static int arp_xmit_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
+{
+ return dev_queue_xmit(skb);
+}
+
+static inline bool arp_check_log(const struct sk_buff *skb,
+ struct ethhdr *eth, struct arphdr *arp,
+ char *tip, char *dev_name)
+{
struct ether_arp_hdr {
/* Lifted from the comments in uapi/linux/if_arp.h. */
unsigned char ar_sha[ETH_ALEN];
@@ -634,17 +641,35 @@
unsigned char ar_tip[4];
} __packed * arp_ether;
const struct net_device *dev = skb->dev;
- const struct arphdr *arp = arp_hdr(skb);
+
+ if (skb_headlen(skb) < sizeof(*eth) + sizeof(*arp) + sizeof(*arp_ether))
+ return false;
+
+ if (!dev || !arp_hdr(skb))
+ return false;
+
+ arp_ether = (struct ether_arp_hdr *)(arp_hdr(skb) + 1);
+ memcpy(arp, arp_hdr(skb), sizeof(*arp));
+ if (arp->ar_hrd != htons(ARPHRD_ETHER) ||
+ arp->ar_op != htons(ARPOP_REQUEST))
+ return false;
+
+ memcpy(eth, skb->data, sizeof(*eth));
+ if (ether_addr_equal(eth->h_dest, dev->broadcast))
+ return false;
+
+ memcpy(tip, arp_ether->ar_tip, 4);
+ memcpy(dev_name, dev->name, IFNAMSIZ);
+
+ return true;
+}
+
+static inline void arp_log(const struct ethhdr *eth,
+ const struct arphdr *arp, const char *tip,
+ const char *dev_name, int err)
+{
const char *msg, *type;
- if (skb_headlen(skb) <= sizeof(*arp) + sizeof(*arp_ether))
- return;
- if (!dev || !arp || arp->ar_hrd != htons(ARPHRD_ETHER))
- return;
- if (ether_addr_equal(ethhdr->h_dest, dev->broadcast))
- return;
-
- arp_ether = (struct ether_arp_hdr *)(arp + 1);
switch (htons(arp->ar_op)) {
case ARPOP_REQUEST:
type = "request";
@@ -658,21 +683,23 @@
}
msg = err ? "failed to send" : "sent";
pr_info("%s: %s ARP %s for %pI4 %pM %s err=%d\n",
- __func__, msg, type, arp_ether->ar_tip, ethhdr->h_dest,
- dev->name, err);
+ __func__, msg, type, tip, eth->h_dest, dev_name, err);
}
-static int arp_xmit_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
+static int arp_xmit_finish_log(struct net *net, struct sock *sk,
+ struct sk_buff *skb)
{
- int err;
+ char dev_name[IFNAMSIZ];
struct ethhdr eth;
+ struct arphdr arp;
+ char tip[4];
- if (skb_headlen(skb) >= sizeof(eth)) {
- /* Save the ethernet header, dev_queue_xmit might modify it. */
- memcpy(ð, skb->data, sizeof(eth));
- }
- err = dev_queue_xmit(skb);
- arp_log(skb, ð, err);
+ bool log = arp_check_log(skb, ð, &arp, tip, dev_name);
+ int err = arp_xmit_finish(net, sk, skb);
+
+ if (log)
+ arp_log(ð, &arp, tip, dev_name, err);
+
return err;
}
@@ -684,7 +711,7 @@
/* Send it off, maybe filter it using firewalling first. */
NF_HOOK(NFPROTO_ARP, NF_ARP_OUT,
dev_net(skb->dev), NULL, skb, NULL, skb->dev,
- arp_xmit_finish);
+ arp_xmit_finish_log);
}
EXPORT_SYMBOL(arp_xmit);
@@ -1297,7 +1324,7 @@
/*
* ax25 -> ASCII conversion
*/
-static char *ax2asc2(ax25_address *a, char *buf)
+static void ax2asc2(ax25_address *a, char *buf)
{
char c, *s;
int n;
@@ -1319,10 +1346,10 @@
*s++ = n + '0';
*s++ = '\0';
- if (*buf == '\0' || *buf == '-')
- return "*";
-
- return buf;
+ if (*buf == '\0' || *buf == '-') {
+ buf[0] = '*';
+ buf[1] = '\0';
+ }
}
#endif /* CONFIG_AX25 */
@@ -1356,7 +1383,7 @@
}
#endif
sprintf(tbuf, "%pI4", n->primary_key);
- seq_printf(seq, "%-16s 0x%-10x0x%-10x%s * %s\n",
+ seq_printf(seq, "%-16s 0x%-10x0x%-10x%-17s * %s\n",
tbuf, hatype, arp_state_to_flags(n), hbuffer, dev->name);
read_unlock(&n->lock);
}
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 7e30c7b..ee94bd3 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -758,7 +758,7 @@
unsigned int e = 0, s_e;
struct fib_table *tb;
struct hlist_head *head;
- int dumped = 0;
+ int dumped = 0, err;
if (nlmsg_len(cb->nlh) >= sizeof(struct rtmsg) &&
((struct rtmsg *) nlmsg_data(cb->nlh))->rtm_flags & RTM_F_CLONED)
@@ -778,20 +778,27 @@
if (dumped)
memset(&cb->args[2], 0, sizeof(cb->args) -
2 * sizeof(cb->args[0]));
- if (fib_table_dump(tb, skb, cb) < 0)
- goto out;
+ err = fib_table_dump(tb, skb, cb);
+ if (err < 0) {
+ if (likely(skb->len))
+ goto out;
+
+ goto out_err;
+ }
dumped = 1;
next:
e++;
}
}
out:
+ err = skb->len;
+out_err:
rcu_read_unlock();
cb->args[1] = e;
cb->args[0] = h;
- return skb->len;
+ return err;
}
/* Prepare and feed intra-kernel routing request.
@@ -1081,7 +1088,8 @@
net = sock_net(skb->sk);
nlh = nlmsg_hdr(skb);
- if (skb->len < NLMSG_HDRLEN || skb->len < nlh->nlmsg_len ||
+ if (skb->len < nlmsg_total_size(sizeof(*frn)) ||
+ skb->len < nlh->nlmsg_len ||
nlmsg_len(nlh) < sizeof(*frn))
return;
@@ -1312,13 +1320,14 @@
void __init ip_fib_init(void)
{
- rtnl_register(PF_INET, RTM_NEWROUTE, inet_rtm_newroute, NULL, NULL);
- rtnl_register(PF_INET, RTM_DELROUTE, inet_rtm_delroute, NULL, NULL);
- rtnl_register(PF_INET, RTM_GETROUTE, NULL, inet_dump_fib, NULL);
+ fib_trie_init();
register_pernet_subsys(&fib_net_ops);
+
register_netdevice_notifier(&fib_netdev_notifier);
register_inetaddr_notifier(&fib_inetaddr_notifier);
- fib_trie_init();
+ rtnl_register(PF_INET, RTM_NEWROUTE, inet_rtm_newroute, NULL, NULL);
+ rtnl_register(PF_INET, RTM_DELROUTE, inet_rtm_delroute, NULL, NULL);
+ rtnl_register(PF_INET, RTM_GETROUTE, NULL, inet_dump_fib, NULL);
}
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 67d44aa..313e3c1 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -204,6 +204,7 @@
static void free_fib_info_rcu(struct rcu_head *head)
{
struct fib_info *fi = container_of(head, struct fib_info, rcu);
+ struct dst_metrics *m;
change_nexthops(fi) {
if (nexthop_nh->nh_dev)
@@ -214,8 +215,9 @@
rt_fibinfo_free(&nexthop_nh->nh_rth_input);
} endfor_nexthops(fi);
- if (fi->fib_metrics != (u32 *) dst_default_metrics)
- kfree(fi->fib_metrics);
+ m = fi->fib_metrics;
+ if (m != &dst_default_metrics && atomic_dec_and_test(&m->refcnt))
+ kfree(m);
kfree(fi);
}
@@ -982,11 +984,11 @@
val = 255;
if (type == RTAX_FEATURES && (val & ~RTAX_FEATURE_MASK))
return -EINVAL;
- fi->fib_metrics[type - 1] = val;
+ fi->fib_metrics->metrics[type - 1] = val;
}
if (ecn_ca)
- fi->fib_metrics[RTAX_FEATURES - 1] |= DST_FEATURE_ECN_CA;
+ fi->fib_metrics->metrics[RTAX_FEATURES - 1] |= DST_FEATURE_ECN_CA;
return 0;
}
@@ -1042,14 +1044,17 @@
fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL);
if (!fi)
goto failure;
- fib_info_cnt++;
if (cfg->fc_mx) {
- fi->fib_metrics = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
- if (!fi->fib_metrics)
- goto failure;
- } else
- fi->fib_metrics = (u32 *) dst_default_metrics;
-
+ fi->fib_metrics = kzalloc(sizeof(*fi->fib_metrics), GFP_KERNEL);
+ if (unlikely(!fi->fib_metrics)) {
+ kfree(fi);
+ return ERR_PTR(err);
+ }
+ atomic_set(&fi->fib_metrics->refcnt, 1);
+ } else {
+ fi->fib_metrics = (struct dst_metrics *)&dst_default_metrics;
+ }
+ fib_info_cnt++;
fi->fib_net = net;
fi->fib_protocol = cfg->fc_protocol;
fi->fib_scope = cfg->fc_scope;
@@ -1251,7 +1256,7 @@
if (fi->fib_priority &&
nla_put_u32(skb, RTA_PRIORITY, fi->fib_priority))
goto nla_put_failure;
- if (rtnetlink_put_metrics(skb, fi->fib_metrics) < 0)
+ if (rtnetlink_put_metrics(skb, fi->fib_metrics->metrics) < 0)
goto nla_put_failure;
if (fi->fib_prefsrc &&
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 2059c20..fa59bc3 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1906,6 +1906,8 @@
/* rcu_read_lock is hold by caller */
hlist_for_each_entry_rcu(fa, &l->leaf, fa_list) {
+ int err;
+
if (i < s_i) {
i++;
continue;
@@ -1916,17 +1918,14 @@
continue;
}
- if (fib_dump_info(skb, NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq,
- RTM_NEWROUTE,
- tb->tb_id,
- fa->fa_type,
- xkey,
- KEYLENGTH - fa->fa_slen,
- fa->fa_tos,
- fa->fa_info, NLM_F_MULTI) < 0) {
+ err = fib_dump_info(skb, NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, RTM_NEWROUTE,
+ tb->tb_id, fa->fa_type,
+ xkey, KEYLENGTH - fa->fa_slen,
+ fa->fa_tos, fa->fa_info, NLM_F_MULTI);
+ if (err < 0) {
cb->args[4] = i;
- return -1;
+ return err;
}
i++;
}
@@ -1948,10 +1947,13 @@
t_key key = cb->args[3];
while ((l = leaf_walk_rcu(&tp, key)) != NULL) {
- if (fn_trie_dump_leaf(l, tb, skb, cb) < 0) {
+ int err;
+
+ err = fn_trie_dump_leaf(l, tb, skb, cb);
+ if (err < 0) {
cb->args[3] = key;
cb->args[2] = count;
- return -1;
+ return err;
}
++count;
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 17adfda..3809d52 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -1102,6 +1102,7 @@
pmc = kzalloc(sizeof(*pmc), GFP_KERNEL);
if (!pmc)
return;
+ spin_lock_init(&pmc->lock);
spin_lock_bh(&im->lock);
pmc->interface = im->interface;
in_dev_hold(in_dev);
@@ -2026,21 +2027,26 @@
static void ip_mc_clear_src(struct ip_mc_list *pmc)
{
- struct ip_sf_list *psf, *nextpsf;
+ struct ip_sf_list *psf, *nextpsf, *tomb, *sources;
- for (psf = pmc->tomb; psf; psf = nextpsf) {
- nextpsf = psf->sf_next;
- kfree(psf);
- }
+ spin_lock_bh(&pmc->lock);
+ tomb = pmc->tomb;
pmc->tomb = NULL;
- for (psf = pmc->sources; psf; psf = nextpsf) {
- nextpsf = psf->sf_next;
- kfree(psf);
- }
+ sources = pmc->sources;
pmc->sources = NULL;
pmc->sfmode = MCAST_EXCLUDE;
pmc->sfcount[MCAST_INCLUDE] = 0;
pmc->sfcount[MCAST_EXCLUDE] = 1;
+ spin_unlock_bh(&pmc->lock);
+
+ for (psf = tomb; psf; psf = nextpsf) {
+ nextpsf = psf->sf_next;
+ kfree(psf);
+ }
+ for (psf = sources; psf; psf = nextpsf) {
+ nextpsf = psf->sf_next;
+ kfree(psf);
+ }
}
/* Join a multicast group
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 661bda9..62e41d38 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -922,10 +922,12 @@
csummode = CHECKSUM_PARTIAL;
cork->length += length;
- if (((length > mtu) || (skb && skb_is_gso(skb))) &&
+ if ((skb && skb_is_gso(skb)) ||
+ (((length + (skb ? skb->len : fragheaderlen)) > mtu) &&
+ (skb_queue_len(queue) <= 1) &&
(sk->sk_protocol == IPPROTO_UDP) &&
(rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len &&
- (sk->sk_type == SOCK_DGRAM) && !sk->sk_no_check_tx) {
+ (sk->sk_type == SOCK_DGRAM) && !sk->sk_no_check_tx)) {
err = ip_ufo_append_data(sk, queue, getfrag, from, length,
hh_len, fragheaderlen, transhdrlen,
maxfraglen, flags);
@@ -1241,6 +1243,7 @@
return -EINVAL;
if ((size + skb->len > mtu) &&
+ (skb_queue_len(&sk->sk_write_queue) == 1) &&
(sk->sk_protocol == IPPROTO_UDP) &&
(rt->dst.dev->features & NETIF_F_UFO)) {
if (skb->ip_summed != CHECKSUM_PARTIAL)
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 3a2b21e..72e1e83 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -154,17 +154,18 @@
void ping_unhash(struct sock *sk)
{
struct inet_sock *isk = inet_sk(sk);
+
pr_debug("ping_unhash(isk=%p,isk->num=%u)\n", isk, isk->inet_num);
+ write_lock_bh(&ping_table.lock);
if (sk_hashed(sk)) {
- write_lock_bh(&ping_table.lock);
hlist_nulls_del(&sk->sk_nulls_node);
sk_nulls_node_init(&sk->sk_nulls_node);
sock_put(sk);
isk->inet_num = 0;
isk->inet_sport = 0;
sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
- write_unlock_bh(&ping_table.lock);
}
+ write_unlock_bh(&ping_table.lock);
}
EXPORT_SYMBOL_GPL(ping_unhash);
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 6287418..ca10314 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -354,6 +354,9 @@
rt->dst.dev->mtu);
return -EMSGSIZE;
}
+ if (length < sizeof(struct iphdr))
+ return -EINVAL;
+
if (flags&MSG_PROBE)
goto out;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 9af0f46..45efdfa 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1246,7 +1246,7 @@
if (mtu)
return mtu;
- mtu = dst->dev->mtu;
+ mtu = READ_ONCE(dst->dev->mtu);
if (unlikely(dst_metric_locked(dst, RTAX_MTU))) {
if (rt->rt_uses_gateway && mtu > 576)
@@ -1361,8 +1361,12 @@
static void ipv4_dst_destroy(struct dst_entry *dst)
{
+ struct dst_metrics *p = (struct dst_metrics *)DST_METRICS_PTR(dst);
struct rtable *rt = (struct rtable *) dst;
+ if (p != &dst_default_metrics && atomic_dec_and_test(&p->refcnt))
+ kfree(p);
+
if (!list_empty(&rt->rt_uncached)) {
struct uncached_list *ul = rt->rt_uncached_list;
@@ -1414,7 +1418,11 @@
rt->rt_gateway = nh->nh_gw;
rt->rt_uses_gateway = 1;
}
- dst_init_metrics(&rt->dst, fi->fib_metrics, true);
+ dst_init_metrics(&rt->dst, fi->fib_metrics->metrics, true);
+ if (fi->fib_metrics != &dst_default_metrics) {
+ rt->dst._metrics |= DST_METRICS_REFCOUNTED;
+ atomic_inc(&fi->fib_metrics->refcnt);
+ }
#ifdef CONFIG_IP_ROUTE_CLASSID
rt->dst.tclassid = nh->nh_tclassid;
#endif
@@ -2570,7 +2578,7 @@
skb_reset_network_header(skb);
/* Bugfix: need to give ip_route_input enough of an IP header to not gag. */
- ip_hdr(skb)->protocol = IPPROTO_ICMP;
+ ip_hdr(skb)->protocol = IPPROTO_UDP;
skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr));
src = tb[RTA_SRC] ? nla_get_in_addr(tb[RTA_SRC]) : 0;
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 2dc982b..a2e1142 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -337,6 +337,7 @@
treq = tcp_rsk(req);
treq->rcv_isn = ntohl(th->seq) - 1;
treq->snt_isn = cookie;
+ treq->txhash = net_tx_rndhash();
req->mss = mss;
ireq->ir_num = ntohs(th->dest);
ireq->ir_rmt_port = th->source;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 4ab7735..9bdd784 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1077,9 +1077,12 @@
int *copied, size_t size)
{
struct tcp_sock *tp = tcp_sk(sk);
+ struct sockaddr *uaddr = msg->msg_name;
int err, flags;
- if (!(sysctl_tcp_fastopen & TFO_CLIENT_ENABLE))
+ if (!(sysctl_tcp_fastopen & TFO_CLIENT_ENABLE) ||
+ (uaddr && msg->msg_namelen >= sizeof(uaddr->sa_family) &&
+ uaddr->sa_family == AF_UNSPEC))
return -EOPNOTSUPP;
if (tp->fastopen_req)
return -EALREADY; /* Another Fast Open is in progress */
@@ -1092,7 +1095,7 @@
tp->fastopen_req->size = size;
flags = (msg->msg_flags & MSG_DONTWAIT) ? O_NONBLOCK : 0;
- err = __inet_stream_connect(sk->sk_socket, msg->msg_name,
+ err = __inet_stream_connect(sk->sk_socket, uaddr,
msg->msg_namelen, flags);
*copied = tp->fastopen_req->copied;
tcp_free_fastopen_req(tp);
@@ -2269,6 +2272,9 @@
tcp_init_send_head(sk);
memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
__sk_dst_reset(sk);
+ dst_release(sk->sk_rx_dst);
+ sk->sk_rx_dst = NULL;
+ tcp_saved_syn_free(tp);
WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 882caa4..aafe681 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -183,6 +183,7 @@
{
const struct inet_connection_sock *icsk = inet_csk(sk);
+ tcp_sk(sk)->prior_ssthresh = 0;
if (icsk->icsk_ca_ops->init)
icsk->icsk_ca_ops->init(sk);
if (tcp_ca_needs_ecn(sk))
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index e7e227f..0047b15 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1135,13 +1135,14 @@
*/
if (pkt_len > mss) {
unsigned int new_len = (pkt_len / mss) * mss;
- if (!in_sack && new_len < pkt_len) {
+ if (!in_sack && new_len < pkt_len)
new_len += mss;
- if (new_len >= skb->len)
- return 0;
- }
pkt_len = new_len;
}
+
+ if (pkt_len >= skb->len && !in_sack)
+ return 0;
+
err = tcp_fragment(sk, skb, pkt_len, mss, GFP_ATOMIC);
if (err < 0)
return err;
@@ -2165,8 +2166,7 @@
{
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb;
- int cnt, oldcnt;
- int err;
+ int cnt, oldcnt, lost;
unsigned int mss;
/* Use SACK to deduce losses of new sequences sent during recovery */
const u32 loss_high = tcp_is_sack(tp) ? tp->snd_nxt : tp->high_seq;
@@ -2206,9 +2206,10 @@
break;
mss = tcp_skb_mss(skb);
- err = tcp_fragment(sk, skb, (packets - oldcnt) * mss,
- mss, GFP_ATOMIC);
- if (err < 0)
+ /* If needed, chop off the prefix to mark as lost. */
+ lost = (packets - oldcnt) * mss;
+ if (lost < skb->len &&
+ tcp_fragment(sk, skb, lost, mss, GFP_ATOMIC) < 0)
break;
cnt = packets;
}
@@ -2503,8 +2504,8 @@
struct tcp_sock *tp = tcp_sk(sk);
/* Reset cwnd to ssthresh in CWR or Recovery (unless it's undone) */
- if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR ||
- (tp->undo_marker && tp->snd_ssthresh < TCP_INFINITE_SSTHRESH)) {
+ if (tp->snd_ssthresh < TCP_INFINITE_SSTHRESH &&
+ (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR || tp->undo_marker)) {
tp->snd_cwnd = tp->snd_ssthresh;
tp->snd_cwnd_stamp = tcp_time_stamp;
}
@@ -3028,8 +3029,7 @@
/* delta may not be positive if the socket is locked
* when the retrans timer fires and is rescheduled.
*/
- if (delta > 0)
- rto = delta;
+ rto = max(delta, 1);
}
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto,
TCP_RTO_MAX);
@@ -3220,7 +3220,7 @@
int delta;
/* Non-retransmitted hole got filled? That's reordering */
- if (reord < prior_fackets)
+ if (reord < prior_fackets && reord <= tp->fackets_out)
tcp_update_reordering(sk, tp->fackets_out - reord, 0);
delta = tcp_is_fack(tp) ? pkts_acked :
@@ -5437,6 +5437,7 @@
struct inet_connection_sock *icsk = inet_csk(sk);
tcp_set_state(sk, TCP_ESTABLISHED);
+ icsk->icsk_ack.lrcvtime = tcp_time_stamp;
if (skb) {
icsk->icsk_af_ops->sk_rx_dst_set(sk, skb);
@@ -5649,7 +5650,6 @@
* to stand against the temptation 8) --ANK
*/
inet_csk_schedule_ack(sk);
- icsk->icsk_ack.lrcvtime = tcp_time_stamp;
tcp_enter_quickack_mode(sk);
inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
TCP_DELACK_MAX, TCP_RTO_MAX);
diff --git a/net/ipv4/tcp_lp.c b/net/ipv4/tcp_lp.c
index 1e70fa8..3861ded 100644
--- a/net/ipv4/tcp_lp.c
+++ b/net/ipv4/tcp_lp.c
@@ -264,13 +264,15 @@
{
struct tcp_sock *tp = tcp_sk(sk);
struct lp *lp = inet_csk_ca(sk);
+ u32 delta;
if (rtt_us > 0)
tcp_lp_rtt_sample(sk, rtt_us);
/* calc inference */
- if (tcp_time_stamp > tp->rx_opt.rcv_tsecr)
- lp->inference = 3 * (tcp_time_stamp - tp->rx_opt.rcv_tsecr);
+ delta = tcp_time_stamp - tp->rx_opt.rcv_tsecr;
+ if ((s32)delta > 0)
+ lp->inference = 3 * delta;
/* test if within inference */
if (lp->last_drop && (tcp_time_stamp - lp->last_drop < lp->inference))
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 9475a27..4c1c94f 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -472,6 +472,7 @@
newtp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
newtp->rtt_min[0].rtt = ~0U;
newicsk->icsk_rto = TCP_TIMEOUT_INIT;
+ newicsk->icsk_ack.lrcvtime = tcp_time_stamp;
newtp->packets_out = 0;
newtp->retrans_out = 0;
@@ -546,6 +547,7 @@
newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
newtp->rx_opt.mss_clamp = req->mss;
tcp_ecn_openreq_child(newtp, req);
+ newtp->fastopen_req = NULL;
newtp->fastopen_rsk = NULL;
newtp->syn_data_acked = 0;
newtp->rack.mstamp.v64 = 0;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 8a62ad0..4e88f93 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1221,7 +1221,7 @@
* eventually). The difference is that pulled data not copied, but
* immediately discarded.
*/
-static void __pskb_trim_head(struct sk_buff *skb, int len)
+static int __pskb_trim_head(struct sk_buff *skb, int len)
{
struct skb_shared_info *shinfo;
int i, k, eat;
@@ -1231,7 +1231,7 @@
__skb_pull(skb, eat);
len -= eat;
if (!len)
- return;
+ return 0;
}
eat = len;
k = 0;
@@ -1257,23 +1257,28 @@
skb_reset_tail_pointer(skb);
skb->data_len -= len;
skb->len = skb->data_len;
+ return len;
}
/* Remove acked data from a packet in the transmit queue. */
int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
{
+ u32 delta_truesize;
+
if (skb_unclone(skb, GFP_ATOMIC))
return -ENOMEM;
- __pskb_trim_head(skb, len);
+ delta_truesize = __pskb_trim_head(skb, len);
TCP_SKB_CB(skb)->seq += len;
skb->ip_summed = CHECKSUM_PARTIAL;
- skb->truesize -= len;
- sk->sk_wmem_queued -= len;
- sk_mem_uncharge(sk, len);
- sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
+ if (delta_truesize) {
+ skb->truesize -= delta_truesize;
+ sk->sk_wmem_queued -= delta_truesize;
+ sk_mem_uncharge(sk, delta_truesize);
+ sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
+ }
/* Any change of skb->len requires recalculation of tso factor. */
if (tcp_skb_pcount(skb) > 1)
@@ -3251,6 +3256,9 @@
struct sk_buff *buff;
int err;
+ if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
+ return -EHOSTUNREACH; /* Routing failure or similar. */
+
tcp_connect_init(sk);
if (unlikely(tp->repair)) {
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 4aef80d..c1a8447 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -640,7 +640,8 @@
goto death;
}
- if (!sock_flag(sk, SOCK_KEEPOPEN) || sk->sk_state == TCP_CLOSE)
+ if (!sock_flag(sk, SOCK_KEEPOPEN) ||
+ ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)))
goto out;
elapsed = keepalive_time_when(tp);
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 254fcc7..4d6f09c 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -824,7 +824,7 @@
if (is_udplite) /* UDP-Lite */
csum = udplite_csum(skb);
- else if (sk->sk_no_check_tx) { /* UDP csum disabled */
+ else if (sk->sk_no_check_tx && !skb_is_gso(skb)) { /* UDP csum off */
skb->ip_summed = CHECKSUM_NONE;
goto send;
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 6396f1c..6dfc3da 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -231,7 +231,7 @@
if (uh->check == 0)
uh->check = CSUM_MANGLED_0;
- skb->ip_summed = CHECKSUM_NONE;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
/* Fragment the skb. IP headers of the fragments are updated in
* inet_gso_segment()
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 3d72aef..4b707ad 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -320,9 +320,9 @@
static void addrconf_mod_dad_work(struct inet6_ifaddr *ifp,
unsigned long delay)
{
- if (!delayed_work_pending(&ifp->dad_work))
- in6_ifa_hold(ifp);
- mod_delayed_work(addrconf_wq, &ifp->dad_work, delay);
+ in6_ifa_hold(ifp);
+ if (mod_delayed_work(addrconf_wq, &ifp->dad_work, delay))
+ in6_ifa_put(ifp);
}
static int snmp6_alloc_dev(struct inet6_dev *idev)
@@ -1801,17 +1801,7 @@
static void addrconf_dad_stop(struct inet6_ifaddr *ifp, int dad_failed)
{
- if (ifp->flags&IFA_F_PERMANENT) {
- spin_lock_bh(&ifp->lock);
- addrconf_del_dad_work(ifp);
- ifp->flags |= IFA_F_TENTATIVE;
- if (dad_failed)
- ifp->flags |= IFA_F_DADFAILED;
- spin_unlock_bh(&ifp->lock);
- if (dad_failed)
- ipv6_ifa_notify(0, ifp);
- in6_ifa_put(ifp);
- } else if (ifp->flags&IFA_F_TEMPORARY) {
+ if (ifp->flags&IFA_F_TEMPORARY) {
struct inet6_ifaddr *ifpub;
spin_lock_bh(&ifp->lock);
ifpub = ifp->ifpub;
@@ -1824,6 +1814,16 @@
spin_unlock_bh(&ifp->lock);
}
ipv6_del_addr(ifp);
+ } else if (ifp->flags&IFA_F_PERMANENT || !dad_failed) {
+ spin_lock_bh(&ifp->lock);
+ addrconf_del_dad_work(ifp);
+ ifp->flags |= IFA_F_TENTATIVE;
+ if (dad_failed)
+ ifp->flags |= IFA_F_DADFAILED;
+ spin_unlock_bh(&ifp->lock);
+ if (dad_failed)
+ ipv6_ifa_notify(0, ifp);
+ in6_ifa_put(ifp);
} else {
ipv6_del_addr(ifp);
}
@@ -3212,6 +3212,7 @@
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct inet6_dev *idev = __in6_dev_get(dev);
+ struct net *net = dev_net(dev);
int run_pending = 0;
int err;
@@ -3227,7 +3228,7 @@
case NETDEV_CHANGEMTU:
/* if MTU under IPV6_MIN_MTU stop IPv6 on this interface. */
if (dev->mtu < IPV6_MIN_MTU) {
- addrconf_ifdown(dev, 1);
+ addrconf_ifdown(dev, dev != net->loopback_dev);
break;
}
@@ -3280,9 +3281,15 @@
}
if (idev) {
- if (idev->if_flags & IF_READY)
- /* device is already configured. */
+ if (idev->if_flags & IF_READY) {
+ /* device is already configured -
+ * but resend MLD reports, we might
+ * have roamed and need to update
+ * multicast snooping switches
+ */
+ ipv6_mc_up(idev);
break;
+ }
idev->if_flags |= IF_READY;
}
@@ -3334,7 +3341,7 @@
* IPV6_MIN_MTU stop IPv6 on this interface.
*/
if (dev->mtu < IPV6_MIN_MTU)
- addrconf_ifdown(dev, 1);
+ addrconf_ifdown(dev, dev != net->loopback_dev);
}
break;
@@ -3375,6 +3382,7 @@
*/
static struct notifier_block ipv6_dev_notf = {
.notifier_call = addrconf_notify,
+ .priority = ADDRCONF_NOTIFY_PRIORITY,
};
static void addrconf_type_change(struct net_device *dev, unsigned long event)
@@ -6046,6 +6054,8 @@
goto errlo;
}
+ ip6_route_init_special_entries();
+
for (i = 0; i < IN6_ADDR_HSIZE; i++)
INIT_HLIST_HEAD(&inet6_addr_lst[i]);
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 1da23f1..d7c1ee7 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -76,18 +76,22 @@
}
}
- addr_type = ipv6_addr_type(&usin->sin6_addr);
-
- if (addr_type == IPV6_ADDR_ANY) {
+ if (ipv6_addr_any(&usin->sin6_addr)) {
/*
* connect to self
*/
- usin->sin6_addr.s6_addr[15] = 0x01;
+ if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
+ ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
+ &usin->sin6_addr);
+ else
+ usin->sin6_addr = in6addr_loopback;
}
+ addr_type = ipv6_addr_type(&usin->sin6_addr);
+
daddr = &usin->sin6_addr;
- if (addr_type == IPV6_ADDR_MAPPED) {
+ if (addr_type & IPV6_ADDR_MAPPED) {
struct sockaddr_in sin;
if (__ipv6_only_sock(sk)) {
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index ed33abf..9ac4f0c 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -32,7 +32,6 @@
struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6,
int flags, pol_lookup_t lookup)
{
- struct rt6_info *rt;
struct fib_lookup_arg arg = {
.lookup_ptr = lookup,
.flags = FIB_LOOKUP_NOREF,
@@ -41,21 +40,11 @@
fib_rules_lookup(net->ipv6.fib6_rules_ops,
flowi6_to_flowi(fl6), flags, &arg);
- rt = arg.result;
+ if (arg.result)
+ return arg.result;
- if (!rt) {
- dst_hold(&net->ipv6.ip6_null_entry->dst);
- return &net->ipv6.ip6_null_entry->dst;
- }
-
- if (rt->rt6i_flags & RTF_REJECT &&
- rt->dst.error == -EAGAIN) {
- ip6_rt_put(rt);
- rt = net->ipv6.ip6_null_entry;
- dst_hold(&rt->dst);
- }
-
- return &rt->dst;
+ dst_hold(&net->ipv6.ip6_null_entry->dst);
+ return &net->ipv6.ip6_null_entry->dst;
}
static int fib6_rule_action(struct fib_rule *rule, struct flowi *flp,
@@ -116,7 +105,8 @@
flp6->saddr = saddr;
}
err = rt->dst.error;
- goto out;
+ if (err != -EAGAIN)
+ goto out;
}
again:
ip6_rt_put(rt);
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 85bf864..aad8cdf 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -290,8 +290,7 @@
struct rt6_info *rt;
rt = lookup(net, net->ipv6.fib6_main_tbl, fl6, flags);
- if (rt->rt6i_flags & RTF_REJECT &&
- rt->dst.error == -EAGAIN) {
+ if (rt->dst.error == -EAGAIN) {
ip6_rt_put(rt);
rt = net->ipv6.ip6_null_entry;
dst_hold(&rt->dst);
@@ -768,10 +767,7 @@
goto next_iter;
}
- if (iter->dst.dev == rt->dst.dev &&
- iter->rt6i_idev == rt->rt6i_idev &&
- ipv6_addr_equal(&iter->rt6i_gateway,
- &rt->rt6i_gateway)) {
+ if (rt6_duplicate_nexthop(iter, rt)) {
if (rt->rt6i_nsiblings)
rt->rt6i_nsiblings = 0;
if (!(iter->rt6i_flags & RTF_EXPIRES))
@@ -896,6 +892,8 @@
}
nsiblings = iter->rt6i_nsiblings;
fib6_purge_rt(iter, fn, info->nl_net);
+ if (fn->rr_ptr == iter)
+ fn->rr_ptr = NULL;
rt6_release(iter);
if (nsiblings) {
@@ -908,6 +906,8 @@
if (rt6_qualify_for_ecmp(iter)) {
*ins = iter->dst.rt6_next;
fib6_purge_rt(iter, fn, info->nl_net);
+ if (fn->rr_ptr == iter)
+ fn->rr_ptr = NULL;
rt6_release(iter);
nsiblings--;
} else {
@@ -996,7 +996,7 @@
/* Create subtree root node */
sfn = node_alloc();
if (!sfn)
- goto st_failure;
+ goto failure;
sfn->leaf = info->nl_net->ipv6.ip6_null_entry;
atomic_inc(&info->nl_net->ipv6.ip6_null_entry->rt6i_ref);
@@ -1012,12 +1012,12 @@
if (IS_ERR(sn)) {
/* If it is failed, discard just allocated
- root, and then (in st_failure) stale node
+ root, and then (in failure) stale node
in main tree.
*/
node_free(sfn);
err = PTR_ERR(sn);
- goto st_failure;
+ goto failure;
}
/* Now link new subtree to main tree */
@@ -1031,7 +1031,7 @@
if (IS_ERR(sn)) {
err = PTR_ERR(sn);
- goto st_failure;
+ goto failure;
}
}
@@ -1073,22 +1073,22 @@
atomic_inc(&pn->leaf->rt6i_ref);
}
#endif
- if (!(rt->dst.flags & DST_NOCACHE))
- dst_free(&rt->dst);
+ goto failure;
}
return err;
-#ifdef CONFIG_IPV6_SUBTREES
- /* Subtree creation failed, probably main tree node
- is orphan. If it is, shoot it.
+failure:
+ /* fn->leaf could be NULL if fn is an intermediate node and we
+ * failed to add the new route to it in both subtree creation
+ * failure and fib6_add_rt2node() failure case.
+ * In both cases, fib6_repair_tree() should be called to fix
+ * fn->leaf.
*/
-st_failure:
if (fn && !(fn->fn_flags & (RTN_RTINFO|RTN_ROOT)))
fib6_repair_tree(info->nl_net, fn);
if (!(rt->dst.flags & DST_NOCACHE))
dst_free(&rt->dst);
return err;
-#endif
}
/*
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index 3feeca6..c612daad 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -62,7 +62,6 @@
const struct net_offload *ops;
int proto;
struct frag_hdr *fptr;
- unsigned int unfrag_ip6hlen;
u8 *prevhdr;
int offset = 0;
bool encap, udpfrag;
@@ -121,8 +120,12 @@
skb->network_header = (u8 *)ipv6h - skb->head;
if (udpfrag) {
- unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr);
- fptr = (struct frag_hdr *)((u8 *)ipv6h + unfrag_ip6hlen);
+ int err = ip6_find_1stfragopt(skb, &prevhdr);
+ if (err < 0) {
+ kfree_skb_list(segs);
+ return ERR_PTR(err);
+ }
+ fptr = (struct frag_hdr *)((u8 *)ipv6h + err);
fptr->frag_off = htons(offset);
if (skb->next)
fptr->frag_off |= htons(IP6_MF);
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index cf90a9b..635742a 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -571,7 +571,10 @@
int ptr, offset = 0, err = 0;
u8 *prevhdr, nexthdr = 0;
- hlen = ip6_find_1stfragopt(skb, &prevhdr);
+ err = ip6_find_1stfragopt(skb, &prevhdr);
+ if (err < 0)
+ goto fail;
+ hlen = err;
nexthdr = *prevhdr;
mtu = ip6_skb_dst_mtu(skb);
@@ -644,8 +647,6 @@
*prevhdr = NEXTHDR_FRAGMENT;
tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC);
if (!tmp_hdr) {
- IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
- IPSTATS_MIB_FRAGFAILS);
err = -ENOMEM;
goto fail;
}
@@ -764,8 +765,6 @@
frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) +
hroom + troom, GFP_ATOMIC);
if (!frag) {
- IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
- IPSTATS_MIB_FRAGFAILS);
err = -ENOMEM;
goto fail;
}
@@ -1000,6 +999,11 @@
}
}
#endif
+ if (ipv6_addr_v4mapped(&fl6->saddr) &&
+ !(ipv6_addr_v4mapped(&fl6->daddr) || ipv6_addr_any(&fl6->daddr))) {
+ err = -EAFNOSUPPORT;
+ goto out_err_release;
+ }
return 0;
@@ -1352,11 +1356,12 @@
*/
cork->length += length;
- if (((length > mtu) ||
- (skb && skb_is_gso(skb))) &&
+ if ((skb && skb_is_gso(skb)) ||
+ (((length + (skb ? skb->len : headersize)) > mtu) &&
+ (skb_queue_len(queue) <= 1) &&
(sk->sk_protocol == IPPROTO_UDP) &&
(rt->dst.dev->features & NETIF_F_UFO) &&
- (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk)) {
+ (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk))) {
err = ip6_ufo_append_data(sk, queue, getfrag, from, length,
hh_len, fragheaderlen, exthdrlen,
transhdrlen, mtu, flags, fl6);
@@ -1428,6 +1433,11 @@
*/
alloclen += sizeof(struct frag_hdr);
+ copy = datalen - transhdrlen - fraggap;
+ if (copy < 0) {
+ err = -EINVAL;
+ goto error;
+ }
if (transhdrlen) {
skb = sock_alloc_send_skb(sk,
alloclen + hh_len,
@@ -1477,13 +1487,9 @@
data += fraggap;
pskb_trim_unique(skb_prev, maxfraglen);
}
- copy = datalen - transhdrlen - fraggap;
-
- if (copy < 0) {
- err = -EINVAL;
- kfree_skb(skb);
- goto error;
- } else if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
+ if (copy > 0 &&
+ getfrag(from, data + transhdrlen, offset,
+ copy, fraggap, skb) < 0) {
err = -EFAULT;
kfree_skb(skb);
goto error;
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 8b11a49..600975c 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1049,7 +1049,7 @@
struct ip6_tnl *t = netdev_priv(dev);
struct net *net = t->net;
struct net_device_stats *stats = &t->dev->stats;
- struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+ struct ipv6hdr *ipv6h;
struct ipv6_tel_txoption opt;
struct dst_entry *dst = NULL, *ndst = NULL;
struct net_device *tdev;
@@ -1061,26 +1061,28 @@
/* NBMA tunnel */
if (ipv6_addr_any(&t->parms.raddr)) {
- struct in6_addr *addr6;
- struct neighbour *neigh;
- int addr_type;
+ if (skb->protocol == htons(ETH_P_IPV6)) {
+ struct in6_addr *addr6;
+ struct neighbour *neigh;
+ int addr_type;
- if (!skb_dst(skb))
- goto tx_err_link_failure;
+ if (!skb_dst(skb))
+ goto tx_err_link_failure;
- neigh = dst_neigh_lookup(skb_dst(skb),
- &ipv6_hdr(skb)->daddr);
- if (!neigh)
- goto tx_err_link_failure;
+ neigh = dst_neigh_lookup(skb_dst(skb),
+ &ipv6_hdr(skb)->daddr);
+ if (!neigh)
+ goto tx_err_link_failure;
- addr6 = (struct in6_addr *)&neigh->primary_key;
- addr_type = ipv6_addr_type(addr6);
+ addr6 = (struct in6_addr *)&neigh->primary_key;
+ addr_type = ipv6_addr_type(addr6);
- if (addr_type == IPV6_ADDR_ANY)
- addr6 = &ipv6_hdr(skb)->daddr;
+ if (addr_type == IPV6_ADDR_ANY)
+ addr6 = &ipv6_hdr(skb)->daddr;
- memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr));
- neigh_release(neigh);
+ memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr));
+ neigh_release(neigh);
+ }
} else if (!(t->parms.flags &
(IP6_TNL_F_USE_ORIG_TCLASS | IP6_TNL_F_USE_ORIG_FWMARK))) {
/* enable the cache only only if the routing decision does
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index d9843e5..8361d73 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -774,7 +774,8 @@
* Delete a VIF entry
*/
-static int mif6_delete(struct mr6_table *mrt, int vifi, struct list_head *head)
+static int mif6_delete(struct mr6_table *mrt, int vifi, int notify,
+ struct list_head *head)
{
struct mif_device *v;
struct net_device *dev;
@@ -820,7 +821,7 @@
dev->ifindex, &in6_dev->cnf);
}
- if (v->flags & MIFF_REGISTER)
+ if ((v->flags & MIFF_REGISTER) && !notify)
unregister_netdevice_queue(dev, head);
dev_put(dev);
@@ -1330,7 +1331,6 @@
struct mr6_table *mrt;
struct mif_device *v;
int ct;
- LIST_HEAD(list);
if (event != NETDEV_UNREGISTER)
return NOTIFY_DONE;
@@ -1339,10 +1339,9 @@
v = &mrt->vif6_table[0];
for (ct = 0; ct < mrt->maxvif; ct++, v++) {
if (v->dev == dev)
- mif6_delete(mrt, ct, &list);
+ mif6_delete(mrt, ct, 1, NULL);
}
}
- unregister_netdevice_many(&list);
return NOTIFY_DONE;
}
@@ -1551,7 +1550,7 @@
for (i = 0; i < mrt->maxvif; i++) {
if (!all && (mrt->vif6_table[i].flags & VIFF_STATIC))
continue;
- mif6_delete(mrt, i, &list);
+ mif6_delete(mrt, i, 0, &list);
}
unregister_netdevice_many(&list);
@@ -1704,7 +1703,7 @@
if (copy_from_user(&mifi, optval, sizeof(mifi_t)))
return -EFAULT;
rtnl_lock();
- ret = mif6_delete(mrt, mifi, NULL);
+ ret = mif6_delete(mrt, mifi, 0, NULL);
rtnl_unlock();
return ret;
diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
index 1d18432..f9f0258 100644
--- a/net/ipv6/output_core.c
+++ b/net/ipv6/output_core.c
@@ -78,15 +78,15 @@
int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
{
- u16 offset = sizeof(struct ipv6hdr);
- struct ipv6_opt_hdr *exthdr =
- (struct ipv6_opt_hdr *)(ipv6_hdr(skb) + 1);
+ unsigned int offset = sizeof(struct ipv6hdr);
unsigned int packet_len = skb_tail_pointer(skb) -
skb_network_header(skb);
int found_rhdr = 0;
*nexthdr = &ipv6_hdr(skb)->nexthdr;
- while (offset + 1 <= packet_len) {
+ while (offset <= packet_len) {
+ struct ipv6_opt_hdr *exthdr;
+ unsigned int len;
switch (**nexthdr) {
@@ -107,13 +107,19 @@
return offset;
}
- offset += ipv6_optlen(exthdr);
- *nexthdr = &exthdr->nexthdr;
+ if (offset + sizeof(struct ipv6_opt_hdr) > packet_len)
+ return -EINVAL;
+
exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) +
offset);
+ len = ipv6_optlen(exthdr);
+ if (len + offset >= IPV6_MAXPLEN)
+ return -EINVAL;
+ offset += len;
+ *nexthdr = &exthdr->nexthdr;
}
- return offset;
+ return -EINVAL;
}
EXPORT_SYMBOL(ip6_find_1stfragopt);
diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
index 1737fc0..4c753f4 100644
--- a/net/ipv6/ping.c
+++ b/net/ipv6/ping.c
@@ -50,7 +50,7 @@
.type = SOCK_DGRAM,
.protocol = IPPROTO_ICMPV6,
.prot = &pingv6_prot,
- .ops = &inet6_dgram_ops,
+ .ops = &inet6_sockraw_ops,
.flags = INET_PROTOSW_REUSE,
};
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index d503b7f..0ef8e11 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -630,6 +630,8 @@
ipv6_local_error(sk, EMSGSIZE, fl6, rt->dst.dev->mtu);
return -EMSGSIZE;
}
+ if (length < sizeof(struct ipv6hdr))
+ return -EINVAL;
if (flags&MSG_PROBE)
goto out;
@@ -1145,8 +1147,7 @@
spin_lock_bh(&sk->sk_receive_queue.lock);
skb = skb_peek(&sk->sk_receive_queue);
if (skb)
- amount = skb_tail_pointer(skb) -
- skb_transport_header(skb);
+ amount = skb->len;
spin_unlock_bh(&sk->sk_receive_queue.lock);
return put_user(amount, (int __user *)arg);
}
@@ -1303,7 +1304,7 @@
#endif /* CONFIG_PROC_FS */
/* Same as inet6_dgram_ops, sans udp_poll. */
-static const struct proto_ops inet6_sockraw_ops = {
+const struct proto_ops inet6_sockraw_ops = {
.family = PF_INET6,
.owner = THIS_MODULE,
.release = inet6_release,
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index add152e8..dd37fe0 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1756,6 +1756,10 @@
int addr_type;
int err = -EINVAL;
+ /* RTF_PCPU is an internal flag; can not be set by userspace */
+ if (cfg->fc_flags & RTF_PCPU)
+ goto out;
+
if (cfg->fc_dst_len > 128 || cfg->fc_src_len > 128)
goto out;
#ifndef CONFIG_IPV6_SUBTREES
@@ -2082,6 +2086,8 @@
continue;
if (cfg->fc_metric && cfg->fc_metric != rt->rt6i_metric)
continue;
+ if (cfg->fc_protocol && cfg->fc_protocol != rt->rt6i_protocol)
+ continue;
dst_hold(&rt->dst);
read_unlock_bh(&table->tb6_lock);
@@ -2816,17 +2822,11 @@
struct rt6_info *rt, struct fib6_config *r_cfg)
{
struct rt6_nh *nh;
- struct rt6_info *rtnh;
int err = -EEXIST;
list_for_each_entry(nh, rt6_nh_list, next) {
/* check if rt6_info already exists */
- rtnh = nh->rt6_info;
-
- if (rtnh->dst.dev == rt->dst.dev &&
- rtnh->rt6i_idev == rt->rt6i_idev &&
- ipv6_addr_equal(&rtnh->rt6i_gateway,
- &rt->rt6i_gateway))
+ if (rt6_duplicate_nexthop(nh->rt6_info, rt))
return err;
}
@@ -3356,7 +3356,10 @@
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct net *net = dev_net(dev);
- if (event == NETDEV_REGISTER && (dev->flags & IFF_LOOPBACK)) {
+ if (!(dev->flags & IFF_LOOPBACK))
+ return NOTIFY_OK;
+
+ if (event == NETDEV_REGISTER) {
net->ipv6.ip6_null_entry->dst.dev = dev;
net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
@@ -3365,6 +3368,12 @@
net->ipv6.ip6_blk_hole_entry->dst.dev = dev;
net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
#endif
+ } else if (event == NETDEV_UNREGISTER) {
+ in6_dev_put(net->ipv6.ip6_null_entry->rt6i_idev);
+#ifdef CONFIG_IPV6_MULTIPLE_TABLES
+ in6_dev_put(net->ipv6.ip6_prohibit_entry->rt6i_idev);
+ in6_dev_put(net->ipv6.ip6_blk_hole_entry->rt6i_idev);
+#endif
}
return NOTIFY_OK;
@@ -3671,9 +3680,24 @@
static struct notifier_block ip6_route_dev_notifier = {
.notifier_call = ip6_route_dev_notify,
- .priority = 0,
+ .priority = ADDRCONF_NOTIFY_PRIORITY - 10,
};
+void __init ip6_route_init_special_entries(void)
+{
+ /* Registering of the loopback is done before this portion of code,
+ * the loopback reference in rt6_info will not be taken, do it
+ * manually for init_net */
+ init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev;
+ init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
+ #ifdef CONFIG_IPV6_MULTIPLE_TABLES
+ init_net.ipv6.ip6_prohibit_entry->dst.dev = init_net.loopback_dev;
+ init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
+ init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev;
+ init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
+ #endif
+}
+
int __init ip6_route_init(void)
{
int ret;
@@ -3700,17 +3724,6 @@
ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops_template.kmem_cachep;
- /* Registering of the loopback is done before this portion of code,
- * the loopback reference in rt6_info will not be taken, do it
- * manually for init_net */
- init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev;
- init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
- #ifdef CONFIG_IPV6_MULTIPLE_TABLES
- init_net.ipv6.ip6_prohibit_entry->dst.dev = init_net.loopback_dev;
- init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
- init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev;
- init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
- #endif
ret = fib6_init();
if (ret)
goto out_register_subsys;
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 336843c..7f36676 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -210,6 +210,7 @@
treq->snt_synack.v64 = 0;
treq->rcv_isn = ntohl(th->seq) - 1;
treq->snt_isn = cookie;
+ treq->txhash = net_tx_rndhash();
/*
* We need to lookup the dst_entry to get the correct window size.
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 8532768..bf9ba15 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -149,8 +149,13 @@
* connect() to INADDR_ANY means loopback (BSD'ism).
*/
- if (ipv6_addr_any(&usin->sin6_addr))
- usin->sin6_addr.s6_addr[15] = 0x1;
+ if (ipv6_addr_any(&usin->sin6_addr)) {
+ if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
+ ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
+ &usin->sin6_addr);
+ else
+ usin->sin6_addr = in6addr_loopback;
+ }
addr_type = ipv6_addr_type(&usin->sin6_addr);
@@ -189,7 +194,7 @@
* TCP over IPv4
*/
- if (addr_type == IPV6_ADDR_MAPPED) {
+ if (addr_type & IPV6_ADDR_MAPPED) {
u32 exthdrlen = icsk->icsk_ext_hdr_len;
struct sockaddr_in sin;
@@ -1035,6 +1040,7 @@
newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif
+ newnp->ipv6_mc_list = NULL;
newnp->ipv6_ac_list = NULL;
newnp->ipv6_fl_list = NULL;
newnp->pktoptions = NULL;
@@ -1104,6 +1110,7 @@
First: no IPv4 options.
*/
newinet->inet_opt = NULL;
+ newnp->ipv6_mc_list = NULL;
newnp->ipv6_ac_list = NULL;
newnp->ipv6_fl_list = NULL;
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 48f5145..9f27cab 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1201,6 +1201,10 @@
if (addr_len < SIN6_LEN_RFC2133)
return -EINVAL;
daddr = &sin6->sin6_addr;
+ if (ipv6_addr_any(daddr) &&
+ ipv6_addr_v4mapped(&np->saddr))
+ ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
+ daddr);
break;
case AF_INET:
goto do_udp_sendmsg;
diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
index 7441e1e..2e3c12e 100644
--- a/net/ipv6/udp_offload.c
+++ b/net/ipv6/udp_offload.c
@@ -29,6 +29,7 @@
u8 frag_hdr_sz = sizeof(struct frag_hdr);
__wsum csum;
int tnl_hlen;
+ int err;
mss = skb_shinfo(skb)->gso_size;
if (unlikely(skb->len <= mss))
@@ -85,7 +86,7 @@
if (uh->check == 0)
uh->check = CSUM_MANGLED_0;
- skb->ip_summed = CHECKSUM_NONE;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
/* Check if there is enough headroom to insert fragment header. */
tnl_hlen = skb_tnl_header_len(skb);
@@ -97,7 +98,10 @@
/* Find the unfragmentable header and shift it left by frag_hdr_sz
* bytes to insert fragment header.
*/
- unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr);
+ err = ip6_find_1stfragopt(skb, &prevhdr);
+ if (err < 0)
+ return ERR_PTR(err);
+ unfrag_ip6hlen = err;
nexthdr = *prevhdr;
*prevhdr = NEXTHDR_FRAGMENT;
unfrag_len = (skb_network_header(skb) - skb_mac_header(skb)) +
diff --git a/net/ipv6/xfrm6_mode_ro.c b/net/ipv6/xfrm6_mode_ro.c
index 0e01590..07d3657 100644
--- a/net/ipv6/xfrm6_mode_ro.c
+++ b/net/ipv6/xfrm6_mode_ro.c
@@ -47,6 +47,8 @@
iph = ipv6_hdr(skb);
hdr_len = x->type->hdr_offset(x, skb, &prevhdr);
+ if (hdr_len < 0)
+ return hdr_len;
skb_set_mac_header(skb, (prevhdr - x->props.header_len) - skb->data);
skb_set_network_header(skb, -x->props.header_len);
skb->transport_header = skb->network_header + hdr_len;
diff --git a/net/ipv6/xfrm6_mode_transport.c b/net/ipv6/xfrm6_mode_transport.c
index 4e34410..1d3bbe6 100644
--- a/net/ipv6/xfrm6_mode_transport.c
+++ b/net/ipv6/xfrm6_mode_transport.c
@@ -28,6 +28,8 @@
iph = ipv6_hdr(skb);
hdr_len = x->type->hdr_offset(x, skb, &prevhdr);
+ if (hdr_len < 0)
+ return hdr_len;
skb_set_mac_header(skb, (prevhdr - x->props.header_len) - skb->data);
skb_set_network_header(skb, -x->props.header_len);
skb->transport_header = skb->network_header + hdr_len;
diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c
index 48d0dc89b..e735f78 100644
--- a/net/ipx/af_ipx.c
+++ b/net/ipx/af_ipx.c
@@ -1168,11 +1168,10 @@
sipx->sipx_network = ipxif->if_netnum;
memcpy(sipx->sipx_node, ipxif->if_node,
sizeof(sipx->sipx_node));
- rc = -EFAULT;
- if (copy_to_user(arg, &ifr, sizeof(ifr)))
- break;
- ipxitf_put(ipxif);
rc = 0;
+ if (copy_to_user(arg, &ifr, sizeof(ifr)))
+ rc = -EFAULT;
+ ipxitf_put(ipxif);
break;
}
case SIOCAIPXITFCRT:
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index 8d2f7c9b..4a116d7 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -2227,7 +2227,7 @@
{
struct sock *sk = sock->sk;
struct irda_sock *self = irda_sk(sk);
- struct irda_device_list list;
+ struct irda_device_list list = { 0 };
struct irda_device_info *discoveries;
struct irda_ias_set * ias_opt; /* IAS get/query params */
struct ias_object * ias_obj; /* Object in IAS */
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 62adcde..94bf810 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -63,6 +63,7 @@
} u;
struct sk_buff *skb;
} dump;
+ struct mutex dump_lock;
};
static int parse_sockaddr_pair(struct sockaddr *sa, int ext_len,
@@ -143,6 +144,7 @@
{
struct netns_pfkey *net_pfkey = net_generic(net, pfkey_net_id);
struct sock *sk;
+ struct pfkey_sock *pfk;
int err;
if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
@@ -157,6 +159,9 @@
if (sk == NULL)
goto out;
+ pfk = pfkey_sk(sk);
+ mutex_init(&pfk->dump_lock);
+
sock->ops = &pfkey_ops;
sock_init_data(sock, sk);
@@ -223,7 +228,7 @@
#define BROADCAST_ONE 1
#define BROADCAST_REGISTERED 2
#define BROADCAST_PROMISC_ONLY 4
-static int pfkey_broadcast(struct sk_buff *skb,
+static int pfkey_broadcast(struct sk_buff *skb, gfp_t allocation,
int broadcast_flags, struct sock *one_sk,
struct net *net)
{
@@ -273,7 +278,7 @@
rcu_read_unlock();
if (one_sk != NULL)
- err = pfkey_broadcast_one(skb, &skb2, GFP_KERNEL, one_sk);
+ err = pfkey_broadcast_one(skb, &skb2, allocation, one_sk);
kfree_skb(skb2);
kfree_skb(skb);
@@ -285,23 +290,36 @@
struct sadb_msg *hdr;
int rc;
+ mutex_lock(&pfk->dump_lock);
+ if (!pfk->dump.dump) {
+ rc = 0;
+ goto out;
+ }
+
rc = pfk->dump.dump(pfk);
- if (rc == -ENOBUFS)
- return 0;
+ if (rc == -ENOBUFS) {
+ rc = 0;
+ goto out;
+ }
if (pfk->dump.skb) {
- if (!pfkey_can_dump(&pfk->sk))
- return 0;
+ if (!pfkey_can_dump(&pfk->sk)) {
+ rc = 0;
+ goto out;
+ }
hdr = (struct sadb_msg *) pfk->dump.skb->data;
hdr->sadb_msg_seq = 0;
hdr->sadb_msg_errno = rc;
- pfkey_broadcast(pfk->dump.skb, BROADCAST_ONE,
+ pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE,
&pfk->sk, sock_net(&pfk->sk));
pfk->dump.skb = NULL;
}
pfkey_terminate_dump(pfk);
+
+out:
+ mutex_unlock(&pfk->dump_lock);
return rc;
}
@@ -337,7 +355,7 @@
hdr->sadb_msg_len = (sizeof(struct sadb_msg) /
sizeof(uint64_t));
- pfkey_broadcast(skb, BROADCAST_ONE, sk, sock_net(sk));
+ pfkey_broadcast(skb, GFP_KERNEL, BROADCAST_ONE, sk, sock_net(sk));
return 0;
}
@@ -1139,6 +1157,7 @@
goto out;
}
+ err = -ENOBUFS;
key = ext_hdrs[SADB_EXT_KEY_AUTH - 1];
if (sa->sadb_sa_auth) {
int keysize = 0;
@@ -1150,8 +1169,10 @@
if (key)
keysize = (key->sadb_key_bits + 7) / 8;
x->aalg = kmalloc(sizeof(*x->aalg) + keysize, GFP_KERNEL);
- if (!x->aalg)
+ if (!x->aalg) {
+ err = -ENOMEM;
goto out;
+ }
strcpy(x->aalg->alg_name, a->name);
x->aalg->alg_key_len = 0;
if (key) {
@@ -1170,8 +1191,10 @@
goto out;
}
x->calg = kmalloc(sizeof(*x->calg), GFP_KERNEL);
- if (!x->calg)
+ if (!x->calg) {
+ err = -ENOMEM;
goto out;
+ }
strcpy(x->calg->alg_name, a->name);
x->props.calgo = sa->sadb_sa_encrypt;
} else {
@@ -1185,8 +1208,10 @@
if (key)
keysize = (key->sadb_key_bits + 7) / 8;
x->ealg = kmalloc(sizeof(*x->ealg) + keysize, GFP_KERNEL);
- if (!x->ealg)
+ if (!x->ealg) {
+ err = -ENOMEM;
goto out;
+ }
strcpy(x->ealg->alg_name, a->name);
x->ealg->alg_key_len = 0;
if (key) {
@@ -1231,8 +1256,10 @@
struct xfrm_encap_tmpl *natt;
x->encap = kmalloc(sizeof(*x->encap), GFP_KERNEL);
- if (!x->encap)
+ if (!x->encap) {
+ err = -ENOMEM;
goto out;
+ }
natt = x->encap;
n_type = ext_hdrs[SADB_X_EXT_NAT_T_TYPE-1];
@@ -1369,7 +1396,7 @@
xfrm_state_put(x);
- pfkey_broadcast(resp_skb, BROADCAST_ONE, sk, net);
+ pfkey_broadcast(resp_skb, GFP_KERNEL, BROADCAST_ONE, sk, net);
return 0;
}
@@ -1456,7 +1483,7 @@
hdr->sadb_msg_seq = c->seq;
hdr->sadb_msg_pid = c->portid;
- pfkey_broadcast(skb, BROADCAST_ALL, NULL, xs_net(x));
+ pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, xs_net(x));
return 0;
}
@@ -1569,7 +1596,7 @@
out_hdr->sadb_msg_reserved = 0;
out_hdr->sadb_msg_seq = hdr->sadb_msg_seq;
out_hdr->sadb_msg_pid = hdr->sadb_msg_pid;
- pfkey_broadcast(out_skb, BROADCAST_ONE, sk, sock_net(sk));
+ pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ONE, sk, sock_net(sk));
return 0;
}
@@ -1674,8 +1701,8 @@
return -ENOBUFS;
}
- pfkey_broadcast(supp_skb, BROADCAST_REGISTERED, sk, sock_net(sk));
-
+ pfkey_broadcast(supp_skb, GFP_KERNEL, BROADCAST_REGISTERED, sk,
+ sock_net(sk));
return 0;
}
@@ -1693,7 +1720,8 @@
hdr->sadb_msg_errno = (uint8_t) 0;
hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
- return pfkey_broadcast(skb, BROADCAST_ONE, sk, sock_net(sk));
+ return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ONE, sk,
+ sock_net(sk));
}
static int key_notify_sa_flush(const struct km_event *c)
@@ -1714,7 +1742,7 @@
hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
hdr->sadb_msg_reserved = 0;
- pfkey_broadcast(skb, BROADCAST_ALL, NULL, c->net);
+ pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net);
return 0;
}
@@ -1771,7 +1799,7 @@
out_hdr->sadb_msg_pid = pfk->dump.msg_portid;
if (pfk->dump.skb)
- pfkey_broadcast(pfk->dump.skb, BROADCAST_ONE,
+ pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE,
&pfk->sk, sock_net(&pfk->sk));
pfk->dump.skb = out_skb;
@@ -1797,19 +1825,26 @@
struct xfrm_address_filter *filter = NULL;
struct pfkey_sock *pfk = pfkey_sk(sk);
- if (pfk->dump.dump != NULL)
+ mutex_lock(&pfk->dump_lock);
+ if (pfk->dump.dump != NULL) {
+ mutex_unlock(&pfk->dump_lock);
return -EBUSY;
+ }
proto = pfkey_satype2proto(hdr->sadb_msg_satype);
- if (proto == 0)
+ if (proto == 0) {
+ mutex_unlock(&pfk->dump_lock);
return -EINVAL;
+ }
if (ext_hdrs[SADB_X_EXT_FILTER - 1]) {
struct sadb_x_filter *xfilter = ext_hdrs[SADB_X_EXT_FILTER - 1];
filter = kmalloc(sizeof(*filter), GFP_KERNEL);
- if (filter == NULL)
+ if (filter == NULL) {
+ mutex_unlock(&pfk->dump_lock);
return -ENOMEM;
+ }
memcpy(&filter->saddr, &xfilter->sadb_x_filter_saddr,
sizeof(xfrm_address_t));
@@ -1825,6 +1860,7 @@
pfk->dump.dump = pfkey_dump_sa;
pfk->dump.done = pfkey_dump_sa_done;
xfrm_state_walk_init(&pfk->dump.u.state, proto, filter);
+ mutex_unlock(&pfk->dump_lock);
return pfkey_do_dump(pfk);
}
@@ -1851,7 +1887,7 @@
new_hdr->sadb_msg_errno = 0;
}
- pfkey_broadcast(skb, BROADCAST_ALL, NULL, sock_net(sk));
+ pfkey_broadcast(skb, GFP_KERNEL, BROADCAST_ALL, NULL, sock_net(sk));
return 0;
}
@@ -2184,7 +2220,7 @@
out_hdr->sadb_msg_errno = 0;
out_hdr->sadb_msg_seq = c->seq;
out_hdr->sadb_msg_pid = c->portid;
- pfkey_broadcast(out_skb, BROADCAST_ALL, NULL, xp_net(xp));
+ pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ALL, NULL, xp_net(xp));
return 0;
}
@@ -2404,7 +2440,7 @@
out_hdr->sadb_msg_errno = 0;
out_hdr->sadb_msg_seq = hdr->sadb_msg_seq;
out_hdr->sadb_msg_pid = hdr->sadb_msg_pid;
- pfkey_broadcast(out_skb, BROADCAST_ONE, sk, xp_net(xp));
+ pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ONE, sk, xp_net(xp));
err = 0;
out:
@@ -2660,7 +2696,7 @@
out_hdr->sadb_msg_pid = pfk->dump.msg_portid;
if (pfk->dump.skb)
- pfkey_broadcast(pfk->dump.skb, BROADCAST_ONE,
+ pfkey_broadcast(pfk->dump.skb, GFP_ATOMIC, BROADCAST_ONE,
&pfk->sk, sock_net(&pfk->sk));
pfk->dump.skb = out_skb;
@@ -2684,14 +2720,18 @@
{
struct pfkey_sock *pfk = pfkey_sk(sk);
- if (pfk->dump.dump != NULL)
+ mutex_lock(&pfk->dump_lock);
+ if (pfk->dump.dump != NULL) {
+ mutex_unlock(&pfk->dump_lock);
return -EBUSY;
+ }
pfk->dump.msg_version = hdr->sadb_msg_version;
pfk->dump.msg_portid = hdr->sadb_msg_pid;
pfk->dump.dump = pfkey_dump_sp;
pfk->dump.done = pfkey_dump_sp_done;
xfrm_policy_walk_init(&pfk->dump.u.policy, XFRM_POLICY_TYPE_MAIN);
+ mutex_unlock(&pfk->dump_lock);
return pfkey_do_dump(pfk);
}
@@ -2713,7 +2753,7 @@
hdr->sadb_msg_satype = SADB_SATYPE_UNSPEC;
hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
hdr->sadb_msg_reserved = 0;
- pfkey_broadcast(skb_out, BROADCAST_ALL, NULL, c->net);
+ pfkey_broadcast(skb_out, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net);
return 0;
}
@@ -2775,7 +2815,7 @@
void *ext_hdrs[SADB_EXT_MAX];
int err;
- pfkey_broadcast(skb_clone(skb, GFP_KERNEL),
+ pfkey_broadcast(skb_clone(skb, GFP_KERNEL), GFP_KERNEL,
BROADCAST_PROMISC_ONLY, NULL, sock_net(sk));
memset(ext_hdrs, 0, sizeof(ext_hdrs));
@@ -2997,7 +3037,8 @@
out_hdr->sadb_msg_seq = 0;
out_hdr->sadb_msg_pid = 0;
- pfkey_broadcast(out_skb, BROADCAST_REGISTERED, NULL, xs_net(x));
+ pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL,
+ xs_net(x));
return 0;
}
@@ -3187,7 +3228,8 @@
xfrm_ctx->ctx_len);
}
- return pfkey_broadcast(skb, BROADCAST_REGISTERED, NULL, xs_net(x));
+ return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL,
+ xs_net(x));
}
static struct xfrm_policy *pfkey_compile_policy(struct sock *sk, int opt,
@@ -3385,7 +3427,8 @@
n_port->sadb_x_nat_t_port_port = sport;
n_port->sadb_x_nat_t_port_reserved = 0;
- return pfkey_broadcast(skb, BROADCAST_REGISTERED, NULL, xs_net(x));
+ return pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_REGISTERED, NULL,
+ xs_net(x));
}
#ifdef CONFIG_NET_KEY_MIGRATE
@@ -3577,7 +3620,7 @@
}
/* broadcast migrate message to sockets */
- pfkey_broadcast(skb, BROADCAST_ALL, NULL, &init_net);
+ pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, &init_net);
return 0;
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index ec17cbe..d3dec41 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -278,7 +278,8 @@
}
EXPORT_SYMBOL_GPL(l2tp_session_find);
-struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth)
+struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth,
+ bool do_ref)
{
int hash;
struct l2tp_session *session;
@@ -288,6 +289,9 @@
for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
hlist_for_each_entry(session, &tunnel->session_hlist[hash], hlist) {
if (++count > nth) {
+ l2tp_session_inc_refcount(session);
+ if (do_ref && session->ref)
+ session->ref(session);
read_unlock_bh(&tunnel->hlist_lock);
return session;
}
@@ -298,7 +302,7 @@
return NULL;
}
-EXPORT_SYMBOL_GPL(l2tp_session_find_nth);
+EXPORT_SYMBOL_GPL(l2tp_session_get_nth);
/* Lookup a session by interface name.
* This is very inefficient but is only used by management interfaces.
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index 763e8e2..555d962 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -243,7 +243,8 @@
struct l2tp_session *l2tp_session_find(struct net *net,
struct l2tp_tunnel *tunnel,
u32 session_id);
-struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth);
+struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth,
+ bool do_ref);
struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname);
struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id);
struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth);
diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c
index 2d6760a..d100aed 100644
--- a/net/l2tp/l2tp_debugfs.c
+++ b/net/l2tp/l2tp_debugfs.c
@@ -53,7 +53,7 @@
static void l2tp_dfs_next_session(struct l2tp_dfs_seq_data *pd)
{
- pd->session = l2tp_session_find_nth(pd->tunnel, pd->session_idx);
+ pd->session = l2tp_session_get_nth(pd->tunnel, pd->session_idx, true);
pd->session_idx++;
if (pd->session == NULL) {
@@ -238,10 +238,14 @@
}
/* Show the tunnel or session context */
- if (pd->session == NULL)
+ if (!pd->session) {
l2tp_dfs_seq_tunnel_show(m, pd->tunnel);
- else
+ } else {
l2tp_dfs_seq_session_show(m, pd->session);
+ if (pd->session->deref)
+ pd->session->deref(pd->session);
+ l2tp_session_dec_refcount(pd->session);
+ }
out:
return 0;
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
index 2caaa84..665cc74 100644
--- a/net/l2tp/l2tp_netlink.c
+++ b/net/l2tp/l2tp_netlink.c
@@ -827,7 +827,7 @@
goto out;
}
- session = l2tp_session_find_nth(tunnel, si);
+ session = l2tp_session_get_nth(tunnel, si, false);
if (session == NULL) {
ti++;
tunnel = NULL;
@@ -837,8 +837,11 @@
if (l2tp_nl_session_send(skb, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, NLM_F_MULTI,
- session, L2TP_CMD_SESSION_GET) < 0)
+ session, L2TP_CMD_SESSION_GET) < 0) {
+ l2tp_session_dec_refcount(session);
break;
+ }
+ l2tp_session_dec_refcount(session);
si++;
}
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index 1ad18c5..8ab9c5d 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -467,6 +467,10 @@
static void pppol2tp_session_destruct(struct sock *sk)
{
struct l2tp_session *session = sk->sk_user_data;
+
+ skb_queue_purge(&sk->sk_receive_queue);
+ skb_queue_purge(&sk->sk_write_queue);
+
if (session) {
sk->sk_user_data = NULL;
BUG_ON(session->magic != L2TP_SESSION_MAGIC);
@@ -505,9 +509,6 @@
l2tp_session_queue_purge(session);
sock_put(sk);
}
- skb_queue_purge(&sk->sk_receive_queue);
- skb_queue_purge(&sk->sk_write_queue);
-
release_sock(sk);
/* This will delete the session context via
@@ -1574,7 +1575,7 @@
static void pppol2tp_next_session(struct net *net, struct pppol2tp_seq_data *pd)
{
- pd->session = l2tp_session_find_nth(pd->tunnel, pd->session_idx);
+ pd->session = l2tp_session_get_nth(pd->tunnel, pd->session_idx, true);
pd->session_idx++;
if (pd->session == NULL) {
@@ -1701,10 +1702,14 @@
/* Show the tunnel or session context.
*/
- if (pd->session == NULL)
+ if (!pd->session) {
pppol2tp_seq_tunnel_show(m, pd->tunnel);
- else
+ } else {
pppol2tp_seq_session_show(m, pd->session);
+ if (pd->session->deref)
+ pd->session->deref(pd->session);
+ l2tp_session_dec_refcount(pd->session);
+ }
out:
return 0;
@@ -1863,4 +1868,4 @@
MODULE_LICENSE("GPL");
MODULE_VERSION(PPPOL2TP_DRV_VERSION);
MODULE_ALIAS("pppox-proto-" __stringify(PX_PROTO_OL2TP));
-MODULE_ALIAS_L2TP_PWTYPE(11);
+MODULE_ALIAS_L2TP_PWTYPE(7);
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index f598ff8..a830356 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -290,10 +290,13 @@
buf_size = IEEE80211_MAX_AMPDU_BUF;
/* make sure the size doesn't exceed the maximum supported by the hw */
- if (buf_size > local->hw.max_rx_aggregation_subframes)
- buf_size = local->hw.max_rx_aggregation_subframes;
+ if (buf_size > sta->sta.max_rx_aggregation_subframes)
+ buf_size = sta->sta.max_rx_aggregation_subframes;
params.buf_size = buf_size;
+ ht_dbg(sta->sdata, "AddBA Req buf_size=%d for %pM\n",
+ buf_size, sta->sta.addr);
+
/* examine state machine */
mutex_lock(&sta->ampdu_mlme.mtx);
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 0851727..61a8f7d 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -66,6 +66,8 @@
2 + (IEEE80211_MAX_SUPP_RATES - 8) +
2 + sizeof(struct ieee80211_ht_cap) +
2 + sizeof(struct ieee80211_ht_operation) +
+ 2 + sizeof(struct ieee80211_vht_cap) +
+ 2 + sizeof(struct ieee80211_vht_operation) +
ifibss->ie_len;
presp = kzalloc(sizeof(*presp) + frame_len, GFP_KERNEL);
if (!presp)
@@ -486,14 +488,14 @@
struct beacon_data *presp, *old_presp;
struct cfg80211_bss *cbss;
const struct cfg80211_bss_ies *ies;
- u16 capability = 0;
+ u16 capability = WLAN_CAPABILITY_IBSS;
u64 tsf;
int ret = 0;
sdata_assert_lock(sdata);
if (ifibss->privacy)
- capability = WLAN_CAPABILITY_PRIVACY;
+ capability |= WLAN_CAPABILITY_PRIVACY;
cbss = cfg80211_get_bss(sdata->local->hw.wiphy, ifibss->chandef.chan,
ifibss->bssid, ifibss->ssid,
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 2faf156..24ee6c6 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -891,12 +891,17 @@
supp_ht = supp_ht || sband->ht_cap.ht_supported;
supp_vht = supp_vht || sband->vht_cap.vht_supported;
- if (sband->ht_cap.ht_supported)
- local->rx_chains =
- max(ieee80211_mcs_to_chains(&sband->ht_cap.mcs),
- local->rx_chains);
+ if (!sband->ht_cap.ht_supported)
+ continue;
/* TODO: consider VHT for RX chains, hopefully it's the same */
+ local->rx_chains =
+ max(ieee80211_mcs_to_chains(&sband->ht_cap.mcs),
+ local->rx_chains);
+
+ /* no need to mask, SM_PS_DISABLED has all bits set */
+ sband->ht_cap.cap |= WLAN_HT_CAP_SM_PS_DISABLED <<
+ IEEE80211_HT_CAP_SM_PS_SHIFT;
}
/* if low-level driver supports AP, we also support VLAN */
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 3516f4d..729d3cc 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -1456,12 +1456,16 @@
*/
if (!ieee80211_hw_check(&sta->local->hw, AP_LINK_PS) &&
!ieee80211_has_morefrags(hdr->frame_control) &&
+ !ieee80211_is_back_req(hdr->frame_control) &&
!(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
(rx->sdata->vif.type == NL80211_IFTYPE_AP ||
rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
- /* PM bit is only checked in frames where it isn't reserved,
+ /*
+ * PM bit is only checked in frames where it isn't reserved,
* in AP mode it's reserved in non-bufferable management frames
* (cf. IEEE 802.11-2012 8.2.4.1.7 Power Management field)
+ * BAR frames should be ignored as specified in
+ * IEEE 802.11-2012 10.2.1.2.
*/
(!ieee80211_is_mgmt(hdr->frame_control) ||
ieee80211_is_bufferable_mmpdu(hdr->frame_control))) {
@@ -3397,6 +3401,27 @@
!ether_addr_equal(bssid, hdr->addr1))
return false;
}
+
+ /*
+ * 802.11-2016 Table 9-26 says that for data frames, A1 must be
+ * the BSSID - we've checked that already but may have accepted
+ * the wildcard (ff:ff:ff:ff:ff:ff).
+ *
+ * It also says:
+ * The BSSID of the Data frame is determined as follows:
+ * a) If the STA is contained within an AP or is associated
+ * with an AP, the BSSID is the address currently in use
+ * by the STA contained in the AP.
+ *
+ * So we should not accept data frames with an address that's
+ * multicast.
+ *
+ * Accepting it also opens a security problem because stations
+ * could encrypt it with the GTK and inject traffic that way.
+ */
+ if (ieee80211_is_data(hdr->frame_control) && multicast)
+ return false;
+
return true;
case NL80211_IFTYPE_WDS:
if (bssid || !ieee80211_is_data(hdr->frame_control))
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index fe88071..d207580 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -330,6 +330,9 @@
memcpy(sta->addr, addr, ETH_ALEN);
memcpy(sta->sta.addr, addr, ETH_ALEN);
+ sta->sta.max_rx_aggregation_subframes =
+ local->hw.max_rx_aggregation_subframes;
+
sta->local = local;
sta->sdata = sdata;
sta->rx_stats.last_rx = jiffies;
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index d824c38..e19ea1c5 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -16,6 +16,7 @@
#include <asm/unaligned.h>
#include <net/mac80211.h>
#include <crypto/aes.h>
+#include <crypto/algapi.h>
#include "ieee80211_i.h"
#include "michael.h"
@@ -152,7 +153,7 @@
data_len = skb->len - hdrlen - MICHAEL_MIC_LEN;
key = &rx->key->conf.key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY];
michael_mic(key, hdr, data, data_len, mic);
- if (memcmp(mic, data + data_len, MICHAEL_MIC_LEN) != 0)
+ if (crypto_memneq(mic, data + data_len, MICHAEL_MIC_LEN))
goto mic_fail;
/* remove Michael MIC from payload */
@@ -1044,7 +1045,7 @@
bip_aad(skb, aad);
ieee80211_aes_cmac(key->u.aes_cmac.tfm, aad,
skb->data + 24, skb->len - 24, mic);
- if (memcmp(mic, mmie->mic, sizeof(mmie->mic)) != 0) {
+ if (crypto_memneq(mic, mmie->mic, sizeof(mmie->mic))) {
key->u.aes_cmac.icverrors++;
return RX_DROP_UNUSABLE;
}
@@ -1094,7 +1095,7 @@
bip_aad(skb, aad);
ieee80211_aes_cmac_256(key->u.aes_cmac.tfm, aad,
skb->data + 24, skb->len - 24, mic);
- if (memcmp(mic, mmie->mic, sizeof(mmie->mic)) != 0) {
+ if (crypto_memneq(mic, mmie->mic, sizeof(mmie->mic))) {
key->u.aes_cmac.icverrors++;
return RX_DROP_UNUSABLE;
}
@@ -1198,7 +1199,7 @@
if (ieee80211_aes_gmac(key->u.aes_gmac.tfm, aad, nonce,
skb->data + 24, skb->len - 24,
mic) < 0 ||
- memcmp(mic, mmie->mic, sizeof(mmie->mic)) != 0) {
+ crypto_memneq(mic, mmie->mic, sizeof(mmie->mic))) {
key->u.aes_gmac.icverrors++;
return RX_DROP_UNUSABLE;
}
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 4da5600..dd1649c 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -845,10 +845,8 @@
{
unsigned int verdict = NF_DROP;
- if (IP_VS_FWD_METHOD(cp) != 0) {
- pr_err("shouldn't reach here, because the box is on the "
- "half connection in the tun/dr module.\n");
- }
+ if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ)
+ goto ignore_cp;
/* Ensure the checksum is correct */
if (!skb_csum_unnecessary(skb) && ip_vs_checksum_complete(skb, ihl)) {
@@ -882,6 +880,8 @@
ip_vs_notrack(skb);
else
ip_vs_update_conntrack(skb, cp, 0);
+
+ignore_cp:
verdict = NF_ACCEPT;
out:
@@ -1242,8 +1242,11 @@
*/
cp = pp->conn_out_get(ipvs, af, skb, &iph);
- if (likely(cp))
+ if (likely(cp)) {
+ if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ)
+ goto ignore_cp;
return handle_response(af, skb, pd, cp, &iph, hooknum);
+ }
if (sysctl_nat_icmp_send(ipvs) &&
(pp->protocol == IPPROTO_TCP ||
pp->protocol == IPPROTO_UDP ||
@@ -1285,9 +1288,15 @@
}
}
}
+
+out:
IP_VS_DBG_PKT(12, af, pp, skb, iph.off,
"ip_vs_out: packet continues traversal as normal");
return NF_ACCEPT;
+
+ignore_cp:
+ __ip_vs_conn_put(cp);
+ goto out;
}
/*
diff --git a/net/netfilter/nf_conntrack_extend.c b/net/netfilter/nf_conntrack_extend.c
index 1a95459..531ca55 100644
--- a/net/netfilter/nf_conntrack_extend.c
+++ b/net/netfilter/nf_conntrack_extend.c
@@ -53,7 +53,11 @@
rcu_read_lock();
t = rcu_dereference(nf_ct_ext_types[id]);
- BUG_ON(t == NULL);
+ if (!t) {
+ rcu_read_unlock();
+ return NULL;
+ }
+
off = ALIGN(sizeof(struct nf_ct_ext), t->align);
len = off + t->len + var_alloc_len;
alloc_size = t->alloc_size + var_alloc_len;
@@ -88,7 +92,10 @@
rcu_read_lock();
t = rcu_dereference(nf_ct_ext_types[id]);
- BUG_ON(t == NULL);
+ if (!t) {
+ rcu_read_unlock();
+ return NULL;
+ }
newoff = ALIGN(old->len, t->align);
newlen = newoff + t->len + var_alloc_len;
@@ -186,6 +193,6 @@
RCU_INIT_POINTER(nf_ct_ext_types[type->id], NULL);
update_alloc_size(type);
mutex_unlock(&nf_ct_ext_type_mutex);
- rcu_barrier(); /* Wait for completion of call_rcu()'s */
+ synchronize_rcu();
}
EXPORT_SYMBOL_GPL(nf_ct_extend_unregister);
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 4f5a4b3..60b2c6a 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -45,6 +45,8 @@
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_conntrack_timestamp.h>
#include <net/netfilter/nf_conntrack_labels.h>
+#include <net/netfilter/nf_conntrack_seqadj.h>
+#include <net/netfilter/nf_conntrack_synproxy.h>
#ifdef CONFIG_NF_NAT_NEEDED
#include <net/netfilter/nf_nat_core.h>
#include <net/netfilter/nf_nat_l4proto.h>
@@ -1797,6 +1799,8 @@
nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
nf_ct_ecache_ext_add(ct, 0, 0, GFP_ATOMIC);
nf_ct_labels_ext_add(ct);
+ nfct_seqadj_ext_add(ct);
+ nfct_synproxy_ext_add(ct);
/* we must add conntrack extensions before confirmation. */
ct->status |= IPS_CONFIRMED;
diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c
index b7c43de..00f798b 100644
--- a/net/netfilter/xt_TCPMSS.c
+++ b/net/netfilter/xt_TCPMSS.c
@@ -104,7 +104,7 @@
tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
tcp_hdrlen = tcph->doff * 4;
- if (len < tcp_hdrlen)
+ if (len < tcp_hdrlen || tcp_hdrlen < sizeof(struct tcphdr))
return -1;
if (info->mss == XT_TCPMSS_CLAMP_PMTU) {
@@ -156,6 +156,10 @@
if (len > tcp_hdrlen)
return 0;
+ /* tcph->doff has 4 bits, do not wrap it to 0 */
+ if (tcp_hdrlen >= 15 * 4)
+ return 0;
+
/*
* MSS Option not found ?! add it..
*/
diff --git a/net/netfilter/xt_qtaguid.c b/net/netfilter/xt_qtaguid.c
index fef69cb..6f5cdc5 100644
--- a/net/netfilter/xt_qtaguid.c
+++ b/net/netfilter/xt_qtaguid.c
@@ -1713,18 +1713,8 @@
}
MT_DEBUG("qtaguid[%d]: sk=%p got_sock=%d fam=%d proto=%d\n",
par->hooknum, sk, got_sock, par->family, ipx_proto(skb, par));
- if (sk != NULL) {
- set_sk_callback_lock = true;
- read_lock_bh(&sk->sk_callback_lock);
- MT_DEBUG("qtaguid[%d]: sk=%p->sk_socket=%p->file=%p\n",
- par->hooknum, sk, sk->sk_socket,
- sk->sk_socket ? sk->sk_socket->file : (void *)-1LL);
- filp = sk->sk_socket ? sk->sk_socket->file : NULL;
- MT_DEBUG("qtaguid[%d]: filp...uid=%u\n",
- par->hooknum, filp ? from_kuid(&init_user_ns, filp->f_cred->fsuid) : -1);
- }
- if (sk == NULL || sk->sk_socket == NULL) {
+ if (!sk) {
/*
* Here, the qtaguid_find_sk() using connection tracking
* couldn't find the owner, so for now we just count them
@@ -1732,9 +1722,7 @@
*/
if (do_tag_stat)
account_for_uid(skb, sk, 0, par);
- MT_DEBUG("qtaguid[%d]: leaving (sk?sk->sk_socket)=%p\n",
- par->hooknum,
- sk ? sk->sk_socket : NULL);
+ MT_DEBUG("qtaguid[%d]: leaving (sk=NULL)\n", par->hooknum);
res = (info->match ^ info->invert) == 0;
atomic64_inc(&qtu_events.match_no_sk);
goto put_sock_ret_res;
@@ -1742,19 +1730,10 @@
res = false;
goto put_sock_ret_res;
}
- filp = sk->sk_socket->file;
- if (filp == NULL) {
- MT_DEBUG("qtaguid[%d]: leaving filp=NULL\n", par->hooknum);
- if (do_tag_stat)
- account_for_uid(skb, sk, 0, par);
- res = ((info->match ^ info->invert) &
- (XT_QTAGUID_UID | XT_QTAGUID_GID)) == 0;
- atomic64_inc(&qtu_events.match_no_sk_file);
- goto put_sock_ret_res;
- }
- sock_uid = filp->f_cred->fsuid;
+ sock_uid = sk->sk_uid;
if (do_tag_stat)
- account_for_uid(skb, sk, from_kuid(&init_user_ns, sock_uid), par);
+ account_for_uid(skb, sk, from_kuid(&init_user_ns, sock_uid),
+ par);
/*
* The following two tests fail the match when:
@@ -1766,8 +1745,8 @@
kuid_t uid_min = make_kuid(&init_user_ns, info->uid_min);
kuid_t uid_max = make_kuid(&init_user_ns, info->uid_max);
- if ((uid_gte(filp->f_cred->fsuid, uid_min) &&
- uid_lte(filp->f_cred->fsuid, uid_max)) ^
+ if ((uid_gte(sock_uid, uid_min) &&
+ uid_lte(sock_uid, uid_max)) ^
!(info->invert & XT_QTAGUID_UID)) {
MT_DEBUG("qtaguid[%d]: leaving uid not matching\n",
par->hooknum);
@@ -1778,7 +1757,21 @@
if (info->match & XT_QTAGUID_GID) {
kgid_t gid_min = make_kgid(&init_user_ns, info->gid_min);
kgid_t gid_max = make_kgid(&init_user_ns, info->gid_max);
-
+ set_sk_callback_lock = true;
+ read_lock_bh(&sk->sk_callback_lock);
+ MT_DEBUG("qtaguid[%d]: sk=%p->sk_socket=%p->file=%p\n",
+ par->hooknum, sk, sk->sk_socket,
+ sk->sk_socket ? sk->sk_socket->file : (void *)-1LL);
+ filp = sk->sk_socket ? sk->sk_socket->file : NULL;
+ if (!filp) {
+ res = ((info->match ^ info->invert) &
+ XT_QTAGUID_GID) == 0;
+ atomic64_inc(&qtu_events.match_no_sk_gid);
+ goto put_sock_ret_res;
+ }
+ MT_DEBUG("qtaguid[%d]: filp...uid=%u\n",
+ par->hooknum, filp ?
+ from_kuid(&init_user_ns, filp->f_cred->fsuid) : -1);
if ((gid_gte(filp->f_cred->fsgid, gid_min) &&
gid_lte(filp->f_cred->fsgid, gid_max)) ^
!(info->invert & XT_QTAGUID_GID)) {
@@ -1950,7 +1943,7 @@
"match_found_sk_in_ct=%llu "
"match_found_no_sk_in_ct=%llu "
"match_no_sk=%llu "
- "match_no_sk_file=%llu\n",
+ "match_no_sk_gid=%llu\n",
(u64)atomic64_read(&qtu_events.sockets_tagged),
(u64)atomic64_read(&qtu_events.sockets_untagged),
(u64)atomic64_read(&qtu_events.counter_set_changes),
@@ -1962,7 +1955,7 @@
(u64)atomic64_read(&qtu_events.match_found_sk_in_ct),
(u64)atomic64_read(&qtu_events.match_found_no_sk_in_ct),
(u64)atomic64_read(&qtu_events.match_no_sk),
- (u64)atomic64_read(&qtu_events.match_no_sk_file));
+ (u64)atomic64_read(&qtu_events.match_no_sk_gid));
/* Count the following as part of the last item_index. No need
* to lock the sock_tag_list here since it is already locked when
diff --git a/net/netfilter/xt_qtaguid_internal.h b/net/netfilter/xt_qtaguid_internal.h
index 8178fbd..c705270 100644
--- a/net/netfilter/xt_qtaguid_internal.h
+++ b/net/netfilter/xt_qtaguid_internal.h
@@ -289,10 +289,10 @@
*/
atomic64_t match_no_sk;
/*
- * The file ptr in the sk_socket wasn't there.
+ * The file ptr in the sk_socket wasn't there and we couldn't get GID.
* This might happen for traffic while the socket is being closed.
*/
- atomic64_t match_no_sk_file;
+ atomic64_t match_no_sk_gid;
};
/* Track the set active_set for the given tag. */
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 96fe1c1..2141d04 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -2087,7 +2087,7 @@
if (!skb) {
alloc_size = alloc_min_size;
skb = netlink_alloc_skb(sk, alloc_size, nlk->portid,
- (GFP_KERNEL & ~__GFP_DIRECT_RECLAIM));
+ GFP_KERNEL);
}
if (!skb)
goto errout_skb;
diff --git a/net/nfc/core.c b/net/nfc/core.c
index 1fe3d3b..c5a2c7e 100644
--- a/net/nfc/core.c
+++ b/net/nfc/core.c
@@ -969,6 +969,8 @@
kfree(se);
}
+ ida_simple_remove(&nfc_index_ida, dev->idx);
+
kfree(dev);
}
@@ -1043,6 +1045,7 @@
int tx_headroom, int tx_tailroom)
{
struct nfc_dev *dev;
+ int rc;
if (!ops->start_poll || !ops->stop_poll || !ops->activate_target ||
!ops->deactivate_target || !ops->im_transceive)
@@ -1055,6 +1058,15 @@
if (!dev)
return NULL;
+ rc = ida_simple_get(&nfc_index_ida, 0, 0, GFP_KERNEL);
+ if (rc < 0)
+ goto err_free_dev;
+ dev->idx = rc;
+
+ dev->dev.class = &nfc_class;
+ dev_set_name(&dev->dev, "nfc%d", dev->idx);
+ device_initialize(&dev->dev);
+
dev->ops = ops;
dev->supported_protocols = supported_protocols;
dev->tx_headroom = tx_headroom;
@@ -1077,6 +1089,11 @@
}
return dev;
+
+err_free_dev:
+ kfree(dev);
+
+ return ERR_PTR(rc);
}
EXPORT_SYMBOL(nfc_allocate_device);
@@ -1091,14 +1108,6 @@
pr_debug("dev_name=%s\n", dev_name(&dev->dev));
- dev->idx = ida_simple_get(&nfc_index_ida, 0, 0, GFP_KERNEL);
- if (dev->idx < 0)
- return dev->idx;
-
- dev->dev.class = &nfc_class;
- dev_set_name(&dev->dev, "nfc%d", dev->idx);
- device_initialize(&dev->dev);
-
mutex_lock(&nfc_devlist_mutex);
nfc_devlist_generation++;
rc = device_add(&dev->dev);
@@ -1136,12 +1145,10 @@
*/
void nfc_unregister_device(struct nfc_dev *dev)
{
- int rc, id;
+ int rc;
pr_debug("dev_name=%s\n", dev_name(&dev->dev));
- id = dev->idx;
-
if (dev->rfkill) {
rfkill_unregister(dev->rfkill);
rfkill_destroy(dev->rfkill);
@@ -1166,8 +1173,6 @@
nfc_devlist_generation++;
device_del(&dev->dev);
mutex_unlock(&nfc_devlist_mutex);
-
- ida_simple_remove(&nfc_index_ida, id);
}
EXPORT_SYMBOL(nfc_unregister_device);
diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c
index ecf0a01..9c222a1 100644
--- a/net/nfc/llcp_sock.c
+++ b/net/nfc/llcp_sock.c
@@ -76,7 +76,8 @@
struct sockaddr_nfc_llcp llcp_addr;
int len, ret = 0;
- if (!addr || addr->sa_family != AF_NFC)
+ if (!addr || alen < offsetofend(struct sockaddr, sa_family) ||
+ addr->sa_family != AF_NFC)
return -EINVAL;
pr_debug("sk %p addr %p family %d\n", sk, addr, addr->sa_family);
@@ -150,7 +151,8 @@
struct sockaddr_nfc_llcp llcp_addr;
int len, ret = 0;
- if (!addr || addr->sa_family != AF_NFC)
+ if (!addr || alen < offsetofend(struct sockaddr, sa_family) ||
+ addr->sa_family != AF_NFC)
return -EINVAL;
pr_debug("sk %p addr %p family %d\n", sk, addr, addr->sa_family);
@@ -655,8 +657,7 @@
pr_debug("sock %p sk %p flags 0x%x\n", sock, sk, flags);
- if (!addr || len < sizeof(struct sockaddr_nfc) ||
- addr->sa_family != AF_NFC)
+ if (!addr || len < sizeof(*addr) || addr->sa_family != AF_NFC)
return -EINVAL;
if (addr->service_name_len == 0 && addr->dsap == 0)
diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
index 10c99a5..67583ad 100644
--- a/net/nfc/nci/core.c
+++ b/net/nfc/nci/core.c
@@ -1084,8 +1084,7 @@
return ndev;
free_nfc:
- kfree(ndev->nfc_dev);
-
+ nfc_free_device(ndev->nfc_dev);
free_nci:
kfree(ndev);
return NULL;
diff --git a/net/nfc/netlink.c b/net/nfc/netlink.c
index f58c1fb..12dfb45 100644
--- a/net/nfc/netlink.c
+++ b/net/nfc/netlink.c
@@ -873,7 +873,9 @@
u32 device_idx, target_idx, protocol;
int rc;
- if (!info->attrs[NFC_ATTR_DEVICE_INDEX])
+ if (!info->attrs[NFC_ATTR_DEVICE_INDEX] ||
+ !info->attrs[NFC_ATTR_TARGET_INDEX] ||
+ !info->attrs[NFC_ATTR_PROTOCOLS])
return -EINVAL;
device_idx = nla_get_u32(info->attrs[NFC_ATTR_DEVICE_INDEX]);
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index ad58d2a..6a2507f 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -577,8 +577,8 @@
nla_for_each_nested(a, attr, rem) {
int type = nla_type(a);
- int maxlen = ovs_ct_attr_lens[type].maxlen;
- int minlen = ovs_ct_attr_lens[type].minlen;
+ int maxlen;
+ int minlen;
if (type > OVS_CT_ATTR_MAX) {
OVS_NLERR(log,
@@ -586,6 +586,9 @@
type, OVS_CT_ATTR_MAX);
return -EINVAL;
}
+
+ maxlen = ovs_ct_attr_lens[type].maxlen;
+ minlen = ovs_ct_attr_lens[type].minlen;
if (nla_len(a) < minlen || nla_len(a) > maxlen) {
OVS_NLERR(log,
"Conntrack attr type has unexpected length (type=%d, length=%d, expected=%d)",
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index d1bd4a4..d26b28d 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -588,7 +588,7 @@
ipv4 = true;
break;
case OVS_TUNNEL_KEY_ATTR_IPV6_SRC:
- SW_FLOW_KEY_PUT(match, tun_key.u.ipv6.dst,
+ SW_FLOW_KEY_PUT(match, tun_key.u.ipv6.src,
nla_get_in6_addr(a), is_mask);
ipv6 = true;
break;
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index d768001..148ec13 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -3622,12 +3622,19 @@
if (optlen != sizeof(val))
return -EINVAL;
- if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
- return -EBUSY;
if (copy_from_user(&val, optval, sizeof(val)))
return -EFAULT;
- po->tp_reserve = val;
- return 0;
+ if (val > INT_MAX)
+ return -EINVAL;
+ lock_sock(sk);
+ if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
+ ret = -EBUSY;
+ } else {
+ po->tp_reserve = val;
+ ret = 0;
+ }
+ release_sock(sk);
+ return ret;
}
case PACKET_LOSS:
{
@@ -4150,6 +4157,8 @@
rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
if (unlikely(rb->frames_per_block == 0))
goto out;
+ if (unlikely(req->tp_block_size > UINT_MAX / req->tp_block_nr))
+ goto out;
if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
req->tp_frame_nr))
goto out;
@@ -4221,7 +4230,7 @@
register_prot_hook(sk);
}
spin_unlock(&po->bind_lock);
- if (closing && (po->tp_version > TPACKET_V2)) {
+ if (pg_vec && (po->tp_version > TPACKET_V2)) {
/* Because we don't support block-based V3 on tx-ring */
if (!tx_ring)
prb_shutdown_retire_blk_timer(po, rb_queue);
diff --git a/net/rds/cong.c b/net/rds/cong.c
index e6144b82..6641bcf 100644
--- a/net/rds/cong.c
+++ b/net/rds/cong.c
@@ -299,7 +299,7 @@
i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS;
off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS;
- __set_bit_le(off, (void *)map->m_page_addrs[i]);
+ set_bit_le(off, (void *)map->m_page_addrs[i]);
}
void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port)
@@ -313,7 +313,7 @@
i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS;
off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS;
- __clear_bit_le(off, (void *)map->m_page_addrs[i]);
+ clear_bit_le(off, (void *)map->m_page_addrs[i]);
}
static int rds_cong_test_bit(struct rds_cong_map *map, __be16 port)
diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
index 0936a4a..e353e32 100644
--- a/net/rds/tcp_listen.c
+++ b/net/rds/tcp_listen.c
@@ -78,7 +78,7 @@
struct inet_sock *inet;
struct rds_tcp_connection *rs_tcp;
- ret = sock_create_kern(sock_net(sock->sk), sock->sk->sk_family,
+ ret = sock_create_lite(sock->sk->sk_family,
sock->sk->sk_type, sock->sk->sk_protocol,
&new_sock);
if (ret)
diff --git a/net/rxrpc/ar-key.c b/net/rxrpc/ar-key.c
index da3cc09..91d43ab 100644
--- a/net/rxrpc/ar-key.c
+++ b/net/rxrpc/ar-key.c
@@ -215,7 +215,7 @@
unsigned int *_toklen)
{
const __be32 *xdr = *_xdr;
- unsigned int toklen = *_toklen, n_parts, loop, tmp;
+ unsigned int toklen = *_toklen, n_parts, loop, tmp, paddedlen;
/* there must be at least one name, and at least #names+1 length
* words */
@@ -245,16 +245,16 @@
toklen -= 4;
if (tmp <= 0 || tmp > AFSTOKEN_STRING_MAX)
return -EINVAL;
- if (tmp > toklen)
+ paddedlen = (tmp + 3) & ~3;
+ if (paddedlen > toklen)
return -EINVAL;
princ->name_parts[loop] = kmalloc(tmp + 1, GFP_KERNEL);
if (!princ->name_parts[loop])
return -ENOMEM;
memcpy(princ->name_parts[loop], xdr, tmp);
princ->name_parts[loop][tmp] = 0;
- tmp = (tmp + 3) & ~3;
- toklen -= tmp;
- xdr += tmp >> 2;
+ toklen -= paddedlen;
+ xdr += paddedlen >> 2;
}
if (toklen < 4)
@@ -263,16 +263,16 @@
toklen -= 4;
if (tmp <= 0 || tmp > AFSTOKEN_K5_REALM_MAX)
return -EINVAL;
- if (tmp > toklen)
+ paddedlen = (tmp + 3) & ~3;
+ if (paddedlen > toklen)
return -EINVAL;
princ->realm = kmalloc(tmp + 1, GFP_KERNEL);
if (!princ->realm)
return -ENOMEM;
memcpy(princ->realm, xdr, tmp);
princ->realm[tmp] = 0;
- tmp = (tmp + 3) & ~3;
- toklen -= tmp;
- xdr += tmp >> 2;
+ toklen -= paddedlen;
+ xdr += paddedlen >> 2;
_debug("%s/...@%s", princ->name_parts[0], princ->realm);
@@ -291,7 +291,7 @@
unsigned int *_toklen)
{
const __be32 *xdr = *_xdr;
- unsigned int toklen = *_toklen, len;
+ unsigned int toklen = *_toklen, len, paddedlen;
/* there must be at least one tag and one length word */
if (toklen <= 8)
@@ -305,15 +305,17 @@
toklen -= 8;
if (len > max_data_size)
return -EINVAL;
+ paddedlen = (len + 3) & ~3;
+ if (paddedlen > toklen)
+ return -EINVAL;
td->data_len = len;
if (len > 0) {
td->data = kmemdup(xdr, len, GFP_KERNEL);
if (!td->data)
return -ENOMEM;
- len = (len + 3) & ~3;
- toklen -= len;
- xdr += len >> 2;
+ toklen -= paddedlen;
+ xdr += paddedlen >> 2;
}
_debug("tag %x len %x", td->tag, td->data_len);
@@ -385,7 +387,7 @@
const __be32 **_xdr, unsigned int *_toklen)
{
const __be32 *xdr = *_xdr;
- unsigned int toklen = *_toklen, len;
+ unsigned int toklen = *_toklen, len, paddedlen;
/* there must be at least one length word */
if (toklen <= 4)
@@ -397,6 +399,9 @@
toklen -= 4;
if (len > AFSTOKEN_K5_TIX_MAX)
return -EINVAL;
+ paddedlen = (len + 3) & ~3;
+ if (paddedlen > toklen)
+ return -EINVAL;
*_tktlen = len;
_debug("ticket len %u", len);
@@ -405,9 +410,8 @@
*_ticket = kmemdup(xdr, len, GFP_KERNEL);
if (!*_ticket)
return -ENOMEM;
- len = (len + 3) & ~3;
- toklen -= len;
- xdr += len >> 2;
+ toklen -= paddedlen;
+ xdr += paddedlen >> 2;
}
*_xdr = xdr;
@@ -550,7 +554,7 @@
{
const __be32 *xdr = prep->data, *token;
const char *cp;
- unsigned int len, tmp, loop, ntoken, toklen, sec_ix;
+ unsigned int len, paddedlen, loop, ntoken, toklen, sec_ix;
size_t datalen = prep->datalen;
int ret;
@@ -576,22 +580,21 @@
if (len < 1 || len > AFSTOKEN_CELL_MAX)
goto not_xdr;
datalen -= 4;
- tmp = (len + 3) & ~3;
- if (tmp > datalen)
+ paddedlen = (len + 3) & ~3;
+ if (paddedlen > datalen)
goto not_xdr;
cp = (const char *) xdr;
for (loop = 0; loop < len; loop++)
if (!isprint(cp[loop]))
goto not_xdr;
- if (len < tmp)
- for (; loop < tmp; loop++)
- if (cp[loop])
- goto not_xdr;
+ for (; loop < paddedlen; loop++)
+ if (cp[loop])
+ goto not_xdr;
_debug("cellname: [%u/%u] '%*.*s'",
- len, tmp, len, len, (const char *) xdr);
- datalen -= tmp;
- xdr += tmp >> 2;
+ len, paddedlen, len, len, (const char *) xdr);
+ datalen -= paddedlen;
+ xdr += paddedlen >> 2;
/* get the token count */
if (datalen < 12)
@@ -612,10 +615,11 @@
sec_ix = ntohl(*xdr);
datalen -= 4;
_debug("token: [%x/%zx] %x", toklen, datalen, sec_ix);
- if (toklen < 20 || toklen > datalen)
+ paddedlen = (toklen + 3) & ~3;
+ if (toklen < 20 || toklen > datalen || paddedlen > datalen)
goto not_xdr;
- datalen -= (toklen + 3) & ~3;
- xdr += (toklen + 3) >> 2;
+ datalen -= paddedlen;
+ xdr += paddedlen >> 2;
} while (--loop > 0);
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index d058696..075b0d2 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -34,6 +34,7 @@
{
struct xt_tgchk_param par;
struct xt_target *target;
+ struct ipt_entry e = {};
int ret = 0;
target = xt_request_find_target(AF_INET, t->u.user.name,
@@ -42,8 +43,9 @@
return PTR_ERR(target);
t->u.kernel.target = target;
+ memset(&par, 0, sizeof(par));
par.table = table;
- par.entryinfo = NULL;
+ par.entryinfo = &e;
par.target = target;
par.targinfo = t->data;
par.hook_mask = hook;
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index e384d6a..1090a52c 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -36,14 +36,15 @@
static void tcf_mirred_release(struct tc_action *a, int bind)
{
struct tcf_mirred *m = to_mirred(a);
- struct net_device *dev = rcu_dereference_protected(m->tcfm_dev, 1);
+ struct net_device *dev;
/* We could be called either in a RCU callback or with RTNL lock held. */
spin_lock_bh(&mirred_list_lock);
list_del(&m->tcfm_list);
- spin_unlock_bh(&mirred_list_lock);
+ dev = rcu_dereference_protected(m->tcfm_dev, 1);
if (dev)
dev_put(dev);
+ spin_unlock_bh(&mirred_list_lock);
}
static const struct nla_policy mirred_policy[TCA_MIRRED_MAX + 1] = {
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 5474dc7..ca4ecc2 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -1004,6 +1004,9 @@
return sch;
}
+ /* ops->init() failed, we call ->destroy() like qdisc_create_dflt() */
+ if (ops->destroy)
+ ops->destroy(sch);
err_out3:
dev_put(dev);
kfree((char *) sch - sch->padded);
diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c
index 13d6f83..45d4b2f 100644
--- a/net/sched/sch_hhf.c
+++ b/net/sched/sch_hhf.c
@@ -636,7 +636,9 @@
q->hhf_arrays[i] = hhf_zalloc(HHF_ARRAYS_LEN *
sizeof(u32));
if (!q->hhf_arrays[i]) {
- hhf_destroy(sch);
+ /* Note: hhf_destroy() will be called
+ * by our caller.
+ */
return -ENOMEM;
}
}
@@ -647,7 +649,9 @@
q->hhf_valid_bits[i] = hhf_zalloc(HHF_ARRAYS_LEN /
BITS_PER_BYTE);
if (!q->hhf_valid_bits[i]) {
- hhf_destroy(sch);
+ /* Note: hhf_destroy() will be called
+ * by our caller.
+ */
return -ENOMEM;
}
}
diff --git a/net/sched/sch_mq.c b/net/sched/sch_mq.c
index 3e82f04..d9c8432 100644
--- a/net/sched/sch_mq.c
+++ b/net/sched/sch_mq.c
@@ -52,7 +52,7 @@
/* pre-allocate qdiscs, attachment can't fail */
priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
GFP_KERNEL);
- if (priv->qdiscs == NULL)
+ if (!priv->qdiscs)
return -ENOMEM;
for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
@@ -60,18 +60,14 @@
qdisc = qdisc_create_dflt(dev_queue, default_qdisc_ops,
TC_H_MAKE(TC_H_MAJ(sch->handle),
TC_H_MIN(ntx + 1)));
- if (qdisc == NULL)
- goto err;
+ if (!qdisc)
+ return -ENOMEM;
priv->qdiscs[ntx] = qdisc;
qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
}
sch->flags |= TCQ_F_MQROOT;
return 0;
-
-err:
- mq_destroy(sch);
- return -ENOMEM;
}
static void mq_attach(struct Qdisc *sch)
diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
index ad70ecf..66bccc5 100644
--- a/net/sched/sch_mqprio.c
+++ b/net/sched/sch_mqprio.c
@@ -117,20 +117,17 @@
/* pre-allocate qdisc, attachment can't fail */
priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]),
GFP_KERNEL);
- if (priv->qdiscs == NULL) {
- err = -ENOMEM;
- goto err;
- }
+ if (!priv->qdiscs)
+ return -ENOMEM;
for (i = 0; i < dev->num_tx_queues; i++) {
dev_queue = netdev_get_tx_queue(dev, i);
qdisc = qdisc_create_dflt(dev_queue, default_qdisc_ops,
TC_H_MAKE(TC_H_MAJ(sch->handle),
TC_H_MIN(i + 1)));
- if (qdisc == NULL) {
- err = -ENOMEM;
- goto err;
- }
+ if (!qdisc)
+ return -ENOMEM;
+
priv->qdiscs[i] = qdisc;
qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
}
@@ -143,7 +140,7 @@
priv->hw_owned = 1;
err = dev->netdev_ops->ndo_setup_tc(dev, qopt->num_tc);
if (err)
- goto err;
+ return err;
} else {
netdev_set_num_tc(dev, qopt->num_tc);
for (i = 0; i < qopt->num_tc; i++)
@@ -157,10 +154,6 @@
sch->flags |= TCQ_F_MQROOT;
return 0;
-
-err:
- mqprio_destroy(sch);
- return err;
}
static void mqprio_attach(struct Qdisc *sch)
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 498f0a2..3f2c3ee 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -434,6 +434,7 @@
qdisc_drop(head, sch);
slot_queue_add(slot, skb);
+ qdisc_tree_reduce_backlog(sch, 0, delta);
return NET_XMIT_CN;
}
@@ -465,8 +466,10 @@
/* Return Congestion Notification only if we dropped a packet
* from this flow.
*/
- if (qlen != slot->qlen)
+ if (qlen != slot->qlen) {
+ qdisc_tree_reduce_backlog(sch, 0, dropped - qdisc_pkt_len(skb));
return NET_XMIT_CN;
+ }
/* As we dropped a packet, better let upper stack know this */
qdisc_tree_reduce_backlog(sch, 1, dropped);
@@ -742,9 +745,10 @@
q->ht = sfq_alloc(sizeof(q->ht[0]) * q->divisor);
q->slots = sfq_alloc(sizeof(q->slots[0]) * q->maxflows);
if (!q->ht || !q->slots) {
- sfq_destroy(sch);
+ /* Note: sfq_destroy() will be called by our caller */
return -ENOMEM;
}
+
for (i = 0; i < q->divisor; i++)
q->ht[i] = SFQ_EMPTY_SLOT;
diff --git a/net/sctp/input.c b/net/sctp/input.c
index b6493b3..2d7859c 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -472,15 +472,14 @@
struct sctp_association **app,
struct sctp_transport **tpp)
{
+ struct sctp_init_chunk *chunkhdr, _chunkhdr;
union sctp_addr saddr;
union sctp_addr daddr;
struct sctp_af *af;
struct sock *sk = NULL;
struct sctp_association *asoc;
struct sctp_transport *transport = NULL;
- struct sctp_init_chunk *chunkhdr;
__u32 vtag = ntohl(sctphdr->vtag);
- int len = skb->len - ((void *)sctphdr - (void *)skb->data);
*app = NULL; *tpp = NULL;
@@ -515,13 +514,16 @@
* discard the packet.
*/
if (vtag == 0) {
- chunkhdr = (void *)sctphdr + sizeof(struct sctphdr);
- if (len < sizeof(struct sctphdr) + sizeof(sctp_chunkhdr_t)
- + sizeof(__be32) ||
+ /* chunk header + first 4 octets of init header */
+ chunkhdr = skb_header_pointer(skb, skb_transport_offset(skb) +
+ sizeof(struct sctphdr),
+ sizeof(struct sctp_chunkhdr) +
+ sizeof(__be32), &_chunkhdr);
+ if (!chunkhdr ||
chunkhdr->chunk_hdr.type != SCTP_CID_INIT ||
- ntohl(chunkhdr->init_hdr.init_tag) != asoc->c.my_vtag) {
+ ntohl(chunkhdr->init_hdr.init_tag) != asoc->c.my_vtag)
goto out;
- }
+
} else if (vtag != asoc->c.peer_vtag) {
goto out;
}
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 3567224..e33e9bd 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -239,12 +239,10 @@
struct sctp_bind_addr *bp;
struct ipv6_pinfo *np = inet6_sk(sk);
struct sctp_sockaddr_entry *laddr;
- union sctp_addr *baddr = NULL;
union sctp_addr *daddr = &t->ipaddr;
union sctp_addr dst_saddr;
struct in6_addr *final_p, final;
__u8 matchlen = 0;
- __u8 bmatchlen;
sctp_scope_t scope;
memset(fl6, 0, sizeof(struct flowi6));
@@ -311,23 +309,37 @@
*/
rcu_read_lock();
list_for_each_entry_rcu(laddr, &bp->address_list, list) {
- if (!laddr->valid)
+ struct dst_entry *bdst;
+ __u8 bmatchlen;
+
+ if (!laddr->valid ||
+ laddr->state != SCTP_ADDR_SRC ||
+ laddr->a.sa.sa_family != AF_INET6 ||
+ scope > sctp_scope(&laddr->a))
continue;
- if ((laddr->state == SCTP_ADDR_SRC) &&
- (laddr->a.sa.sa_family == AF_INET6) &&
- (scope <= sctp_scope(&laddr->a))) {
- bmatchlen = sctp_v6_addr_match_len(daddr, &laddr->a);
- if (!baddr || (matchlen < bmatchlen)) {
- baddr = &laddr->a;
- matchlen = bmatchlen;
- }
- }
- }
- if (baddr) {
- fl6->saddr = baddr->v6.sin6_addr;
- fl6->fl6_sport = baddr->v6.sin6_port;
+
+ fl6->saddr = laddr->a.v6.sin6_addr;
+ fl6->fl6_sport = laddr->a.v6.sin6_port;
final_p = fl6_update_dst(fl6, rcu_dereference(np->opt), &final);
- dst = ip6_dst_lookup_flow(sk, fl6, final_p);
+ bdst = ip6_dst_lookup_flow(sk, fl6, final_p);
+
+ if (!IS_ERR(bdst) &&
+ ipv6_chk_addr(dev_net(bdst->dev),
+ &laddr->a.v6.sin6_addr, bdst->dev, 1)) {
+ if (!IS_ERR_OR_NULL(dst))
+ dst_release(dst);
+ dst = bdst;
+ break;
+ }
+
+ bmatchlen = sctp_v6_addr_match_len(daddr, &laddr->a);
+ if (matchlen > bmatchlen)
+ continue;
+
+ if (!IS_ERR_OR_NULL(dst))
+ dst_release(dst);
+ dst = bdst;
+ matchlen = bmatchlen;
}
rcu_read_unlock();
@@ -498,7 +510,9 @@
{
addr->sa.sa_family = AF_INET6;
addr->v6.sin6_port = port;
+ addr->v6.sin6_flowinfo = 0;
addr->v6.sin6_addr = *saddr;
+ addr->v6.sin6_scope_id = 0;
}
/* Compare addresses exactly.
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 138f2d6..3ebf3b6 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -235,8 +235,12 @@
sctp_assoc_t id)
{
struct sctp_association *addr_asoc = NULL, *id_asoc = NULL;
- struct sctp_transport *transport;
+ struct sctp_af *af = sctp_get_af_specific(addr->ss_family);
union sctp_addr *laddr = (union sctp_addr *)addr;
+ struct sctp_transport *transport;
+
+ if (!af || sctp_verify_addr(sk, laddr, af->sockaddr_len))
+ return NULL;
addr_asoc = sctp_endpoint_lookup_assoc(sctp_sk(sk)->ep,
laddr,
@@ -4422,6 +4426,12 @@
if (!asoc)
return -EINVAL;
+ /* If there is a thread waiting on more sndbuf space for
+ * sending on this asoc, it cannot be peeled.
+ */
+ if (waitqueue_active(&asoc->wait))
+ return -EBUSY;
+
/* An association cannot be branched off from an already peeled-off
* socket, nor is this supported for tcp style sockets.
*/
@@ -6388,6 +6398,9 @@
if (sock->state != SS_UNCONNECTED)
goto out;
+ if (!sctp_sstate(sk, LISTENING) && !sctp_sstate(sk, CLOSED))
+ goto out;
+
/* If backlog is zero, disable listening. */
if (!backlog) {
if (sctp_sstate(sk, CLOSED))
@@ -6960,8 +6973,6 @@
*/
release_sock(sk);
current_timeo = schedule_timeout(current_timeo);
- if (sk != asoc->base.sk)
- goto do_error;
lock_sock(sk);
*timeo_p = current_timeo;
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 06095cc..1f0687d 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -541,9 +541,13 @@
return gss_new;
gss_msg = gss_add_msg(gss_new);
if (gss_msg == gss_new) {
- int res = rpc_queue_upcall(gss_new->pipe, &gss_new->msg);
+ int res;
+ atomic_inc(&gss_msg->count);
+ res = rpc_queue_upcall(gss_new->pipe, &gss_new->msg);
if (res) {
gss_unhash_msg(gss_new);
+ atomic_dec(&gss_msg->count);
+ gss_release_msg(gss_new);
gss_msg = ERR_PTR(res);
}
} else
@@ -836,6 +840,7 @@
warn_gssd();
gss_release_msg(gss_msg);
}
+ gss_release_msg(gss_msg);
}
static void gss_pipe_dentry_destroy(struct dentry *dir,
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 648f2a6..cb13815 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -381,6 +381,10 @@
dev = dev_get_by_name(net, driver_name);
if (!dev)
return -ENODEV;
+ if (tipc_mtu_bad(dev, 0)) {
+ dev_put(dev);
+ return -EINVAL;
+ }
/* Associate TIPC bearer with L2 bearer */
rcu_assign_pointer(b->media_ptr, dev);
@@ -570,14 +574,19 @@
if (!b_ptr)
return NOTIFY_DONE;
- b_ptr->mtu = dev->mtu;
-
switch (evt) {
case NETDEV_CHANGE:
if (netif_carrier_ok(dev))
break;
case NETDEV_GOING_DOWN:
+ tipc_reset_bearer(net, b_ptr);
+ break;
case NETDEV_CHANGEMTU:
+ if (tipc_mtu_bad(dev, 0)) {
+ bearer_disable(net, b_ptr);
+ break;
+ }
+ b_ptr->mtu = dev->mtu;
tipc_reset_bearer(net, b_ptr);
break;
case NETDEV_CHANGEADDR:
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
index 552185b..5f11e18 100644
--- a/net/tipc/bearer.h
+++ b/net/tipc/bearer.h
@@ -39,6 +39,7 @@
#include "netlink.h"
#include "core.h"
+#include "msg.h"
#include <net/genetlink.h>
#define MAX_MEDIA 3
@@ -61,6 +62,9 @@
#define TIPC_MEDIA_TYPE_IB 2
#define TIPC_MEDIA_TYPE_UDP 3
+/* minimum bearer MTU */
+#define TIPC_MIN_BEARER_MTU (MAX_H_SIZE + INT_H_SIZE)
+
/**
* struct tipc_node_map - set of node identifiers
* @count: # of nodes in set
@@ -226,4 +230,13 @@
void tipc_bearer_bc_xmit(struct net *net, u32 bearer_id,
struct sk_buff_head *xmitq);
+/* check if device MTU is too low for tipc headers */
+static inline bool tipc_mtu_bad(struct net_device *dev, unsigned int reserve)
+{
+ if (dev->mtu >= TIPC_MIN_BEARER_MTU + reserve)
+ return false;
+ netdev_warn(dev, "MTU too low for tipc bearer\n");
+ return true;
+}
+
#endif /* _TIPC_BEARER_H */
diff --git a/net/tipc/core.c b/net/tipc/core.c
index 03a8428..e2bdb07a 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -69,6 +69,7 @@
if (err)
goto out_nametbl;
+ INIT_LIST_HEAD(&tn->dist_queue);
err = tipc_topsrv_start(net);
if (err)
goto out_subscr;
diff --git a/net/tipc/core.h b/net/tipc/core.h
index 18e95a8..fe3b89e 100644
--- a/net/tipc/core.h
+++ b/net/tipc/core.h
@@ -103,6 +103,9 @@
spinlock_t nametbl_lock;
struct name_table *nametbl;
+ /* Name dist queue */
+ struct list_head dist_queue;
+
/* Topology subscription server */
struct tipc_server *topsrv;
atomic_t subscription_count;
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index f51c8bd..c4c151b 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -40,11 +40,6 @@
int sysctl_tipc_named_timeout __read_mostly = 2000;
-/**
- * struct tipc_dist_queue - queue holding deferred name table updates
- */
-static struct list_head tipc_dist_queue = LIST_HEAD_INIT(tipc_dist_queue);
-
struct distr_queue_item {
struct distr_item i;
u32 dtype;
@@ -67,6 +62,8 @@
/**
* named_prepare_buf - allocate & initialize a publication message
+ *
+ * The buffer returned is of size INT_H_SIZE + payload size
*/
static struct sk_buff *named_prepare_buf(struct net *net, u32 type, u32 size,
u32 dest)
@@ -171,9 +168,9 @@
struct publication *publ;
struct sk_buff *skb = NULL;
struct distr_item *item = NULL;
- uint msg_dsz = (tipc_node_get_mtu(net, dnode, 0) / ITEM_SIZE) *
- ITEM_SIZE;
- uint msg_rem = msg_dsz;
+ u32 msg_dsz = ((tipc_node_get_mtu(net, dnode, 0) - INT_H_SIZE) /
+ ITEM_SIZE) * ITEM_SIZE;
+ u32 msg_rem = msg_dsz;
list_for_each_entry(publ, pls, local_list) {
/* Prepare next buffer: */
@@ -340,9 +337,11 @@
* tipc_named_add_backlog - add a failed name table update to the backlog
*
*/
-static void tipc_named_add_backlog(struct distr_item *i, u32 type, u32 node)
+static void tipc_named_add_backlog(struct net *net, struct distr_item *i,
+ u32 type, u32 node)
{
struct distr_queue_item *e;
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
unsigned long now = get_jiffies_64();
e = kzalloc(sizeof(*e), GFP_ATOMIC);
@@ -352,7 +351,7 @@
e->node = node;
e->expires = now + msecs_to_jiffies(sysctl_tipc_named_timeout);
memcpy(e, i, sizeof(*i));
- list_add_tail(&e->next, &tipc_dist_queue);
+ list_add_tail(&e->next, &tn->dist_queue);
}
/**
@@ -362,10 +361,11 @@
void tipc_named_process_backlog(struct net *net)
{
struct distr_queue_item *e, *tmp;
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
char addr[16];
unsigned long now = get_jiffies_64();
- list_for_each_entry_safe(e, tmp, &tipc_dist_queue, next) {
+ list_for_each_entry_safe(e, tmp, &tn->dist_queue, next) {
if (time_after(e->expires, now)) {
if (!tipc_update_nametbl(net, &e->i, e->node, e->dtype))
continue;
@@ -405,7 +405,7 @@
node = msg_orignode(msg);
while (count--) {
if (!tipc_update_nametbl(net, item, node, mtype))
- tipc_named_add_backlog(item, mtype, node);
+ tipc_named_add_backlog(net, item, mtype, node);
item++;
}
kfree_skb(skb);
diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
index a0c9057..f86c655 100644
--- a/net/tipc/netlink_compat.c
+++ b/net/tipc/netlink_compat.c
@@ -258,13 +258,15 @@
arg = nlmsg_new(0, GFP_KERNEL);
if (!arg) {
kfree_skb(msg->rep);
+ msg->rep = NULL;
return -ENOMEM;
}
err = __tipc_nl_compat_dumpit(cmd, msg, arg);
- if (err)
+ if (err) {
kfree_skb(msg->rep);
-
+ msg->rep = NULL;
+ }
kfree_skb(arg);
return err;
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 3926b56..2df0b98 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -102,9 +102,10 @@
static void tipc_node_kref_release(struct kref *kref)
{
- struct tipc_node *node = container_of(kref, struct tipc_node, kref);
+ struct tipc_node *n = container_of(kref, struct tipc_node, kref);
- tipc_node_delete(node);
+ kfree(n->bc_entry.link);
+ kfree_rcu(n, rcu);
}
void tipc_node_put(struct tipc_node *node)
@@ -216,21 +217,20 @@
{
list_del_rcu(&node->list);
hlist_del_rcu(&node->hash);
- kfree(node->bc_entry.link);
- kfree_rcu(node, rcu);
+ tipc_node_put(node);
+
+ del_timer_sync(&node->timer);
+ tipc_node_put(node);
}
void tipc_node_stop(struct net *net)
{
- struct tipc_net *tn = net_generic(net, tipc_net_id);
+ struct tipc_net *tn = tipc_net(net);
struct tipc_node *node, *t_node;
spin_lock_bh(&tn->node_list_lock);
- list_for_each_entry_safe(node, t_node, &tn->node_list, list) {
- if (del_timer(&node->timer))
- tipc_node_put(node);
- tipc_node_put(node);
- }
+ list_for_each_entry_safe(node, t_node, &tn->node_list, list)
+ tipc_node_delete(node);
spin_unlock_bh(&tn->node_list_lock);
}
@@ -313,9 +313,7 @@
if (rc & TIPC_LINK_DOWN_EVT)
tipc_node_link_down(n, bearer_id, false);
}
- if (!mod_timer(&n->timer, jiffies + n->keepalive_intv))
- tipc_node_get(n);
- tipc_node_put(n);
+ mod_timer(&n->timer, jiffies + n->keepalive_intv);
}
/**
@@ -730,7 +728,7 @@
state = SELF_UP_PEER_UP;
break;
case SELF_LOST_CONTACT_EVT:
- state = SELF_DOWN_PEER_LEAVING;
+ state = SELF_DOWN_PEER_DOWN;
break;
case SELF_ESTABL_CONTACT_EVT:
case PEER_LOST_CONTACT_EVT:
@@ -749,7 +747,7 @@
state = SELF_UP_PEER_UP;
break;
case PEER_LOST_CONTACT_EVT:
- state = SELF_LEAVING_PEER_DOWN;
+ state = SELF_DOWN_PEER_DOWN;
break;
case SELF_LOST_CONTACT_EVT:
case PEER_ESTABL_CONTACT_EVT:
diff --git a/net/tipc/server.c b/net/tipc/server.c
index 922e04a..50f5b0c 100644
--- a/net/tipc/server.c
+++ b/net/tipc/server.c
@@ -452,6 +452,11 @@
if (!con)
return -EINVAL;
+ if (!test_bit(CF_CONNECTED, &con->flags)) {
+ conn_put(con);
+ return 0;
+ }
+
e = tipc_alloc_entry(data, len);
if (!e) {
conn_put(con);
@@ -465,12 +470,8 @@
list_add_tail(&e->list, &con->outqueue);
spin_unlock_bh(&con->outqueue_lock);
- if (test_bit(CF_CONNECTED, &con->flags)) {
- if (!queue_work(s->send_wq, &con->swork))
- conn_put(con);
- } else {
+ if (!queue_work(s->send_wq, &con->swork))
conn_put(con);
- }
return 0;
}
@@ -494,7 +495,7 @@
int ret;
spin_lock_bh(&con->outqueue_lock);
- while (1) {
+ while (test_bit(CF_CONNECTED, &con->flags)) {
e = list_entry(con->outqueue.next, struct outqueue_entry,
list);
if ((struct list_head *) e == &con->outqueue)
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index b26b7a1..65171f8 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -777,9 +777,11 @@
* @tsk: receiving socket
* @skb: pointer to message buffer.
*/
-static void tipc_sk_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb)
+static void tipc_sk_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb,
+ struct sk_buff_head *xmitq)
{
struct sock *sk = &tsk->sk;
+ u32 onode = tsk_own_node(tsk);
struct tipc_msg *hdr = buf_msg(skb);
int mtyp = msg_type(hdr);
int conn_cong;
@@ -792,7 +794,8 @@
if (mtyp == CONN_PROBE) {
msg_set_type(hdr, CONN_PROBE_REPLY);
- tipc_sk_respond(sk, skb, TIPC_OK);
+ if (tipc_msg_reverse(onode, &skb, TIPC_OK))
+ __skb_queue_tail(xmitq, skb);
return;
} else if (mtyp == CONN_ACK) {
conn_cong = tsk_conn_cong(tsk);
@@ -1647,7 +1650,8 @@
*
* Returns true if message was added to socket receive queue, otherwise false
*/
-static bool filter_rcv(struct sock *sk, struct sk_buff *skb)
+static bool filter_rcv(struct sock *sk, struct sk_buff *skb,
+ struct sk_buff_head *xmitq)
{
struct socket *sock = sk->sk_socket;
struct tipc_sock *tsk = tipc_sk(sk);
@@ -1657,7 +1661,7 @@
int usr = msg_user(hdr);
if (unlikely(msg_user(hdr) == CONN_MANAGER)) {
- tipc_sk_proto_rcv(tsk, skb);
+ tipc_sk_proto_rcv(tsk, skb, xmitq);
return false;
}
@@ -1700,7 +1704,8 @@
return true;
reject:
- tipc_sk_respond(sk, skb, err);
+ if (tipc_msg_reverse(tsk_own_node(tsk), &skb, err))
+ __skb_queue_tail(xmitq, skb);
return false;
}
@@ -1716,9 +1721,24 @@
static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
unsigned int truesize = skb->truesize;
+ struct sk_buff_head xmitq;
+ u32 dnode, selector;
- if (likely(filter_rcv(sk, skb)))
+ __skb_queue_head_init(&xmitq);
+
+ if (likely(filter_rcv(sk, skb, &xmitq))) {
atomic_add(truesize, &tipc_sk(sk)->dupl_rcvcnt);
+ return 0;
+ }
+
+ if (skb_queue_empty(&xmitq))
+ return 0;
+
+ /* Send response/rejected message */
+ skb = __skb_dequeue(&xmitq);
+ dnode = msg_destnode(buf_msg(skb));
+ selector = msg_origport(buf_msg(skb));
+ tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector);
return 0;
}
@@ -1732,12 +1752,13 @@
* Caller must hold socket lock
*/
static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
- u32 dport)
+ u32 dport, struct sk_buff_head *xmitq)
{
+ unsigned long time_limit = jiffies + 2;
+ struct sk_buff *skb;
unsigned int lim;
atomic_t *dcnt;
- struct sk_buff *skb;
- unsigned long time_limit = jiffies + 2;
+ u32 onode;
while (skb_queue_len(inputq)) {
if (unlikely(time_after_eq(jiffies, time_limit)))
@@ -1749,20 +1770,22 @@
/* Add message directly to receive queue if possible */
if (!sock_owned_by_user(sk)) {
- filter_rcv(sk, skb);
+ filter_rcv(sk, skb, xmitq);
continue;
}
/* Try backlog, compensating for double-counted bytes */
dcnt = &tipc_sk(sk)->dupl_rcvcnt;
- if (sk->sk_backlog.len)
+ if (!sk->sk_backlog.len)
atomic_set(dcnt, 0);
lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);
if (likely(!sk_add_backlog(sk, skb, lim)))
continue;
/* Overload => reject message back to sender */
- tipc_sk_respond(sk, skb, TIPC_ERR_OVERLOAD);
+ onode = tipc_own_addr(sock_net(sk));
+ if (tipc_msg_reverse(onode, &skb, TIPC_ERR_OVERLOAD))
+ __skb_queue_tail(xmitq, skb);
break;
}
}
@@ -1775,12 +1798,14 @@
*/
void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
{
+ struct sk_buff_head xmitq;
u32 dnode, dport = 0;
int err;
struct tipc_sock *tsk;
struct sock *sk;
struct sk_buff *skb;
+ __skb_queue_head_init(&xmitq);
while (skb_queue_len(inputq)) {
dport = tipc_skb_peek_port(inputq, dport);
tsk = tipc_sk_lookup(net, dport);
@@ -1788,9 +1813,14 @@
if (likely(tsk)) {
sk = &tsk->sk;
if (likely(spin_trylock_bh(&sk->sk_lock.slock))) {
- tipc_sk_enqueue(inputq, sk, dport);
+ tipc_sk_enqueue(inputq, sk, dport, &xmitq);
spin_unlock_bh(&sk->sk_lock.slock);
}
+ /* Send pending response/rejected messages, if any */
+ while ((skb = __skb_dequeue(&xmitq))) {
+ dnode = msg_destnode(buf_msg(skb));
+ tipc_node_xmit_skb(net, skb, dnode, dport);
+ }
sock_put(sk);
continue;
}
diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
index 6af78c6..78d6b78 100644
--- a/net/tipc/udp_media.c
+++ b/net/tipc/udp_media.c
@@ -52,7 +52,7 @@
/* IANA assigned UDP port */
#define UDP_PORT_DEFAULT 6118
-#define UDP_MIN_HEADROOM 28
+#define UDP_MIN_HEADROOM 48
static const struct nla_policy tipc_nl_udp_policy[TIPC_NLA_UDP_MAX + 1] = {
[TIPC_NLA_UDP_UNSPEC] = {.type = NLA_UNSPEC},
@@ -376,6 +376,11 @@
udp_conf.local_ip.s_addr = htonl(INADDR_ANY);
udp_conf.use_udp_checksums = false;
ub->ifindex = dev->ifindex;
+ if (tipc_mtu_bad(dev, sizeof(struct iphdr) +
+ sizeof(struct udphdr))) {
+ err = -EINVAL;
+ goto err;
+ }
b->mtu = dev->mtu - sizeof(struct iphdr)
- sizeof(struct udphdr);
#if IS_ENABLED(CONFIG_IPV6)
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 1f5d18d..decd8b8 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -997,7 +997,8 @@
struct path path = { NULL, NULL };
err = -EINVAL;
- if (sunaddr->sun_family != AF_UNIX)
+ if (addr_len < offsetofend(struct sockaddr_un, sun_family) ||
+ sunaddr->sun_family != AF_UNIX)
goto out;
if (addr_len == sizeof(short)) {
@@ -1108,6 +1109,10 @@
unsigned int hash;
int err;
+ err = -EINVAL;
+ if (alen < offsetofend(struct sockaddr, sa_family))
+ goto out;
+
if (addr->sa_family != AF_UNSPEC) {
err = unix_mkname(sunaddr, alen, &hash);
if (err < 0)
diff --git a/net/unix/garbage.c b/net/unix/garbage.c
index 6a0d485..c36757e7 100644
--- a/net/unix/garbage.c
+++ b/net/unix/garbage.c
@@ -146,6 +146,7 @@
if (s) {
struct unix_sock *u = unix_sk(s);
+ BUG_ON(!atomic_long_read(&u->inflight));
BUG_ON(list_empty(&u->link));
if (atomic_long_dec_and_test(&u->inflight))
@@ -341,6 +342,14 @@
}
list_del(&cursor);
+ /* Now gc_candidates contains only garbage. Restore original
+ * inflight counters for these as well, and remove the skbuffs
+ * which are creating the cycle(s).
+ */
+ skb_queue_head_init(&hitlist);
+ list_for_each_entry(u, &gc_candidates, link)
+ scan_children(&u->sk, inc_inflight, &hitlist);
+
/* not_cycle_list contains those sockets which do not make up a
* cycle. Restore these to the inflight list.
*/
@@ -350,14 +359,6 @@
list_move_tail(&u->link, &gc_inflight_list);
}
- /* Now gc_candidates contains only garbage. Restore original
- * inflight counters for these as well, and remove the skbuffs
- * which are creating the cycle(s).
- */
- skb_queue_head_init(&hitlist);
- list_for_each_entry(u, &gc_candidates, link)
- scan_children(&u->sk, inc_inflight, &hitlist);
-
spin_unlock(&unix_gc_lock);
/* Here we are. Hitlist is filled. Die. */
diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c
index 0a369bb..662bdd2 100644
--- a/net/vmw_vsock/vmci_transport.c
+++ b/net/vmw_vsock/vmci_transport.c
@@ -842,7 +842,7 @@
* qp_handle.
*/
if (vmci_handle_is_invalid(e_payload->handle) ||
- vmci_handle_is_equal(trans->qp_handle, e_payload->handle))
+ !vmci_handle_is_equal(trans->qp_handle, e_payload->handle))
return;
/* We don't ask for delayed CBs when we subscribe to this event (we
@@ -2154,7 +2154,7 @@
MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION("VMCI transport for Virtual Sockets");
-MODULE_VERSION("1.0.2.0-k");
+MODULE_VERSION("1.0.3.0-k");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("vmware_vsock");
MODULE_ALIAS_NETPROTO(PF_VSOCK);
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 15b1613..6099e94 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -509,21 +509,17 @@
{
int err;
- rtnl_lock();
-
if (!cb->args[0]) {
err = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize,
nl80211_fam.attrbuf, nl80211_fam.maxattr,
nl80211_policy);
if (err)
- goto out_unlock;
+ return err;
*wdev = __cfg80211_wdev_from_attrs(sock_net(skb->sk),
nl80211_fam.attrbuf);
- if (IS_ERR(*wdev)) {
- err = PTR_ERR(*wdev);
- goto out_unlock;
- }
+ if (IS_ERR(*wdev))
+ return PTR_ERR(*wdev);
*rdev = wiphy_to_rdev((*wdev)->wiphy);
/* 0 is the first index - add 1 to parse only once */
cb->args[0] = (*rdev)->wiphy_idx + 1;
@@ -533,10 +529,8 @@
struct wiphy *wiphy = wiphy_idx_to_wiphy(cb->args[0] - 1);
struct wireless_dev *tmp;
- if (!wiphy) {
- err = -ENODEV;
- goto out_unlock;
- }
+ if (!wiphy)
+ return -ENODEV;
*rdev = wiphy_to_rdev(wiphy);
*wdev = NULL;
@@ -547,21 +541,11 @@
}
}
- if (!*wdev) {
- err = -ENODEV;
- goto out_unlock;
- }
+ if (!*wdev)
+ return -ENODEV;
}
return 0;
- out_unlock:
- rtnl_unlock();
- return err;
-}
-
-static void nl80211_finish_wdev_dump(struct cfg80211_registered_device *rdev)
-{
- rtnl_unlock();
}
/* IE validation */
@@ -4278,9 +4262,10 @@
int sta_idx = cb->args[2];
int err;
+ rtnl_lock();
err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev);
if (err)
- return err;
+ goto out_err;
if (!wdev->netdev) {
err = -EINVAL;
@@ -4316,7 +4301,7 @@
cb->args[2] = sta_idx;
err = skb->len;
out_err:
- nl80211_finish_wdev_dump(rdev);
+ rtnl_unlock();
return err;
}
@@ -5033,9 +5018,10 @@
int path_idx = cb->args[2];
int err;
+ rtnl_lock();
err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev);
if (err)
- return err;
+ goto out_err;
if (!rdev->ops->dump_mpath) {
err = -EOPNOTSUPP;
@@ -5069,7 +5055,7 @@
cb->args[2] = path_idx;
err = skb->len;
out_err:
- nl80211_finish_wdev_dump(rdev);
+ rtnl_unlock();
return err;
}
@@ -5229,9 +5215,10 @@
int path_idx = cb->args[2];
int err;
+ rtnl_lock();
err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev);
if (err)
- return err;
+ goto out_err;
if (!rdev->ops->dump_mpp) {
err = -EOPNOTSUPP;
@@ -5264,7 +5251,7 @@
cb->args[2] = path_idx;
err = skb->len;
out_err:
- nl80211_finish_wdev_dump(rdev);
+ rtnl_unlock();
return err;
}
@@ -6112,6 +6099,10 @@
struct nlattr *attr1, *attr2;
int n_channels = 0, tmp1, tmp2;
+ nla_for_each_nested(attr1, freqs, tmp1)
+ if (nla_len(attr1) != sizeof(u32))
+ return 0;
+
nla_for_each_nested(attr1, freqs, tmp1) {
n_channels++;
/*
@@ -7333,9 +7324,12 @@
int start = cb->args[2], idx = 0;
int err;
+ rtnl_lock();
err = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev);
- if (err)
+ if (err) {
+ rtnl_unlock();
return err;
+ }
wdev_lock(wdev);
spin_lock_bh(&rdev->bss_lock);
@@ -7358,7 +7352,7 @@
wdev_unlock(wdev);
cb->args[2] = idx;
- nl80211_finish_wdev_dump(rdev);
+ rtnl_unlock();
return skb->len;
}
@@ -7442,9 +7436,10 @@
int res;
bool radio_stats;
+ rtnl_lock();
res = nl80211_prepare_wdev_dump(skb, cb, &rdev, &wdev);
if (res)
- return res;
+ goto out_err;
/* prepare_wdev_dump parsed the attributes */
radio_stats = nl80211_fam.attrbuf[NL80211_ATTR_SURVEY_RADIO_STATS];
@@ -7485,7 +7480,7 @@
cb->args[2] = survey_idx;
res = skb->len;
out_err:
- nl80211_finish_wdev_dump(rdev);
+ rtnl_unlock();
return res;
}
@@ -10561,17 +10556,13 @@
void *data = NULL;
unsigned int data_len = 0;
- rtnl_lock();
-
if (cb->args[0]) {
/* subtract the 1 again here */
struct wiphy *wiphy = wiphy_idx_to_wiphy(cb->args[0] - 1);
struct wireless_dev *tmp;
- if (!wiphy) {
- err = -ENODEV;
- goto out_unlock;
- }
+ if (!wiphy)
+ return -ENODEV;
*rdev = wiphy_to_rdev(wiphy);
*wdev = NULL;
@@ -10592,13 +10583,11 @@
nl80211_fam.attrbuf, nl80211_fam.maxattr,
nl80211_policy);
if (err)
- goto out_unlock;
+ return err;
if (!nl80211_fam.attrbuf[NL80211_ATTR_VENDOR_ID] ||
- !nl80211_fam.attrbuf[NL80211_ATTR_VENDOR_SUBCMD]) {
- err = -EINVAL;
- goto out_unlock;
- }
+ !nl80211_fam.attrbuf[NL80211_ATTR_VENDOR_SUBCMD])
+ return -EINVAL;
*wdev = __cfg80211_wdev_from_attrs(sock_net(skb->sk),
nl80211_fam.attrbuf);
@@ -10607,10 +10596,8 @@
*rdev = __cfg80211_rdev_from_attrs(sock_net(skb->sk),
nl80211_fam.attrbuf);
- if (IS_ERR(*rdev)) {
- err = PTR_ERR(*rdev);
- goto out_unlock;
- }
+ if (IS_ERR(*rdev))
+ return PTR_ERR(*rdev);
vid = nla_get_u32(nl80211_fam.attrbuf[NL80211_ATTR_VENDOR_ID]);
subcmd = nla_get_u32(nl80211_fam.attrbuf[NL80211_ATTR_VENDOR_SUBCMD]);
@@ -10623,19 +10610,15 @@
if (vcmd->info.vendor_id != vid || vcmd->info.subcmd != subcmd)
continue;
- if (!vcmd->dumpit) {
- err = -EOPNOTSUPP;
- goto out_unlock;
- }
+ if (!vcmd->dumpit)
+ return -EOPNOTSUPP;
vcmd_idx = i;
break;
}
- if (vcmd_idx < 0) {
- err = -EOPNOTSUPP;
- goto out_unlock;
- }
+ if (vcmd_idx < 0)
+ return -EOPNOTSUPP;
if (nl80211_fam.attrbuf[NL80211_ATTR_VENDOR_DATA]) {
data = nla_data(nl80211_fam.attrbuf[NL80211_ATTR_VENDOR_DATA]);
@@ -10652,9 +10635,6 @@
/* keep rtnl locked in successful case */
return 0;
- out_unlock:
- rtnl_unlock();
- return err;
}
static int nl80211_vendor_cmd_dump(struct sk_buff *skb,
@@ -10669,9 +10649,10 @@
int err;
struct nlattr *vendor_data;
+ rtnl_lock();
err = nl80211_prepare_vendor_dump(skb, cb, &rdev, &wdev);
if (err)
- return err;
+ goto out;
vcmd_idx = cb->args[2];
data = (void *)cb->args[3];
@@ -10680,18 +10661,26 @@
if (vcmd->flags & (WIPHY_VENDOR_CMD_NEED_WDEV |
WIPHY_VENDOR_CMD_NEED_NETDEV)) {
- if (!wdev)
- return -EINVAL;
+ if (!wdev) {
+ err = -EINVAL;
+ goto out;
+ }
if (vcmd->flags & WIPHY_VENDOR_CMD_NEED_NETDEV &&
- !wdev->netdev)
- return -EINVAL;
+ !wdev->netdev) {
+ err = -EINVAL;
+ goto out;
+ }
if (vcmd->flags & WIPHY_VENDOR_CMD_NEED_RUNNING) {
if (wdev->netdev &&
- !netif_running(wdev->netdev))
- return -ENETDOWN;
- if (!wdev->netdev && !wdev->p2p_started)
- return -ENETDOWN;
+ !netif_running(wdev->netdev)) {
+ err = -ENETDOWN;
+ goto out;
+ }
+ if (!wdev->netdev && !wdev->p2p_started) {
+ err = -ENETDOWN;
+ goto out;
+ }
}
}
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index cf0193b..e8994c3 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1214,7 +1214,7 @@
}
static struct xfrm_policy *xfrm_sk_policy_lookup(const struct sock *sk, int dir,
- const struct flowi *fl)
+ const struct flowi *fl, u16 family)
{
struct xfrm_policy *pol;
struct net *net = sock_net(sk);
@@ -1223,8 +1223,7 @@
read_lock_bh(&net->xfrm.xfrm_policy_lock);
pol = rcu_dereference(sk->sk_policy[dir]);
if (pol != NULL) {
- bool match = xfrm_selector_match(&pol->selector, fl,
- sk->sk_family);
+ bool match = xfrm_selector_match(&pol->selector, fl, family);
int err = 0;
if (match) {
@@ -1773,43 +1772,6 @@
goto out;
}
-#ifdef CONFIG_XFRM_SUB_POLICY
-static int xfrm_dst_alloc_copy(void **target, const void *src, int size)
-{
- if (!*target) {
- *target = kmalloc(size, GFP_ATOMIC);
- if (!*target)
- return -ENOMEM;
- }
-
- memcpy(*target, src, size);
- return 0;
-}
-#endif
-
-static int xfrm_dst_update_parent(struct dst_entry *dst,
- const struct xfrm_selector *sel)
-{
-#ifdef CONFIG_XFRM_SUB_POLICY
- struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
- return xfrm_dst_alloc_copy((void **)&(xdst->partner),
- sel, sizeof(*sel));
-#else
- return 0;
-#endif
-}
-
-static int xfrm_dst_update_origin(struct dst_entry *dst,
- const struct flowi *fl)
-{
-#ifdef CONFIG_XFRM_SUB_POLICY
- struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
- return xfrm_dst_alloc_copy((void **)&(xdst->origin), fl, sizeof(*fl));
-#else
- return 0;
-#endif
-}
-
static int xfrm_expand_policies(const struct flowi *fl, u16 family,
struct xfrm_policy **pols,
int *num_pols, int *num_xfrms)
@@ -1881,16 +1843,6 @@
xdst = (struct xfrm_dst *)dst;
xdst->num_xfrms = err;
- if (num_pols > 1)
- err = xfrm_dst_update_parent(dst, &pols[1]->selector);
- else
- err = xfrm_dst_update_origin(dst, fl);
- if (unlikely(err)) {
- dst_free(dst);
- XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR);
- return ERR_PTR(err);
- }
-
xdst->num_pols = num_pols;
memcpy(xdst->pols, pols, sizeof(struct xfrm_policy *) * num_pols);
xdst->policy_genid = atomic_read(&pols[0]->genid);
@@ -2218,7 +2170,7 @@
sk = sk_const_to_full_sk(sk);
if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
num_pols = 1;
- pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl);
+ pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl, family);
err = xfrm_expand_policies(fl, family, pols,
&num_pols, &num_xfrms);
if (err < 0)
@@ -2497,7 +2449,7 @@
pol = NULL;
sk = sk_to_full_sk(sk);
if (sk && sk->sk_policy[dir]) {
- pol = xfrm_sk_policy_lookup(sk, dir, &fl);
+ pol = xfrm_sk_policy_lookup(sk, dir, &fl, family);
if (IS_ERR(pol)) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
return 0;
@@ -3027,6 +2979,11 @@
{
int rv;
+ /* Initialize the per-net locks here */
+ spin_lock_init(&net->xfrm.xfrm_state_lock);
+ rwlock_init(&net->xfrm.xfrm_policy_lock);
+ mutex_init(&net->xfrm.xfrm_cfg_mutex);
+
rv = xfrm_statistics_init(net);
if (rv < 0)
goto out_statistics;
@@ -3043,11 +3000,6 @@
if (rv < 0)
goto out;
- /* Initialize the per-net locks here */
- spin_lock_init(&net->xfrm.xfrm_state_lock);
- rwlock_init(&net->xfrm.xfrm_policy_lock);
- mutex_init(&net->xfrm.xfrm_cfg_mutex);
-
return 0;
out:
@@ -3320,9 +3272,15 @@
struct xfrm_state *x_new[XFRM_MAX_DEPTH];
struct xfrm_migrate *mp;
+ /* Stage 0 - sanity checks */
if ((err = xfrm_migrate_check(m, num_migrate)) < 0)
goto out;
+ if (dir >= XFRM_POLICY_MAX) {
+ err = -EINVAL;
+ goto out;
+ }
+
/* Stage 1 - find policy */
if ((pol = xfrm_migrate_policy_find(sel, dir, type, net)) == NULL) {
err = -ENOENT;
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 6aec368..4c64ae4 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -3410,7 +3410,7 @@
$fixedline =~ s/\s*=\s*$/ = {/;
fix_insert_line($fixlinenr, $fixedline);
$fixedline = $line;
- $fixedline =~ s/^(.\s*){\s*/$1/;
+ $fixedline =~ s/^(.\s*)\{\s*/$1/;
fix_insert_line($fixlinenr, $fixedline);
}
}
@@ -3760,7 +3760,7 @@
my $fixedline = rtrim($prevrawline) . " {";
fix_insert_line($fixlinenr, $fixedline);
$fixedline = $rawline;
- $fixedline =~ s/^(.\s*){\s*/$1\t/;
+ $fixedline =~ s/^(.\s*)\{\s*/$1\t/;
if ($fixedline !~ /^\+\s*$/) {
fix_insert_line($fixlinenr, $fixedline);
}
@@ -4249,7 +4249,7 @@
if (ERROR("SPACING",
"space required before the open brace '{'\n" . $herecurr) &&
$fix) {
- $fixed[$fixlinenr] =~ s/^(\+.*(?:do|\))){/$1 {/;
+ $fixed[$fixlinenr] =~ s/^(\+.*(?:do|\)))\{/$1 {/;
}
}
diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c
index ed5a9c1..9ce9d50 100644
--- a/security/integrity/ima/ima_appraise.c
+++ b/security/integrity/ima/ima_appraise.c
@@ -203,10 +203,11 @@
cause = "missing-hash";
status = INTEGRITY_NOLABEL;
- if (opened & FILE_CREATED) {
+ if (opened & FILE_CREATED)
iint->flags |= IMA_NEW_FILE;
+ if ((iint->flags & IMA_NEW_FILE) &&
+ !(iint->flags & IMA_DIGSIG_REQUIRED))
status = INTEGRITY_PASS;
- }
goto out;
}
diff --git a/security/keys/encrypted-keys/encrypted.c b/security/keys/encrypted-keys/encrypted.c
index 696ccfa..3189885 100644
--- a/security/keys/encrypted-keys/encrypted.c
+++ b/security/keys/encrypted-keys/encrypted.c
@@ -428,7 +428,7 @@
static struct key *request_master_key(struct encrypted_key_payload *epayload,
const u8 **master_key, size_t *master_keylen)
{
- struct key *mkey = NULL;
+ struct key *mkey = ERR_PTR(-EINVAL);
if (!strncmp(epayload->master_desc, KEY_TRUSTED_PREFIX,
KEY_TRUSTED_PREFIX_LEN)) {
diff --git a/security/keys/gc.c b/security/keys/gc.c
index addf060..9cb4fe4 100644
--- a/security/keys/gc.c
+++ b/security/keys/gc.c
@@ -46,7 +46,7 @@
* immediately unlinked.
*/
struct key_type key_type_dead = {
- .name = "dead",
+ .name = ".dead",
};
/*
diff --git a/security/keys/key.c b/security/keys/key.c
index 5348089..09c10b1 100644
--- a/security/keys/key.c
+++ b/security/keys/key.c
@@ -934,12 +934,11 @@
/* the key must be writable */
ret = key_permission(key_ref, KEY_NEED_WRITE);
if (ret < 0)
- goto error;
+ return ret;
/* attempt to update it if supported */
- ret = -EOPNOTSUPP;
if (!key->type->update)
- goto error;
+ return -EOPNOTSUPP;
memset(&prep, 0, sizeof(prep));
prep.data = payload;
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index cf4edda..671709d8 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -271,7 +271,8 @@
* Create and join an anonymous session keyring or join a named session
* keyring, creating it if necessary. A named session keyring must have Search
* permission for it to be joined. Session keyrings without this permit will
- * be skipped over.
+ * be skipped over. It is not permitted for userspace to create or join
+ * keyrings whose name begin with a dot.
*
* If successful, the ID of the joined session keyring will be returned.
*/
@@ -288,12 +289,16 @@
ret = PTR_ERR(name);
goto error;
}
+
+ ret = -EPERM;
+ if (name[0] == '.')
+ goto error_name;
}
/* join the session */
ret = join_session_keyring(name);
+error_name:
kfree(name);
-
error:
return ret;
}
@@ -1223,8 +1228,8 @@
* Read or set the default keyring in which request_key() will cache keys and
* return the old setting.
*
- * If a process keyring is specified then this will be created if it doesn't
- * yet exist. The old setting will be returned if successful.
+ * If a thread or process keyring is specified then it will be created if it
+ * doesn't yet exist. The old setting will be returned if successful.
*/
long keyctl_set_reqkey_keyring(int reqkey_defl)
{
@@ -1249,11 +1254,8 @@
case KEY_REQKEY_DEFL_PROCESS_KEYRING:
ret = install_process_keyring_to_cred(new);
- if (ret < 0) {
- if (ret != -EEXIST)
- goto error;
- ret = 0;
- }
+ if (ret < 0)
+ goto error;
goto set;
case KEY_REQKEY_DEFL_DEFAULT:
diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
index e6d50172..4ed9091 100644
--- a/security/keys/process_keys.c
+++ b/security/keys/process_keys.c
@@ -125,13 +125,18 @@
}
/*
- * Install a fresh thread keyring directly to new credentials. This keyring is
- * allowed to overrun the quota.
+ * Install a thread keyring to the given credentials struct if it didn't have
+ * one already. This is allowed to overrun the quota.
+ *
+ * Return: 0 if a thread keyring is now present; -errno on failure.
*/
int install_thread_keyring_to_cred(struct cred *new)
{
struct key *keyring;
+ if (new->thread_keyring)
+ return 0;
+
keyring = keyring_alloc("_tid", new->uid, new->gid, new,
KEY_POS_ALL | KEY_USR_VIEW,
KEY_ALLOC_QUOTA_OVERRUN, NULL);
@@ -143,7 +148,9 @@
}
/*
- * Install a fresh thread keyring, discarding the old one.
+ * Install a thread keyring to the current task if it didn't have one already.
+ *
+ * Return: 0 if a thread keyring is now present; -errno on failure.
*/
static int install_thread_keyring(void)
{
@@ -154,8 +161,6 @@
if (!new)
return -ENOMEM;
- BUG_ON(new->thread_keyring);
-
ret = install_thread_keyring_to_cred(new);
if (ret < 0) {
abort_creds(new);
@@ -166,17 +171,17 @@
}
/*
- * Install a process keyring directly to a credentials struct.
+ * Install a process keyring to the given credentials struct if it didn't have
+ * one already. This is allowed to overrun the quota.
*
- * Returns -EEXIST if there was already a process keyring, 0 if one installed,
- * and other value on any other error
+ * Return: 0 if a process keyring is now present; -errno on failure.
*/
int install_process_keyring_to_cred(struct cred *new)
{
struct key *keyring;
if (new->process_keyring)
- return -EEXIST;
+ return 0;
keyring = keyring_alloc("_pid", new->uid, new->gid, new,
KEY_POS_ALL | KEY_USR_VIEW,
@@ -189,11 +194,9 @@
}
/*
- * Make sure a process keyring is installed for the current process. The
- * existing process keyring is not replaced.
+ * Install a process keyring to the current task if it didn't have one already.
*
- * Returns 0 if there is a process keyring by the end of this function, some
- * error otherwise.
+ * Return: 0 if a process keyring is now present; -errno on failure.
*/
static int install_process_keyring(void)
{
@@ -207,14 +210,18 @@
ret = install_process_keyring_to_cred(new);
if (ret < 0) {
abort_creds(new);
- return ret != -EEXIST ? ret : 0;
+ return ret;
}
return commit_creds(new);
}
/*
- * Install a session keyring directly to a credentials struct.
+ * Install the given keyring as the session keyring of the given credentials
+ * struct, replacing the existing one if any. If the given keyring is NULL,
+ * then install a new anonymous session keyring.
+ *
+ * Return: 0 on success; -errno on failure.
*/
int install_session_keyring_to_cred(struct cred *cred, struct key *keyring)
{
@@ -249,8 +256,11 @@
}
/*
- * Install a session keyring, discarding the old one. If a keyring is not
- * supplied, an empty one is invented.
+ * Install the given keyring as the session keyring of the current task,
+ * replacing the existing one if any. If the given keyring is NULL, then
+ * install a new anonymous session keyring.
+ *
+ * Return: 0 on success; -errno on failure.
*/
static int install_session_keyring(struct key *keyring)
{
diff --git a/sound/core/control.c b/sound/core/control.c
index b4fe9b0..bd01d49 100644
--- a/sound/core/control.c
+++ b/sound/core/control.c
@@ -1126,7 +1126,7 @@
mutex_lock(&ue->card->user_ctl_lock);
change = ue->tlv_data_size != size;
if (!change)
- change = memcmp(ue->tlv_data, new_data, size);
+ change = memcmp(ue->tlv_data, new_data, size) != 0;
kfree(ue->tlv_data);
ue->tlv_data = new_data;
ue->tlv_data_size = size;
diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
index 58e79e0..e326c1d 100644
--- a/sound/core/seq/seq_clientmgr.c
+++ b/sound/core/seq/seq_clientmgr.c
@@ -1530,19 +1530,14 @@
void __user *arg)
{
struct snd_seq_queue_info info;
- int result;
struct snd_seq_queue *q;
if (copy_from_user(&info, arg, sizeof(info)))
return -EFAULT;
- result = snd_seq_queue_alloc(client->number, info.locked, info.flags);
- if (result < 0)
- return result;
-
- q = queueptr(result);
- if (q == NULL)
- return -EINVAL;
+ q = snd_seq_queue_alloc(client->number, info.locked, info.flags);
+ if (IS_ERR(q))
+ return PTR_ERR(q);
info.queue = q->queue;
info.locked = q->locked;
@@ -1552,7 +1547,7 @@
if (! info.name[0])
snprintf(info.name, sizeof(info.name), "Queue-%d", q->queue);
strlcpy(q->name, info.name, sizeof(q->name));
- queuefree(q);
+ snd_use_lock_free(&q->use_lock);
if (copy_to_user(arg, &info, sizeof(info)))
return -EFAULT;
@@ -1921,6 +1916,7 @@
info.output_pool != client->pool->size)) {
if (snd_seq_write_pool_allocated(client)) {
/* remove all existing cells */
+ snd_seq_pool_mark_closing(client->pool);
snd_seq_queue_client_leave_cells(client->number);
snd_seq_pool_done(client->pool);
}
diff --git a/sound/core/seq/seq_fifo.c b/sound/core/seq/seq_fifo.c
index 86240d0..3490d21 100644
--- a/sound/core/seq/seq_fifo.c
+++ b/sound/core/seq/seq_fifo.c
@@ -70,6 +70,9 @@
return;
*fifo = NULL;
+ if (f->pool)
+ snd_seq_pool_mark_closing(f->pool);
+
snd_seq_fifo_clear(f);
/* wake up clients if any */
@@ -262,6 +265,10 @@
/* NOTE: overflow flag is not cleared */
spin_unlock_irqrestore(&f->lock, flags);
+ /* close the old pool and wait until all users are gone */
+ snd_seq_pool_mark_closing(oldpool);
+ snd_use_lock_sync(&f->use_lock);
+
/* release cells in old pool */
for (cell = oldhead; cell; cell = next) {
next = cell->next;
diff --git a/sound/core/seq/seq_lock.c b/sound/core/seq/seq_lock.c
index 3b693e9..12ba833 100644
--- a/sound/core/seq/seq_lock.c
+++ b/sound/core/seq/seq_lock.c
@@ -28,19 +28,16 @@
/* wait until all locks are released */
void snd_use_lock_sync_helper(snd_use_lock_t *lockp, const char *file, int line)
{
- int max_count = 5 * HZ;
+ int warn_count = 5 * HZ;
if (atomic_read(lockp) < 0) {
pr_warn("ALSA: seq_lock: lock trouble [counter = %d] in %s:%d\n", atomic_read(lockp), file, line);
return;
}
while (atomic_read(lockp) > 0) {
- if (max_count == 0) {
- pr_warn("ALSA: seq_lock: timeout [%d left] in %s:%d\n", atomic_read(lockp), file, line);
- break;
- }
+ if (warn_count-- == 0)
+ pr_warn("ALSA: seq_lock: waiting [%d left] in %s:%d\n", atomic_read(lockp), file, line);
schedule_timeout_uninterruptible(1);
- max_count--;
}
}
diff --git a/sound/core/seq/seq_memory.c b/sound/core/seq/seq_memory.c
index dfa5156..5847c44 100644
--- a/sound/core/seq/seq_memory.c
+++ b/sound/core/seq/seq_memory.c
@@ -414,6 +414,18 @@
return 0;
}
+/* refuse the further insertion to the pool */
+void snd_seq_pool_mark_closing(struct snd_seq_pool *pool)
+{
+ unsigned long flags;
+
+ if (snd_BUG_ON(!pool))
+ return;
+ spin_lock_irqsave(&pool->lock, flags);
+ pool->closing = 1;
+ spin_unlock_irqrestore(&pool->lock, flags);
+}
+
/* remove events */
int snd_seq_pool_done(struct snd_seq_pool *pool)
{
@@ -424,10 +436,6 @@
return -EINVAL;
/* wait for closing all threads */
- spin_lock_irqsave(&pool->lock, flags);
- pool->closing = 1;
- spin_unlock_irqrestore(&pool->lock, flags);
-
if (waitqueue_active(&pool->output_sleep))
wake_up(&pool->output_sleep);
@@ -484,6 +492,7 @@
*ppool = NULL;
if (pool == NULL)
return 0;
+ snd_seq_pool_mark_closing(pool);
snd_seq_pool_done(pool);
kfree(pool);
return 0;
diff --git a/sound/core/seq/seq_memory.h b/sound/core/seq/seq_memory.h
index 4a2ec77..32f959c 100644
--- a/sound/core/seq/seq_memory.h
+++ b/sound/core/seq/seq_memory.h
@@ -84,6 +84,7 @@
int snd_seq_pool_init(struct snd_seq_pool *pool);
/* done pool - free events */
+void snd_seq_pool_mark_closing(struct snd_seq_pool *pool);
int snd_seq_pool_done(struct snd_seq_pool *pool);
/* create pool */
diff --git a/sound/core/seq/seq_queue.c b/sound/core/seq/seq_queue.c
index 450c518..79e0c56 100644
--- a/sound/core/seq/seq_queue.c
+++ b/sound/core/seq/seq_queue.c
@@ -184,22 +184,26 @@
static void queue_use(struct snd_seq_queue *queue, int client, int use);
/* allocate a new queue -
- * return queue index value or negative value for error
+ * return pointer to new queue or ERR_PTR(-errno) for error
+ * The new queue's use_lock is set to 1. It is the caller's responsibility to
+ * call snd_use_lock_free(&q->use_lock).
*/
-int snd_seq_queue_alloc(int client, int locked, unsigned int info_flags)
+struct snd_seq_queue *snd_seq_queue_alloc(int client, int locked, unsigned int info_flags)
{
struct snd_seq_queue *q;
q = queue_new(client, locked);
if (q == NULL)
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
q->info_flags = info_flags;
queue_use(q, client, 1);
+ snd_use_lock_use(&q->use_lock);
if (queue_list_add(q) < 0) {
+ snd_use_lock_free(&q->use_lock);
queue_delete(q);
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
}
- return q->queue;
+ return q;
}
/* delete a queue - queue must be owned by the client */
diff --git a/sound/core/seq/seq_queue.h b/sound/core/seq/seq_queue.h
index 30c8111..7190934 100644
--- a/sound/core/seq/seq_queue.h
+++ b/sound/core/seq/seq_queue.h
@@ -71,7 +71,7 @@
/* create new queue (constructor) */
-int snd_seq_queue_alloc(int client, int locked, unsigned int flags);
+struct snd_seq_queue *snd_seq_queue_alloc(int client, int locked, unsigned int flags);
/* delete queue (destructor) */
int snd_seq_queue_delete(int client, int queueid);
diff --git a/sound/firewire/lib.h b/sound/firewire/lib.h
index f3f6f84..bb5f8cd 100644
--- a/sound/firewire/lib.h
+++ b/sound/firewire/lib.h
@@ -42,7 +42,7 @@
struct snd_rawmidi_substream *substream;
snd_fw_async_midi_port_fill fill;
- unsigned int consume_bytes;
+ int consume_bytes;
};
int snd_fw_async_midi_port_init(struct snd_fw_async_midi_port *port,
diff --git a/sound/isa/msnd/msnd_midi.c b/sound/isa/msnd/msnd_midi.c
index ffc67fd..58e59cd 100644
--- a/sound/isa/msnd/msnd_midi.c
+++ b/sound/isa/msnd/msnd_midi.c
@@ -120,24 +120,24 @@
unsigned long flags;
struct snd_msndmidi *mpu = mpuv;
void *pwMIDQData = mpu->dev->mappedbase + MIDQ_DATA_BUFF;
+ u16 head, tail, size;
spin_lock_irqsave(&mpu->input_lock, flags);
- while (readw(mpu->dev->MIDQ + JQS_wTail) !=
- readw(mpu->dev->MIDQ + JQS_wHead)) {
- u16 wTmp, val;
- val = readw(pwMIDQData + 2 * readw(mpu->dev->MIDQ + JQS_wHead));
+ head = readw(mpu->dev->MIDQ + JQS_wHead);
+ tail = readw(mpu->dev->MIDQ + JQS_wTail);
+ size = readw(mpu->dev->MIDQ + JQS_wSize);
+ if (head > size || tail > size)
+ goto out;
+ while (head != tail) {
+ unsigned char val = readw(pwMIDQData + 2 * head);
- if (test_bit(MSNDMIDI_MODE_BIT_INPUT_TRIGGER,
- &mpu->mode))
- snd_rawmidi_receive(mpu->substream_input,
- (unsigned char *)&val, 1);
-
- wTmp = readw(mpu->dev->MIDQ + JQS_wHead) + 1;
- if (wTmp > readw(mpu->dev->MIDQ + JQS_wSize))
- writew(0, mpu->dev->MIDQ + JQS_wHead);
- else
- writew(wTmp, mpu->dev->MIDQ + JQS_wHead);
+ if (test_bit(MSNDMIDI_MODE_BIT_INPUT_TRIGGER, &mpu->mode))
+ snd_rawmidi_receive(mpu->substream_input, &val, 1);
+ if (++head > size)
+ head = 0;
+ writew(head, mpu->dev->MIDQ + JQS_wHead);
}
+ out:
spin_unlock_irqrestore(&mpu->input_lock, flags);
}
EXPORT_SYMBOL(snd_msndmidi_input_read);
diff --git a/sound/isa/msnd/msnd_pinnacle.c b/sound/isa/msnd/msnd_pinnacle.c
index 4c07266..a31ea6c 100644
--- a/sound/isa/msnd/msnd_pinnacle.c
+++ b/sound/isa/msnd/msnd_pinnacle.c
@@ -170,23 +170,24 @@
{
struct snd_msnd *chip = dev_id;
void *pwDSPQData = chip->mappedbase + DSPQ_DATA_BUFF;
+ u16 head, tail, size;
/* Send ack to DSP */
/* inb(chip->io + HP_RXL); */
/* Evaluate queued DSP messages */
- while (readw(chip->DSPQ + JQS_wTail) != readw(chip->DSPQ + JQS_wHead)) {
- u16 wTmp;
-
- snd_msnd_eval_dsp_msg(chip,
- readw(pwDSPQData + 2 * readw(chip->DSPQ + JQS_wHead)));
-
- wTmp = readw(chip->DSPQ + JQS_wHead) + 1;
- if (wTmp > readw(chip->DSPQ + JQS_wSize))
- writew(0, chip->DSPQ + JQS_wHead);
- else
- writew(wTmp, chip->DSPQ + JQS_wHead);
+ head = readw(chip->DSPQ + JQS_wHead);
+ tail = readw(chip->DSPQ + JQS_wTail);
+ size = readw(chip->DSPQ + JQS_wSize);
+ if (head > size || tail > size)
+ goto out;
+ while (head != tail) {
+ snd_msnd_eval_dsp_msg(chip, readw(pwDSPQData + 2 * head));
+ if (++head > size)
+ head = 0;
+ writew(head, chip->DSPQ + JQS_wHead);
}
+ out:
/* Send ack to DSP */
inb(chip->io + HP_RXL);
return IRQ_HANDLED;
diff --git a/sound/pci/au88x0/au88x0_core.c b/sound/pci/au88x0/au88x0_core.c
index 7417718..d3125c1 100644
--- a/sound/pci/au88x0/au88x0_core.c
+++ b/sound/pci/au88x0/au88x0_core.c
@@ -2150,8 +2150,7 @@
stream->resources, en,
VORTEX_RESOURCE_SRC)) < 0) {
memset(stream->resources, 0,
- sizeof(unsigned char) *
- VORTEX_RESOURCE_LAST);
+ sizeof(stream->resources));
return -EBUSY;
}
if (stream->type != VORTEX_PCM_A3D) {
@@ -2161,7 +2160,7 @@
VORTEX_RESOURCE_MIXIN)) < 0) {
memset(stream->resources,
0,
- sizeof(unsigned char) * VORTEX_RESOURCE_LAST);
+ sizeof(stream->resources));
return -EBUSY;
}
}
@@ -2174,8 +2173,7 @@
stream->resources, en,
VORTEX_RESOURCE_A3D)) < 0) {
memset(stream->resources, 0,
- sizeof(unsigned char) *
- VORTEX_RESOURCE_LAST);
+ sizeof(stream->resources));
dev_err(vortex->card->dev,
"out of A3D sources. Sorry\n");
return -EBUSY;
@@ -2289,8 +2287,7 @@
VORTEX_RESOURCE_MIXOUT))
< 0) {
memset(stream->resources, 0,
- sizeof(unsigned char) *
- VORTEX_RESOURCE_LAST);
+ sizeof(stream->resources));
return -EBUSY;
}
if ((src[i] =
@@ -2298,8 +2295,7 @@
stream->resources, en,
VORTEX_RESOURCE_SRC)) < 0) {
memset(stream->resources, 0,
- sizeof(unsigned char) *
- VORTEX_RESOURCE_LAST);
+ sizeof(stream->resources));
return -EBUSY;
}
}
diff --git a/sound/pci/ctxfi/cthw20k1.c b/sound/pci/ctxfi/cthw20k1.c
index ab4cdab..79edd88 100644
--- a/sound/pci/ctxfi/cthw20k1.c
+++ b/sound/pci/ctxfi/cthw20k1.c
@@ -1905,7 +1905,7 @@
return err;
/* Set DMA transfer mask */
- if (dma_set_mask(&pci->dev, DMA_BIT_MASK(dma_bits))) {
+ if (!dma_set_mask(&pci->dev, DMA_BIT_MASK(dma_bits))) {
dma_set_coherent_mask(&pci->dev, DMA_BIT_MASK(dma_bits));
} else {
dma_set_mask(&pci->dev, DMA_BIT_MASK(32));
diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
index 373fcad..776dffa 100644
--- a/sound/pci/hda/hda_codec.h
+++ b/sound/pci/hda/hda_codec.h
@@ -294,6 +294,8 @@
#define list_for_each_codec(c, bus) \
list_for_each_entry(c, &(bus)->core.codec_list, core.list)
+#define list_for_each_codec_safe(c, n, bus) \
+ list_for_each_entry_safe(c, n, &(bus)->core.codec_list, core.list)
/* snd_hda_codec_read/write optional flags */
#define HDA_RW_NO_RESPONSE_FALLBACK (1 << 0)
diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c
index 5baf8b5..9c6e10f 100644
--- a/sound/pci/hda/hda_controller.c
+++ b/sound/pci/hda/hda_controller.c
@@ -1128,8 +1128,12 @@
/* configure each codec instance */
int azx_codec_configure(struct azx *chip)
{
- struct hda_codec *codec;
- list_for_each_codec(codec, &chip->bus) {
+ struct hda_codec *codec, *next;
+
+ /* use _safe version here since snd_hda_codec_configure() deregisters
+ * the device upon error and deletes itself from the bus list.
+ */
+ list_for_each_codec_safe(codec, next, &chip->bus) {
snd_hda_codec_configure(codec);
}
return 0;
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index dc2fa57..689df78 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -3190,6 +3190,7 @@
spec->input_paths[i][nums]);
spec->input_paths[i][nums] =
spec->input_paths[i][n];
+ spec->input_paths[i][n] = 0;
}
}
nums++;
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 8f3e5e9..e6de496 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -2166,7 +2166,20 @@
/* cancel the pending probing work */
chip = card->private_data;
hda = container_of(chip, struct hda_intel, chip);
+ /* FIXME: below is an ugly workaround.
+ * Both device_release_driver() and driver_probe_device()
+ * take *both* the device's and its parent's lock before
+ * calling the remove() and probe() callbacks. The codec
+ * probe takes the locks of both the codec itself and its
+ * parent, i.e. the PCI controller dev. Meanwhile, when
+ * the PCI controller is unbound, it takes its lock, too
+ * ==> ouch, a deadlock!
+ * As a workaround, we unlock temporarily here the controller
+ * device during cancel_work_sync() call.
+ */
+ device_unlock(&pci->dev);
cancel_work_sync(&hda->probe_work);
+ device_lock(&pci->dev);
snd_card_free(card);
}
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 46f7b02..ac5de43 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -854,6 +854,7 @@
SND_PCI_QUIRK(0x17aa, 0x390b, "Lenovo G50-80", CXT_FIXUP_STEREO_DMIC),
SND_PCI_QUIRK(0x17aa, 0x3975, "Lenovo U300s", CXT_FIXUP_STEREO_DMIC),
SND_PCI_QUIRK(0x17aa, 0x3977, "Lenovo IdeaPad U310", CXT_FIXUP_STEREO_DMIC),
+ SND_PCI_QUIRK(0x17aa, 0x3978, "Lenovo G50-70", CXT_FIXUP_STEREO_DMIC),
SND_PCI_QUIRK(0x17aa, 0x397b, "Lenovo S205", CXT_FIXUP_STEREO_DMIC),
SND_PCI_QUIRK_VENDOR(0x17aa, "Thinkpad", CXT_FIXUP_THINKPAD_ACPI),
SND_PCI_QUIRK(0x1c06, 0x2011, "Lemote A1004", CXT_PINCFG_LEMOTE_A1004),
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index cf0785d..5cab24f 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -2233,6 +2233,7 @@
SND_PCI_QUIRK(0x1043, 0x8691, "ASUS ROG Ranger VIII", ALC882_FIXUP_GPIO3),
SND_PCI_QUIRK(0x104d, 0x9047, "Sony Vaio TT", ALC889_FIXUP_VAIO_TT),
SND_PCI_QUIRK(0x104d, 0x905a, "Sony Vaio Z", ALC882_FIXUP_NO_PRIMARY_HP),
+ SND_PCI_QUIRK(0x104d, 0x9060, "Sony Vaio VPCL14M1R", ALC882_FIXUP_NO_PRIMARY_HP),
SND_PCI_QUIRK(0x104d, 0x9043, "Sony Vaio VGC-LN51JGB", ALC882_FIXUP_NO_PRIMARY_HP),
SND_PCI_QUIRK(0x104d, 0x9044, "Sony VAIO AiO", ALC882_FIXUP_NO_PRIMARY_HP),
@@ -4831,6 +4832,7 @@
ALC292_FIXUP_DISABLE_AAMIX,
ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK,
ALC298_FIXUP_DELL1_MIC_NO_PRESENCE,
+ ALC298_FIXUP_DELL_AIO_MIC_NO_PRESENCE,
ALC275_FIXUP_DELL_XPS,
ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE,
ALC293_FIXUP_LENOVO_SPK_NOISE,
@@ -5429,6 +5431,15 @@
.chained = true,
.chain_id = ALC269_FIXUP_HEADSET_MODE
},
+ [ALC298_FIXUP_DELL_AIO_MIC_NO_PRESENCE] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x18, 0x01a1913c }, /* use as headset mic, without its own jack detect */
+ { }
+ },
+ .chained = true,
+ .chain_id = ALC269_FIXUP_HEADSET_MODE
+ },
[ALC275_FIXUP_DELL_XPS] = {
.type = HDA_FIXUP_VERBS,
.v.verbs = (const struct hda_verb[]) {
@@ -5501,7 +5512,7 @@
.type = HDA_FIXUP_FUNC,
.v.func = alc298_fixup_speaker_volume,
.chained = true,
- .chain_id = ALC298_FIXUP_DELL1_MIC_NO_PRESENCE,
+ .chain_id = ALC298_FIXUP_DELL_AIO_MIC_NO_PRESENCE,
},
[ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER] = {
.type = HDA_FIXUP_PINS,
@@ -6040,6 +6051,8 @@
ALC295_STANDARD_PINS,
{0x17, 0x21014040},
{0x18, 0x21a19050}),
+ SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
+ ALC295_STANDARD_PINS),
SND_HDA_PIN_QUIRK(0x10ec0298, 0x1028, "Dell", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE,
ALC298_STANDARD_PINS,
{0x17, 0x90170110}),
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 37b70f8..0abab79 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -1537,6 +1537,8 @@
"Dell Inspiron 1501", STAC_9200_DELL_M26),
SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x01f6,
"unknown Dell", STAC_9200_DELL_M26),
+ SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0201,
+ "Dell Latitude D430", STAC_9200_DELL_M22),
/* Panasonic */
SND_PCI_QUIRK(0x10f7, 0x8338, "Panasonic CF-74", STAC_9200_PANASONIC),
/* Gateway machines needs EAPD to be set on resume */
diff --git a/sound/ppc/awacs.c b/sound/ppc/awacs.c
index 09da7b5..1468e4b 100644
--- a/sound/ppc/awacs.c
+++ b/sound/ppc/awacs.c
@@ -991,6 +991,7 @@
if (err < 0)
return err;
}
+ master_vol = NULL;
if (pm7500)
err = build_mixers(chip,
ARRAY_SIZE(snd_pmac_awacs_mixers_pmac7500),
diff --git a/sound/soc/atmel/atmel-classd.c b/sound/soc/atmel/atmel-classd.c
index 8276675..78a9856 100644
--- a/sound/soc/atmel/atmel-classd.c
+++ b/sound/soc/atmel/atmel-classd.c
@@ -343,7 +343,7 @@
}
#define CLASSD_ACLK_RATE_11M2896_MPY_8 (112896 * 100 * 8)
-#define CLASSD_ACLK_RATE_12M288_MPY_8 (12228 * 1000 * 8)
+#define CLASSD_ACLK_RATE_12M288_MPY_8 (12288 * 1000 * 8)
static struct {
int rate;
diff --git a/sound/soc/codecs/nau8825.c b/sound/soc/codecs/nau8825.c
index c1b87c5..b3fddba 100644
--- a/sound/soc/codecs/nau8825.c
+++ b/sound/soc/codecs/nau8825.c
@@ -936,7 +936,8 @@
NAU8825_FLL_INTEGER_MASK, fll_param->fll_int);
/* FLL pre-scaler */
regmap_update_bits(nau8825->regmap, NAU8825_REG_FLL4,
- NAU8825_FLL_REF_DIV_MASK, fll_param->clk_ref_div);
+ NAU8825_FLL_REF_DIV_MASK,
+ fll_param->clk_ref_div << NAU8825_FLL_REF_DIV_SFT);
/* select divided VCO input */
regmap_update_bits(nau8825->regmap, NAU8825_REG_FLL5,
NAU8825_FLL_FILTER_SW_MASK, 0x0000);
diff --git a/sound/soc/codecs/nau8825.h b/sound/soc/codecs/nau8825.h
index dff8edb..a0b2207 100644
--- a/sound/soc/codecs/nau8825.h
+++ b/sound/soc/codecs/nau8825.h
@@ -114,7 +114,8 @@
#define NAU8825_FLL_INTEGER_MASK (0x3ff << 0)
/* FLL4 (0x07) */
-#define NAU8825_FLL_REF_DIV_MASK (0x3 << 10)
+#define NAU8825_FLL_REF_DIV_SFT 10
+#define NAU8825_FLL_REF_DIV_MASK (0x3 << NAU8825_FLL_REF_DIV_SFT)
/* FLL5 (0x08) */
#define NAU8825_FLL_FILTER_SW_MASK (0x1 << 14)
diff --git a/sound/soc/codecs/tas2557/tas2557-core.c b/sound/soc/codecs/tas2557/tas2557-core.c
index a75bd16..402b0a0 100644
--- a/sound/soc/codecs/tas2557/tas2557-core.c
+++ b/sound/soc/codecs/tas2557/tas2557-core.c
@@ -126,6 +126,18 @@
0xFFFFFFFF, 0xFFFFFFFF
};
+
+static void force_vbob_pwm_mode(struct tas2557_priv *pTAS2557, bool enable)
+{
+ if (pTAS2557->vbob_regulator == NULL)
+ return;
+
+ if (regulator_set_mode(pTAS2557->vbob_regulator, enable ?
+ REGULATOR_MODE_FAST : REGULATOR_MODE_NORMAL))
+ dev_err(pTAS2557->dev, "failed to set BoB to %s\n",
+ enable ? "FAST" : "NORMAL");
+}
+
static int tas2557_dev_load_data(struct tas2557_priv *pTAS2557,
enum channel chl, unsigned int *pData)
{
@@ -752,6 +764,19 @@
if (hrtimer_active(&pTAS2557->mtimer))
hrtimer_cancel(&pTAS2557->mtimer);
+
+ if (pTAS2557->failsafe_retry < 10) {
+ pTAS2557->failsafe_retry++;
+ msleep(100);
+ dev_err(pTAS2557->dev, "I2C COMM error, restart SmartAmp.\n");
+ schedule_delayed_work(&pTAS2557->irq_work,
+ msecs_to_jiffies(100));
+ return;
+ } else {
+ pTAS2557->failsafe_retry = 0;
+ dev_err(pTAS2557->dev, "I2C COMM error, give up retry\n");
+ }
+
pTAS2557->enableIRQ(pTAS2557, channel_both, false);
tas2557_dev_load_data(pTAS2557, channel_both, p_tas2557_shutdown_data);
pTAS2557->mbPowerUp = false;
@@ -761,6 +786,7 @@
pTAS2557->write(pTAS2557, channel_both, TAS2557_SPK_CTRL_REG, 0x04);
if (pTAS2557->mpFirmware != NULL)
tas2557_clear_firmware(pTAS2557->mpFirmware);
+ pTAS2557->failsafe_retry = 0;
}
int tas2557_checkPLL(struct tas2557_priv *pTAS2557)
@@ -959,6 +985,7 @@
goto end;
if ((nValue&0xff) != TAS2557_SAFE_GUARD_PATTERN) {
dev_err(pTAS2557->dev, "ERROR Left channel safe guard failure!\n");
+ pTAS2557->mnErrCode |= ERROR_DEVA_I2C_COMM;
nResult = -EPIPE;
goto end;
}
@@ -967,6 +994,7 @@
goto end;
if ((nValue&0xff) != TAS2557_SAFE_GUARD_PATTERN) {
dev_err(pTAS2557->dev, "ERROR right channel safe guard failure!\n");
+ pTAS2557->mnErrCode |= ERROR_DEVB_I2C_COMM;
nResult = -EPIPE;
goto end;
}
@@ -1049,10 +1077,14 @@
if (nResult < 0) {
/* TAS2557_RELOAD_FIRMWARE_START */
if (pTAS2557->mnErrCode & (ERROR_DEVA_I2C_COMM | ERROR_DEVB_I2C_COMM | ERROR_PRAM_CRCCHK | ERROR_YRAM_CRCCHK))
- tas2557_set_program(pTAS2557,
- pTAS2557->mnCurrentProgram,
- pTAS2557->mnCurrentConfiguration);
+ failsafe(pTAS2557);
/* TAS2557_RELOAD_FIRMWARE_END */
+ } else {
+ if ((pTAS2557->bob_fast_profile != UINT_MAX) &&
+ (pTAS2557->bob_fast_profile ==
+ pTAS2557->mnCurrentConfiguration)) {
+ force_vbob_pwm_mode(pTAS2557, pTAS2557->mbPowerUp);
+ }
}
return nResult;
@@ -2226,9 +2258,7 @@
if (nResult < 0) {
/* TAS2557_RELOAD_FIRMWARE_START */
if (pTAS2557->mnErrCode & (ERROR_DEVA_I2C_COMM | ERROR_DEVB_I2C_COMM | ERROR_PRAM_CRCCHK | ERROR_YRAM_CRCCHK))
- tas2557_set_program(pTAS2557,
- pTAS2557->mnCurrentProgram,
- pTAS2557->mnCurrentConfiguration);
+ failsafe(pTAS2557);
/* TAS2557_RELOAD_FIRMWARE_END */
}
@@ -2272,6 +2302,11 @@
dev_dbg(pTAS2557->dev, "%s, load new conf %s\n", __func__, pConfiguration->mpName);
nResult = tas2557_load_configuration(pTAS2557, nConfiguration, false);
+ if ((pTAS2557->bob_fast_profile != UINT_MAX) && (pTAS2557->mbPowerUp)) {
+ force_vbob_pwm_mode(pTAS2557,
+ pTAS2557->bob_fast_profile ==
+ pTAS2557->mnCurrentConfiguration);
+ }
end:
return nResult;
@@ -2924,6 +2959,21 @@
else
dev_dbg(pTAS2557->dev, "ti,cal-file-name=%s\n",
pTAS2557->cal_file_name);
+
+ rc = of_property_read_u32(np, "ti,bob-fast-profile", &value);
+
+ if (rc) {
+ dev_dbg(pTAS2557->dev, "ti,bob-fast-profile not set\n");
+ pTAS2557->bob_fast_profile = UINT_MAX;
+ } else {
+ pTAS2557->bob_fast_profile = value;
+ dev_info(pTAS2557->dev, "ti,bob-fast-profile set to %d\n",
+ pTAS2557->bob_fast_profile);
+ pTAS2557->vbob_regulator = devm_regulator_get(pTAS2557->dev,
+ "pm8998_bob");
+ if (pTAS2557->vbob_regulator == NULL)
+ dev_err(pTAS2557->dev, "failed to get BoB regulator\n");
+ }
end:
return ret;
diff --git a/sound/soc/codecs/tas2557/tas2557-regmap.c b/sound/soc/codecs/tas2557/tas2557-regmap.c
index 402fb3d..486d811 100644
--- a/sound/soc/codecs/tas2557/tas2557-regmap.c
+++ b/sound/soc/codecs/tas2557/tas2557-regmap.c
@@ -696,6 +696,9 @@
mutex_lock(&pTAS2557->file_lock);
#endif
+ if (pTAS2557->mnErrCode & (ERROR_DEVA_I2C_COMM | ERROR_DEVB_I2C_COMM | ERROR_FAILSAFE))
+ goto program;
+
if (pTAS2557->system_suspend)
goto end;
@@ -1186,6 +1189,7 @@
pTAS2557->hw_reset = tas2557_hw_reset;
pTAS2557->suspend = tas2557_suspend;
pTAS2557->resume = tas2557_resume;
+ pTAS2557->failsafe_retry = 0;
mutex_init(&pTAS2557->dev_lock);
diff --git a/sound/soc/codecs/tas2557/tas2557.h b/sound/soc/codecs/tas2557/tas2557.h
index 6b426c8..c2c4c1e 100644
--- a/sound/soc/codecs/tas2557/tas2557.h
+++ b/sound/soc/codecs/tas2557/tas2557.h
@@ -536,6 +536,9 @@
*/
bool mbBypassTMax;
+ unsigned int bob_fast_profile;
+ struct regulator *vbob_regulator;
+
#ifdef CONFIG_TAS2557_CODEC_STEREO
struct mutex codec_lock;
#endif
@@ -545,6 +548,7 @@
int mnCurrentReg;
struct mutex file_lock;
#endif
+ int failsafe_retry;
const char *cal_file_name;
spinlock_t irq_lock;
};
diff --git a/sound/soc/codecs/tlv320aic3x.c b/sound/soc/codecs/tlv320aic3x.c
index a564759..5a3f544 100644
--- a/sound/soc/codecs/tlv320aic3x.c
+++ b/sound/soc/codecs/tlv320aic3x.c
@@ -126,6 +126,16 @@
{ 108, 0x00 }, { 109, 0x00 },
};
+static bool aic3x_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case AIC3X_RESET:
+ return true;
+ default:
+ return false;
+ }
+}
+
static const struct regmap_config aic3x_regmap = {
.reg_bits = 8,
.val_bits = 8,
@@ -133,6 +143,9 @@
.max_register = DAC_ICC_ADJ,
.reg_defaults = aic3x_reg,
.num_reg_defaults = ARRAY_SIZE(aic3x_reg),
+
+ .volatile_reg = aic3x_volatile_reg,
+
.cache_type = REGCACHE_RBTREE,
};
diff --git a/sound/soc/codecs/wcd-dsp-mgr.c b/sound/soc/codecs/wcd-dsp-mgr.c
index 71a2052..6e42885 100644
--- a/sound/soc/codecs/wcd-dsp-mgr.c
+++ b/sound/soc/codecs/wcd-dsp-mgr.c
@@ -410,22 +410,24 @@
/* Go through the list of segments and download one by one */
list_for_each_entry(seg, wdsp->seg_list, list) {
ret = wdsp_load_each_segment(wdsp, seg);
- if (IS_ERR_VALUE(ret)) {
- wdsp_broadcast_event_downseq(wdsp,
- WDSP_EVENT_DLOAD_FAILED,
- NULL);
+ if (ret)
goto dload_error;
- }
}
+ /* Flush the list before setting status and notifying components */
+ wdsp_flush_segment_list(wdsp->seg_list);
+
WDSP_SET_STATUS(wdsp, status);
/* Notify all components that image is downloaded */
wdsp_broadcast_event_downseq(wdsp, post, NULL);
+done:
+ return ret;
dload_error:
wdsp_flush_segment_list(wdsp->seg_list);
-done:
+ wdsp_broadcast_event_downseq(wdsp, WDSP_EVENT_DLOAD_FAILED, NULL);
+
return ret;
}
@@ -479,10 +481,14 @@
/* Make sure wdsp is in good state */
if (!WDSP_STATUS_IS_SET(wdsp, WDSP_STATUS_CODE_DLOADED)) {
WDSP_ERR(wdsp, "WDSP in invalid state 0x%x", wdsp->status);
- ret = -EINVAL;
- goto done;
+ return -EINVAL;
}
+ /*
+ * Acquire SSR mutex lock to make sure enablement of DSP
+ * does not race with SSR handling.
+ */
+ WDSP_MGR_MUTEX_LOCK(wdsp, wdsp->ssr_mutex);
/* Download the read-write sections of image */
ret = wdsp_download_segments(wdsp, WDSP_ELF_FLAG_WRITE);
if (IS_ERR_VALUE(ret)) {
@@ -503,6 +509,7 @@
wdsp_broadcast_event_downseq(wdsp, WDSP_EVENT_POST_BOOTUP, NULL);
WDSP_SET_STATUS(wdsp, WDSP_STATUS_BOOTED);
done:
+ WDSP_MGR_MUTEX_UNLOCK(wdsp, wdsp->ssr_mutex);
return ret;
}
diff --git a/sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.c b/sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.c
index e791bf0..29c2180 100644
--- a/sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.c
+++ b/sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.c
@@ -763,10 +763,6 @@
case WDSP_EVENT_DLOAD_FAILED:
case WDSP_EVENT_POST_SHUTDOWN:
- if (event == WDSP_EVENT_POST_DLOAD_CODE)
- /* Mark DSP online since code download is complete */
- wcd_cntl_change_online_state(cntl, 1);
-
/* Disable CPAR */
wcd_cntl_cpar_ctrl(cntl, false);
/* Disable all the clocks */
@@ -775,6 +771,11 @@
dev_err(codec->dev,
"%s: Failed to disable clocks, err = %d\n",
__func__, ret);
+
+ if (event == WDSP_EVENT_POST_DLOAD_CODE)
+ /* Mark DSP online since code download is complete */
+ wcd_cntl_change_online_state(cntl, 1);
+
break;
case WDSP_EVENT_PRE_DLOAD_DATA:
diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c
index 54c3320..ff6fcd9 100644
--- a/sound/soc/generic/simple-card.c
+++ b/sound/soc/generic/simple-card.c
@@ -100,7 +100,7 @@
if (ret && ret != -ENOTSUPP)
goto err;
}
-
+ return 0;
err:
return ret;
}
diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
index 7a5c9a3..daba8c5 100644
--- a/sound/soc/intel/boards/bytcr_rt5640.c
+++ b/sound/soc/intel/boards/bytcr_rt5640.c
@@ -139,7 +139,7 @@
.codec_dai_name = "snd-soc-dummy-dai",
.codec_name = "snd-soc-dummy",
.platform_name = "sst-mfld-platform",
- .ignore_suspend = 1,
+ .nonatomic = true,
.dynamic = 1,
.dpcm_playback = 1,
.dpcm_capture = 1,
@@ -166,6 +166,7 @@
| SND_SOC_DAIFMT_CBS_CFS,
.be_hw_params_fixup = byt_codec_fixup,
.ignore_suspend = 1,
+ .nonatomic = true,
.dpcm_playback = 1,
.dpcm_capture = 1,
.ops = &byt_be_ssp2_ops,
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-q6-noirq.c b/sound/soc/msm/qdsp6v2/msm-pcm-q6-noirq.c
index 1e29c5f4..08373e0 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-q6-noirq.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-q6-noirq.c
@@ -52,13 +52,19 @@
#define CMD_EOS_MIN_TIMEOUT_LENGTH 50
#define CMD_EOS_TIMEOUT_MULTIPLIER (HZ * 50)
+#if defined(CONFIG_TRACING) && defined(DEBUG)
+#define msm_trace_printk(...) trace_printk(__VA_ARGS__)
+#else
+#define msm_trace_printk(...)
+#endif
+
#define ATRACE_END() \
- trace_printk("tracing_mark_write: E\n")
+ msm_trace_printk("tracing_mark_write: E\n")
#define ATRACE_BEGIN(name) \
- trace_printk("tracing_mark_write: B|%d|%s\n", current->tgid, name)
+ msm_trace_printk("tracing_mark_write: B|%d|%s\n", current->tgid, name)
#define ATRACE_FUNC() ATRACE_BEGIN(__func__)
#define ATRACE_INT(name, value) \
- trace_printk("tracing_mark_write: C|%d|%s|%d\n", \
+ msm_trace_printk("tracing_mark_write: C|%d|%s|%d\n", \
current->tgid, name, (int)(value))
#define SIO_PLAYBACK_MAX_PERIOD_SIZE PLAYBACK_MAX_PERIOD_SIZE
diff --git a/sound/soc/sh/rcar/adg.c b/sound/soc/sh/rcar/adg.c
index 2a5b3a2..b123734 100644
--- a/sound/soc/sh/rcar/adg.c
+++ b/sound/soc/sh/rcar/adg.c
@@ -437,7 +437,7 @@
struct device *dev = rsnd_priv_to_dev(priv);
struct device_node *np = dev->of_node;
u32 ckr, rbgx, rbga, rbgb;
- u32 rate, req_rate, div;
+ u32 rate, req_rate = 0, div;
uint32_t count = 0;
unsigned long req_48kHz_rate, req_441kHz_rate;
int i;
diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c
index deed48ef..362446c 100644
--- a/sound/soc/sh/rcar/core.c
+++ b/sound/soc/sh/rcar/core.c
@@ -192,19 +192,16 @@
struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
struct rsnd_dai_stream *io;
struct rsnd_dai *rdai;
- int i, j;
+ int i;
- for_each_rsnd_dai(rdai, priv, j) {
+ for_each_rsnd_dai(rdai, priv, i) {
+ io = &rdai->playback;
+ if (mod == io->mod[mod->type])
+ callback(mod, io);
- for (i = 0; i < RSND_MOD_MAX; i++) {
- io = &rdai->playback;
- if (mod == io->mod[i])
- callback(mod, io);
-
- io = &rdai->capture;
- if (mod == io->mod[i])
- callback(mod, io);
- }
+ io = &rdai->capture;
+ if (mod == io->mod[mod->type])
+ callback(mod, io);
}
}
@@ -1019,7 +1016,7 @@
}
}
- if (change)
+ if (change && cfg->update)
cfg->update(cfg->io, mod);
return change;
diff --git a/sound/soc/sh/rcar/src.c b/sound/soc/sh/rcar/src.c
index 68b439e..460d29c 100644
--- a/sound/soc/sh/rcar/src.c
+++ b/sound/soc/sh/rcar/src.c
@@ -691,13 +691,27 @@
{
rsnd_src_irq_disable_gen2(mod);
- rsnd_mod_write(mod, SRC_CTRL, 0);
+ /*
+ * stop SRC output only
+ * see rsnd_src_quit_gen2
+ */
+ rsnd_mod_write(mod, SRC_CTRL, 0x01);
rsnd_src_error_record_gen2(mod);
return rsnd_src_stop(mod);
}
+static int rsnd_src_quit_gen2(struct rsnd_mod *mod,
+ struct rsnd_dai_stream *io,
+ struct rsnd_priv *priv)
+{
+ /* stop both out/in */
+ rsnd_mod_write(mod, SRC_CTRL, 0);
+
+ return 0;
+}
+
static void __rsnd_src_interrupt_gen2(struct rsnd_mod *mod,
struct rsnd_dai_stream *io)
{
@@ -971,7 +985,7 @@
.probe = rsnd_src_probe_gen2,
.remove = rsnd_src_remove_gen2,
.init = rsnd_src_init_gen2,
- .quit = rsnd_src_quit,
+ .quit = rsnd_src_quit_gen2,
.start = rsnd_src_start_gen2,
.stop = rsnd_src_stop_gen2,
.hw_params = rsnd_src_hw_params,
diff --git a/sound/soc/sh/rcar/ssi.c b/sound/soc/sh/rcar/ssi.c
index 1427ec2..c62a294 100644
--- a/sound/soc/sh/rcar/ssi.c
+++ b/sound/soc/sh/rcar/ssi.c
@@ -39,6 +39,7 @@
#define SCKP (1 << 13) /* Serial Bit Clock Polarity */
#define SWSP (1 << 12) /* Serial WS Polarity */
#define SDTA (1 << 10) /* Serial Data Alignment */
+#define PDTA (1 << 9) /* Parallel Data Alignment */
#define DEL (1 << 8) /* Serial Data Delay */
#define CKDV(v) (v << 4) /* Serial Clock Division Ratio */
#define TRMD (1 << 1) /* Transmit/Receive Mode Select */
@@ -286,7 +287,7 @@
struct snd_pcm_runtime *runtime = rsnd_io_to_runtime(io);
u32 cr;
- cr = FORCE;
+ cr = FORCE | PDTA;
/*
* always use 32bit system word for easy clock calculation.
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index cac2d49..e63b434 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -1792,6 +1792,9 @@
for (i = 0; i < card->num_aux_devs; i++)
soc_remove_aux_dev(card, i);
+ /* free the ALSA card at first; this syncs with pending operations */
+ snd_card_free(card->snd_card);
+
/* remove and free each DAI */
soc_remove_dai_links(card);
@@ -1803,9 +1806,7 @@
snd_soc_dapm_free(&card->dapm);
- snd_card_free(card->snd_card);
return 0;
-
}
/* removes a socdev */
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index 0ba9dfb..e1541c1 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -203,11 +203,12 @@
dev_dbg(be->dev, "ASoC: BE %s event %d dir %d\n",
be->dai_link->name, event, dir);
- if (event == SND_SOC_DAPM_STREAM_STOP && be->dpcm[dir].users >= 1) {
- pr_debug("%s Don't close BE \n", __func__);
+
+ if ((event == SND_SOC_DAPM_STREAM_STOP) &&
+ (be->dpcm[dir].users >= 1))
continue;
- }
- snd_soc_dapm_stream_event(be, dir, event);
+
+ snd_soc_dapm_stream_event(be, dir, event);
}
snd_soc_dapm_stream_event(fe, dir, event);
diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
index 2bba7db..61f64dc 100644
--- a/sound/usb/endpoint.c
+++ b/sound/usb/endpoint.c
@@ -384,6 +384,9 @@
if (unlikely(atomic_read(&ep->chip->shutdown)))
goto exit_clear;
+ if (unlikely(!test_bit(EP_FLAG_RUNNING, &ep->flags)))
+ goto exit_clear;
+
if (usb_pipeout(ep->pipe)) {
retire_outbound_urb(ep, ctx);
/* can be stopped during retire callback */
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index b147bc5..6db190a 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -559,6 +559,8 @@
if (size < sizeof(scale))
return -ENOMEM;
+ if (cval->min_mute)
+ scale[0] = SNDRV_CTL_TLVT_DB_MINMAX_MUTE;
scale[2] = cval->dBmin;
scale[3] = cval->dBmax;
if (copy_to_user(_tlv, scale, sizeof(scale)))
diff --git a/sound/usb/mixer.h b/sound/usb/mixer.h
index 3417ef3..2b4b067 100644
--- a/sound/usb/mixer.h
+++ b/sound/usb/mixer.h
@@ -64,6 +64,7 @@
int cached;
int cache_val[MAX_CHANNELS];
u8 initialized;
+ u8 min_mute;
void *private_data;
};
diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
index 04991b0..5d2fc5f 100644
--- a/sound/usb/mixer_quirks.c
+++ b/sound/usb/mixer_quirks.c
@@ -1873,6 +1873,12 @@
if (unitid == 7 && cval->control == UAC_FU_VOLUME)
snd_dragonfly_quirk_db_scale(mixer, cval, kctl);
break;
+ /* lowest playback value is muted on C-Media devices */
+ case USB_ID(0x0d8c, 0x000c):
+ case USB_ID(0x0d8c, 0x0014):
+ if (strstr(kctl->id.name, "Playback"))
+ cval->min_mute = 1;
+ break;
}
}
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 29f38e2..1cc20d1 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1143,6 +1143,7 @@
case USB_ID(0x0556, 0x0014): /* Phoenix Audio TMX320VC */
case USB_ID(0x05A3, 0x9420): /* ELP HD USB Camera */
case USB_ID(0x074D, 0x3553): /* Outlaw RR2150 (Micronas UAC3553B) */
+ case USB_ID(0x1395, 0x740a): /* Sennheiser DECT */
case USB_ID(0x1901, 0x0191): /* GE B850V3 CP2114 audio interface */
case USB_ID(0x1de7, 0x0013): /* Phoenix Audio MT202exe */
case USB_ID(0x1de7, 0x0014): /* Phoenix Audio TMX320 */
diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c
index 0d9f48e..bc7adb8 100644
--- a/tools/hv/hv_kvp_daemon.c
+++ b/tools/hv/hv_kvp_daemon.c
@@ -1433,7 +1433,7 @@
openlog("KVP", 0, LOG_USER);
syslog(LOG_INFO, "KVP starting; pid is:%d", getpid());
- kvp_fd = open("/dev/vmbus/hv_kvp", O_RDWR);
+ kvp_fd = open("/dev/vmbus/hv_kvp", O_RDWR | O_CLOEXEC);
if (kvp_fd < 0) {
syslog(LOG_ERR, "open /dev/vmbus/hv_kvp failed; error: %d %s",
diff --git a/tools/include/linux/compiler.h b/tools/include/linux/compiler.h
index fa7208a..8a679b2 100644
--- a/tools/include/linux/compiler.h
+++ b/tools/include/linux/compiler.h
@@ -115,4 +115,13 @@
#define WRITE_ONCE(x, val) \
({ union { typeof(x) __val; char __c[1]; } __u = { .__val = (val) }; __write_once_size(&(x), __u.__c, sizeof(x)); __u.__val; })
+
+#ifndef __fallthrough
+# if defined(__GNUC__) && __GNUC__ >= 7
+# define __fallthrough __attribute__ ((fallthrough))
+# else
+# define __fallthrough
+# endif
+#endif
+
#endif /* _TOOLS_LINUX_COMPILER_H */
diff --git a/tools/include/linux/log2.h b/tools/include/linux/log2.h
index 4144666..d5677d3 100644
--- a/tools/include/linux/log2.h
+++ b/tools/include/linux/log2.h
@@ -13,12 +13,6 @@
#define _TOOLS_LINUX_LOG2_H
/*
- * deal with unrepresentable constant logarithms
- */
-extern __attribute__((const, noreturn))
-int ____ilog2_NaN(void);
-
-/*
* non-constant log of base 2 calculators
* - the arch may override these in asm/bitops.h if they can be implemented
* more efficiently than using fls() and fls64()
@@ -78,7 +72,7 @@
#define ilog2(n) \
( \
__builtin_constant_p(n) ? ( \
- (n) < 1 ? ____ilog2_NaN() : \
+ (n) < 2 ? 0 : \
(n) & (1ULL << 63) ? 63 : \
(n) & (1ULL << 62) ? 62 : \
(n) & (1ULL << 61) ? 61 : \
@@ -141,10 +135,7 @@
(n) & (1ULL << 4) ? 4 : \
(n) & (1ULL << 3) ? 3 : \
(n) & (1ULL << 2) ? 2 : \
- (n) & (1ULL << 1) ? 1 : \
- (n) & (1ULL << 0) ? 0 : \
- ____ilog2_NaN() \
- ) : \
+ 1 ) : \
(sizeof(n) <= 4) ? \
__ilog2_u32(n) : \
__ilog2_u64(n) \
diff --git a/tools/lib/lockdep/uinclude/linux/lockdep.h b/tools/lib/lockdep/uinclude/linux/lockdep.h
index c808c7d..e69118b2 100644
--- a/tools/lib/lockdep/uinclude/linux/lockdep.h
+++ b/tools/lib/lockdep/uinclude/linux/lockdep.h
@@ -8,7 +8,7 @@
#include <linux/utsname.h>
#include <linux/compiler.h>
-#define MAX_LOCK_DEPTH 2000UL
+#define MAX_LOCK_DEPTH 255UL
#define asmlinkage
#define __visible
diff --git a/tools/lib/traceevent/plugin_sched_switch.c b/tools/lib/traceevent/plugin_sched_switch.c
index f1ce600..ec30c2f 100644
--- a/tools/lib/traceevent/plugin_sched_switch.c
+++ b/tools/lib/traceevent/plugin_sched_switch.c
@@ -111,7 +111,7 @@
trace_seq_printf(s, "%lld ", val);
if (pevent_get_field_val(s, event, "prev_prio", record, &val, 0) == 0)
- trace_seq_printf(s, "[%lld] ", val);
+ trace_seq_printf(s, "[%d] ", (int) val);
if (pevent_get_field_val(s, event, "prev_state", record, &val, 0) == 0)
write_state(s, val);
@@ -129,7 +129,7 @@
trace_seq_printf(s, "%lld", val);
if (pevent_get_field_val(s, event, "next_prio", record, &val, 0) == 0)
- trace_seq_printf(s, " [%lld]", val);
+ trace_seq_printf(s, " [%d]", (int) val);
return 0;
}
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index 74c265e..fb1c9dd 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -566,9 +566,9 @@
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/attr'; \
$(INSTALL) tests/attr/* '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/attr'
-install-bin: install-tools install-tests
+install-bin: install-tools install-tests install-traceevent-plugins
-install: install-bin try-install-man install-traceevent-plugins
+install: install-bin try-install-man
install-python_ext:
$(PYTHON_WORD) util/setup.py --quiet install --root='/$(DESTDIR_SQ)'
diff --git a/tools/perf/arch/x86/tests/intel-cqm.c b/tools/perf/arch/x86/tests/intel-cqm.c
index d28c1b6..fa5d17a 100644
--- a/tools/perf/arch/x86/tests/intel-cqm.c
+++ b/tools/perf/arch/x86/tests/intel-cqm.c
@@ -17,7 +17,7 @@
if (pid)
return pid;
- while(1);
+ while(1)
sleep(5);
return 0;
}
diff --git a/tools/perf/arch/x86/util/dwarf-regs.c b/tools/perf/arch/x86/util/dwarf-regs.c
index 9223c16..1f86ee8 100644
--- a/tools/perf/arch/x86/util/dwarf-regs.c
+++ b/tools/perf/arch/x86/util/dwarf-regs.c
@@ -63,6 +63,8 @@
# define REG_OFFSET_NAME_32(n, r) {.name = n, .offset = offsetof(struct pt_regs, r)}
#endif
+/* TODO: switching by dwarf address size */
+#ifndef __x86_64__
static const struct pt_regs_offset x86_32_regoffset_table[] = {
REG_OFFSET_NAME_32("%ax", eax),
REG_OFFSET_NAME_32("%cx", ecx),
@@ -75,6 +77,8 @@
REG_OFFSET_END,
};
+#define regoffset_table x86_32_regoffset_table
+#else
static const struct pt_regs_offset x86_64_regoffset_table[] = {
REG_OFFSET_NAME_64("%ax", rax),
REG_OFFSET_NAME_64("%dx", rdx),
@@ -95,11 +99,7 @@
REG_OFFSET_END,
};
-/* TODO: switching by dwarf address size */
-#ifdef __x86_64__
#define regoffset_table x86_64_regoffset_table
-#else
-#define regoffset_table x86_32_regoffset_table
#endif
/* Minus 1 for the ending REG_OFFSET_END */
diff --git a/tools/perf/bench/numa.c b/tools/perf/bench/numa.c
index 492df27..b4eb5b6 100644
--- a/tools/perf/bench/numa.c
+++ b/tools/perf/bench/numa.c
@@ -1570,13 +1570,13 @@
"GB/sec,", "total-speed", "GB/sec total speed");
if (g->p.show_details >= 2) {
- char tname[32];
+ char tname[14 + 2 * 10 + 1];
struct thread_data *td;
for (p = 0; p < g->p.nr_proc; p++) {
for (t = 0; t < g->p.nr_threads; t++) {
- memset(tname, 0, 32);
+ memset(tname, 0, sizeof(tname));
td = g->threads + p*g->p.nr_threads + t;
- snprintf(tname, 32, "process%d:thread%d", p, t);
+ snprintf(tname, sizeof(tname), "process%d:thread%d", p, t);
print_res(tname, td->speed_gbs,
"GB/sec", "thread-speed", "GB/sec/thread speed");
print_res(tname, td->system_time_ns / 1e9,
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index 368d1e1..4884055 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -1253,21 +1253,19 @@
return S_ISDIR(st.st_mode);
}
-#define for_each_lang(scripts_path, scripts_dir, lang_dirent, lang_next)\
- while (!readdir_r(scripts_dir, &lang_dirent, &lang_next) && \
- lang_next) \
- if ((lang_dirent.d_type == DT_DIR || \
- (lang_dirent.d_type == DT_UNKNOWN && \
- is_directory(scripts_path, &lang_dirent))) && \
- (strcmp(lang_dirent.d_name, ".")) && \
- (strcmp(lang_dirent.d_name, "..")))
+#define for_each_lang(scripts_path, scripts_dir, lang_dirent) \
+ while ((lang_dirent = readdir(scripts_dir)) != NULL) \
+ if ((lang_dirent->d_type == DT_DIR || \
+ (lang_dirent->d_type == DT_UNKNOWN && \
+ is_directory(scripts_path, lang_dirent))) && \
+ (strcmp(lang_dirent->d_name, ".")) && \
+ (strcmp(lang_dirent->d_name, "..")))
-#define for_each_script(lang_path, lang_dir, script_dirent, script_next)\
- while (!readdir_r(lang_dir, &script_dirent, &script_next) && \
- script_next) \
- if (script_dirent.d_type != DT_DIR && \
- (script_dirent.d_type != DT_UNKNOWN || \
- !is_directory(lang_path, &script_dirent)))
+#define for_each_script(lang_path, lang_dir, script_dirent) \
+ while ((script_dirent = readdir(lang_dir)) != NULL) \
+ if (script_dirent->d_type != DT_DIR && \
+ (script_dirent->d_type != DT_UNKNOWN || \
+ !is_directory(lang_path, script_dirent)))
#define RECORD_SUFFIX "-record"
@@ -1413,7 +1411,7 @@
const char *s __maybe_unused,
int unset __maybe_unused)
{
- struct dirent *script_next, *lang_next, script_dirent, lang_dirent;
+ struct dirent *script_dirent, *lang_dirent;
char scripts_path[MAXPATHLEN];
DIR *scripts_dir, *lang_dir;
char script_path[MAXPATHLEN];
@@ -1428,19 +1426,19 @@
if (!scripts_dir)
return -1;
- for_each_lang(scripts_path, scripts_dir, lang_dirent, lang_next) {
+ for_each_lang(scripts_path, scripts_dir, lang_dirent) {
snprintf(lang_path, MAXPATHLEN, "%s/%s/bin", scripts_path,
- lang_dirent.d_name);
+ lang_dirent->d_name);
lang_dir = opendir(lang_path);
if (!lang_dir)
continue;
- for_each_script(lang_path, lang_dir, script_dirent, script_next) {
- script_root = get_script_root(&script_dirent, REPORT_SUFFIX);
+ for_each_script(lang_path, lang_dir, script_dirent) {
+ script_root = get_script_root(script_dirent, REPORT_SUFFIX);
if (script_root) {
desc = script_desc__findnew(script_root);
snprintf(script_path, MAXPATHLEN, "%s/%s",
- lang_path, script_dirent.d_name);
+ lang_path, script_dirent->d_name);
read_script_info(desc, script_path);
free(script_root);
}
@@ -1528,7 +1526,7 @@
*/
int find_scripts(char **scripts_array, char **scripts_path_array)
{
- struct dirent *script_next, *lang_next, script_dirent, lang_dirent;
+ struct dirent *script_dirent, *lang_dirent;
char scripts_path[MAXPATHLEN], lang_path[MAXPATHLEN];
DIR *scripts_dir, *lang_dir;
struct perf_session *session;
@@ -1551,9 +1549,9 @@
return -1;
}
- for_each_lang(scripts_path, scripts_dir, lang_dirent, lang_next) {
+ for_each_lang(scripts_path, scripts_dir, lang_dirent) {
snprintf(lang_path, MAXPATHLEN, "%s/%s", scripts_path,
- lang_dirent.d_name);
+ lang_dirent->d_name);
#ifdef NO_LIBPERL
if (strstr(lang_path, "perl"))
continue;
@@ -1567,16 +1565,16 @@
if (!lang_dir)
continue;
- for_each_script(lang_path, lang_dir, script_dirent, script_next) {
+ for_each_script(lang_path, lang_dir, script_dirent) {
/* Skip those real time scripts: xxxtop.p[yl] */
- if (strstr(script_dirent.d_name, "top."))
+ if (strstr(script_dirent->d_name, "top."))
continue;
sprintf(scripts_path_array[i], "%s/%s", lang_path,
- script_dirent.d_name);
- temp = strchr(script_dirent.d_name, '.');
+ script_dirent->d_name);
+ temp = strchr(script_dirent->d_name, '.');
snprintf(scripts_array[i],
- (temp - script_dirent.d_name) + 1,
- "%s", script_dirent.d_name);
+ (temp - script_dirent->d_name) + 1,
+ "%s", script_dirent->d_name);
if (check_ev_match(lang_path,
scripts_array[i], session))
@@ -1594,7 +1592,7 @@
static char *get_script_path(const char *script_root, const char *suffix)
{
- struct dirent *script_next, *lang_next, script_dirent, lang_dirent;
+ struct dirent *script_dirent, *lang_dirent;
char scripts_path[MAXPATHLEN];
char script_path[MAXPATHLEN];
DIR *scripts_dir, *lang_dir;
@@ -1607,21 +1605,21 @@
if (!scripts_dir)
return NULL;
- for_each_lang(scripts_path, scripts_dir, lang_dirent, lang_next) {
+ for_each_lang(scripts_path, scripts_dir, lang_dirent) {
snprintf(lang_path, MAXPATHLEN, "%s/%s/bin", scripts_path,
- lang_dirent.d_name);
+ lang_dirent->d_name);
lang_dir = opendir(lang_path);
if (!lang_dir)
continue;
- for_each_script(lang_path, lang_dir, script_dirent, script_next) {
- __script_root = get_script_root(&script_dirent, suffix);
+ for_each_script(lang_path, lang_dir, script_dirent) {
+ __script_root = get_script_root(script_dirent, suffix);
if (__script_root && !strcmp(script_root, __script_root)) {
free(__script_root);
closedir(lang_dir);
closedir(scripts_dir);
snprintf(script_path, MAXPATHLEN, "%s/%s",
- lang_path, script_dirent.d_name);
+ lang_path, script_dirent->d_name);
return strdup(script_path);
}
free(__script_root);
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 7e2e72e..4a8a02c 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -636,7 +636,7 @@
case -1:
if (errno == EINTR)
continue;
- /* Fall trhu */
+ __fallthrough;
default:
c = getc(stdin);
tcsetattr(0, TCSAFLUSH, &save);
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index c783d8f..ebe7115 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -1617,6 +1617,7 @@
color_fprintf(trace->output, PERF_COLOR_RED,
"LOST %" PRIu64 " events!\n", event->lost.lost);
ret = machine__process_lost_event(machine, event, sample);
+ break;
default:
ret = machine__process_event(machine, event, sample);
break;
diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c
index 636d7b4..54af2f2 100644
--- a/tools/perf/tests/parse-events.c
+++ b/tools/perf/tests/parse-events.c
@@ -1727,15 +1727,14 @@
}
while (!ret && (ent = readdir(dir))) {
-#define MAX_NAME 100
struct evlist_test e;
- char name[MAX_NAME];
+ char name[2 * NAME_MAX + 1 + 12 + 3];
if (!strcmp(ent->d_name, ".") ||
!strcmp(ent->d_name, ".."))
continue;
- snprintf(name, MAX_NAME, "cpu/event=%s/u", ent->d_name);
+ snprintf(name, sizeof(name), "cpu/event=%s/u", ent->d_name);
e.name = name;
e.check = test__checkevent_pmu_events;
@@ -1743,11 +1742,10 @@
ret = test_event(&e);
if (ret)
break;
- snprintf(name, MAX_NAME, "%s:u,cpu/event=%s/u", ent->d_name, ent->d_name);
+ snprintf(name, sizeof(name), "%s:u,cpu/event=%s/u", ent->d_name, ent->d_name);
e.name = name;
e.check = test__checkevent_pmu_events_mix;
ret = test_event(&e);
-#undef MAX_NAME
}
closedir(dir);
diff --git a/tools/perf/ui/browser.c b/tools/perf/ui/browser.c
index e9703c0..07b5f59 100644
--- a/tools/perf/ui/browser.c
+++ b/tools/perf/ui/browser.c
@@ -702,7 +702,7 @@
ui_browser__gotorc(browser, row, column + 1);
SLsmg_draw_hline(2);
- if (row++ == 0)
+ if (++row == 0)
goto out;
} else
row = 0;
diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c
index d4d7cc2..718bd46 100644
--- a/tools/perf/ui/browsers/annotate.c
+++ b/tools/perf/ui/browsers/annotate.c
@@ -755,11 +755,11 @@
nd = browser->curr_hot;
break;
case K_UNTAB:
- if (nd != NULL)
+ if (nd != NULL) {
nd = rb_next(nd);
if (nd == NULL)
nd = rb_first(&browser->entries);
- else
+ } else
nd = browser->curr_hot;
break;
case K_F1:
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index 956187b..26cba6434 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -416,7 +416,7 @@
{
char filename[PATH_MAX];
DIR *tasks;
- struct dirent dirent, *next;
+ struct dirent *dirent;
pid_t tgid, ppid;
int rc = 0;
@@ -445,11 +445,11 @@
return 0;
}
- while (!readdir_r(tasks, &dirent, &next) && next) {
+ while ((dirent = readdir(tasks)) != NULL) {
char *end;
pid_t _pid;
- _pid = strtol(dirent.d_name, &end, 10);
+ _pid = strtol(dirent->d_name, &end, 10);
if (*end)
continue;
@@ -558,7 +558,7 @@
{
DIR *proc;
char proc_path[PATH_MAX];
- struct dirent dirent, *next;
+ struct dirent *dirent;
union perf_event *comm_event, *mmap_event, *fork_event;
int err = -1;
@@ -583,9 +583,9 @@
if (proc == NULL)
goto out_free_fork;
- while (!readdir_r(proc, &dirent, &next) && next) {
+ while ((dirent = readdir(proc)) != NULL) {
char *end;
- pid_t pid = strtol(dirent.d_name, &end, 10);
+ pid_t pid = strtol(dirent->d_name, &end, 10);
if (*end) /* only interested in proper numerical dirents */
continue;
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
index 71df7ac..eeeae06 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c
@@ -22,6 +22,7 @@
#include <errno.h>
#include <stdint.h>
#include <inttypes.h>
+#include <linux/compiler.h>
#include "../cache.h"
#include "../util.h"
@@ -63,6 +64,25 @@
INTEL_PT_STATE_FUP_NO_TIP,
};
+static inline bool intel_pt_sample_time(enum intel_pt_pkt_state pkt_state)
+{
+ switch (pkt_state) {
+ case INTEL_PT_STATE_NO_PSB:
+ case INTEL_PT_STATE_NO_IP:
+ case INTEL_PT_STATE_ERR_RESYNC:
+ case INTEL_PT_STATE_IN_SYNC:
+ case INTEL_PT_STATE_TNT:
+ return true;
+ case INTEL_PT_STATE_TIP:
+ case INTEL_PT_STATE_TIP_PGD:
+ case INTEL_PT_STATE_FUP:
+ case INTEL_PT_STATE_FUP_NO_TIP:
+ return false;
+ default:
+ return true;
+ }
+}
+
#ifdef INTEL_PT_STRICT
#define INTEL_PT_STATE_ERR1 INTEL_PT_STATE_NO_PSB
#define INTEL_PT_STATE_ERR2 INTEL_PT_STATE_NO_PSB
@@ -90,6 +110,7 @@
bool have_tma;
bool have_cyc;
bool fixup_last_mtc;
+ bool have_last_ip;
uint64_t pos;
uint64_t last_ip;
uint64_t ip;
@@ -97,6 +118,7 @@
uint64_t timestamp;
uint64_t tsc_timestamp;
uint64_t ref_timestamp;
+ uint64_t sample_timestamp;
uint64_t ret_addr;
uint64_t ctc_timestamp;
uint64_t ctc_delta;
@@ -124,8 +146,6 @@
bool have_calc_cyc_to_tsc;
int exec_mode;
unsigned int insn_bytes;
- uint64_t sign_bit;
- uint64_t sign_bits;
uint64_t period;
enum intel_pt_period_type period_type;
uint64_t tot_insn_cnt;
@@ -139,6 +159,7 @@
unsigned int fup_tx_flags;
unsigned int tx_flags;
uint64_t timestamp_insn_cnt;
+ uint64_t sample_insn_cnt;
uint64_t stuck_ip;
int no_progress;
int stuck_ip_prd;
@@ -192,9 +213,6 @@
decoder->data = params->data;
decoder->return_compression = params->return_compression;
- decoder->sign_bit = (uint64_t)1 << 47;
- decoder->sign_bits = ~(((uint64_t)1 << 48) - 1);
-
decoder->period = params->period;
decoder->period_type = params->period_type;
@@ -363,21 +381,30 @@
return 0;
}
-static uint64_t intel_pt_calc_ip(struct intel_pt_decoder *decoder,
- const struct intel_pt_pkt *packet,
+static uint64_t intel_pt_calc_ip(const struct intel_pt_pkt *packet,
uint64_t last_ip)
{
uint64_t ip;
switch (packet->count) {
- case 2:
+ case 1:
ip = (last_ip & (uint64_t)0xffffffffffff0000ULL) |
packet->payload;
break;
- case 4:
+ case 2:
ip = (last_ip & (uint64_t)0xffffffff00000000ULL) |
packet->payload;
break;
+ case 3:
+ ip = packet->payload;
+ /* Sign-extend 6-byte ip */
+ if (ip & (uint64_t)0x800000000000ULL)
+ ip |= (uint64_t)0xffff000000000000ULL;
+ break;
+ case 4:
+ ip = (last_ip & (uint64_t)0xffff000000000000ULL) |
+ packet->payload;
+ break;
case 6:
ip = packet->payload;
break;
@@ -385,16 +412,13 @@
return 0;
}
- if (ip & decoder->sign_bit)
- return ip | decoder->sign_bits;
-
return ip;
}
static inline void intel_pt_set_last_ip(struct intel_pt_decoder *decoder)
{
- decoder->last_ip = intel_pt_calc_ip(decoder, &decoder->packet,
- decoder->last_ip);
+ decoder->last_ip = intel_pt_calc_ip(&decoder->packet, decoder->last_ip);
+ decoder->have_last_ip = true;
}
static inline void intel_pt_set_ip(struct intel_pt_decoder *decoder)
@@ -895,6 +919,7 @@
decoder->tot_insn_cnt += insn_cnt;
decoder->timestamp_insn_cnt += insn_cnt;
+ decoder->sample_insn_cnt += insn_cnt;
decoder->period_insn_cnt += insn_cnt;
if (err) {
@@ -1413,7 +1438,8 @@
case INTEL_PT_FUP:
decoder->pge = true;
- intel_pt_set_last_ip(decoder);
+ if (decoder->packet.count)
+ intel_pt_set_last_ip(decoder);
break;
case INTEL_PT_MODE_TSX:
@@ -1617,6 +1643,8 @@
break;
case INTEL_PT_PSB:
+ decoder->last_ip = 0;
+ decoder->have_last_ip = true;
intel_pt_clear_stack(&decoder->stack);
err = intel_pt_walk_psbend(decoder);
if (err == -EAGAIN)
@@ -1695,6 +1723,13 @@
}
}
+static inline bool intel_pt_have_ip(struct intel_pt_decoder *decoder)
+{
+ return decoder->packet.count &&
+ (decoder->have_last_ip || decoder->packet.count == 3 ||
+ decoder->packet.count == 6);
+}
+
/* Walk PSB+ packets to get in sync. */
static int intel_pt_walk_psb(struct intel_pt_decoder *decoder)
{
@@ -1708,6 +1743,7 @@
switch (decoder->packet.type) {
case INTEL_PT_TIP_PGD:
decoder->continuous_period = false;
+ __fallthrough;
case INTEL_PT_TIP_PGE:
case INTEL_PT_TIP:
intel_pt_log("ERROR: Unexpected packet\n");
@@ -1715,8 +1751,7 @@
case INTEL_PT_FUP:
decoder->pge = true;
- if (decoder->last_ip || decoder->packet.count == 6 ||
- decoder->packet.count == 0) {
+ if (intel_pt_have_ip(decoder)) {
uint64_t current_ip = decoder->ip;
intel_pt_set_ip(decoder);
@@ -1762,6 +1797,8 @@
decoder->pge = false;
decoder->continuous_period = false;
intel_pt_clear_tx_flags(decoder);
+ __fallthrough;
+
case INTEL_PT_TNT:
decoder->have_tma = false;
intel_pt_log("ERROR: Unexpected packet\n");
@@ -1802,27 +1839,21 @@
switch (decoder->packet.type) {
case INTEL_PT_TIP_PGD:
decoder->continuous_period = false;
+ __fallthrough;
case INTEL_PT_TIP_PGE:
case INTEL_PT_TIP:
decoder->pge = decoder->packet.type != INTEL_PT_TIP_PGD;
- if (decoder->last_ip || decoder->packet.count == 6 ||
- decoder->packet.count == 0)
+ if (intel_pt_have_ip(decoder))
intel_pt_set_ip(decoder);
if (decoder->ip)
return 0;
break;
case INTEL_PT_FUP:
- if (decoder->overflow) {
- if (decoder->last_ip ||
- decoder->packet.count == 6 ||
- decoder->packet.count == 0)
- intel_pt_set_ip(decoder);
- if (decoder->ip)
- return 0;
- }
- if (decoder->packet.count)
- intel_pt_set_last_ip(decoder);
+ if (intel_pt_have_ip(decoder))
+ intel_pt_set_ip(decoder);
+ if (decoder->ip)
+ return 0;
break;
case INTEL_PT_MTC:
@@ -1871,6 +1902,9 @@
break;
case INTEL_PT_PSB:
+ decoder->last_ip = 0;
+ decoder->have_last_ip = true;
+ intel_pt_clear_stack(&decoder->stack);
err = intel_pt_walk_psb(decoder);
if (err)
return err;
@@ -1896,6 +1930,8 @@
{
int err;
+ decoder->set_fup_tx_flags = false;
+
intel_pt_log("Scanning for full IP\n");
err = intel_pt_walk_to_ip(decoder);
if (err)
@@ -2004,6 +2040,7 @@
decoder->pge = false;
decoder->continuous_period = false;
+ decoder->have_last_ip = false;
decoder->last_ip = 0;
decoder->ip = 0;
intel_pt_clear_stack(&decoder->stack);
@@ -2012,6 +2049,7 @@
if (err)
return err;
+ decoder->have_last_ip = true;
decoder->pkt_state = INTEL_PT_STATE_NO_IP;
err = intel_pt_walk_psb(decoder);
@@ -2030,7 +2068,7 @@
static uint64_t intel_pt_est_timestamp(struct intel_pt_decoder *decoder)
{
- uint64_t est = decoder->timestamp_insn_cnt << 1;
+ uint64_t est = decoder->sample_insn_cnt << 1;
if (!decoder->cbr || !decoder->max_non_turbo_ratio)
goto out;
@@ -2038,7 +2076,7 @@
est *= decoder->max_non_turbo_ratio;
est /= decoder->cbr;
out:
- return decoder->timestamp + est;
+ return decoder->sample_timestamp + est;
}
const struct intel_pt_state *intel_pt_decode(struct intel_pt_decoder *decoder)
@@ -2054,7 +2092,9 @@
err = intel_pt_sync(decoder);
break;
case INTEL_PT_STATE_NO_IP:
+ decoder->have_last_ip = false;
decoder->last_ip = 0;
+ decoder->ip = 0;
/* Fall through */
case INTEL_PT_STATE_ERR_RESYNC:
err = intel_pt_sync_ip(decoder);
@@ -2091,15 +2131,24 @@
}
} while (err == -ENOLINK);
- decoder->state.err = err ? intel_pt_ext_err(err) : 0;
- decoder->state.timestamp = decoder->timestamp;
+ if (err) {
+ decoder->state.err = intel_pt_ext_err(err);
+ decoder->state.from_ip = decoder->ip;
+ decoder->sample_timestamp = decoder->timestamp;
+ decoder->sample_insn_cnt = decoder->timestamp_insn_cnt;
+ } else {
+ decoder->state.err = 0;
+ if (intel_pt_sample_time(decoder->pkt_state)) {
+ decoder->sample_timestamp = decoder->timestamp;
+ decoder->sample_insn_cnt = decoder->timestamp_insn_cnt;
+ }
+ }
+
+ decoder->state.timestamp = decoder->sample_timestamp;
decoder->state.est_timestamp = intel_pt_est_timestamp(decoder);
decoder->state.cr3 = decoder->cr3;
decoder->state.tot_insn_cnt = decoder->tot_insn_cnt;
- if (err)
- decoder->state.from_ip = decoder->ip;
-
return &decoder->state;
}
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c
index b1257c8..7528ae4 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c
@@ -17,6 +17,7 @@
#include <string.h>
#include <endian.h>
#include <byteswap.h>
+#include <linux/compiler.h>
#include "intel-pt-pkt-decoder.h"
@@ -292,36 +293,46 @@
const unsigned char *buf, size_t len,
struct intel_pt_pkt *packet)
{
- switch (byte >> 5) {
+ int ip_len;
+
+ packet->count = byte >> 5;
+
+ switch (packet->count) {
case 0:
- packet->count = 0;
+ ip_len = 0;
break;
case 1:
if (len < 3)
return INTEL_PT_NEED_MORE_BYTES;
- packet->count = 2;
+ ip_len = 2;
packet->payload = le16_to_cpu(*(uint16_t *)(buf + 1));
break;
case 2:
if (len < 5)
return INTEL_PT_NEED_MORE_BYTES;
- packet->count = 4;
+ ip_len = 4;
packet->payload = le32_to_cpu(*(uint32_t *)(buf + 1));
break;
case 3:
- case 6:
+ case 4:
if (len < 7)
return INTEL_PT_NEED_MORE_BYTES;
- packet->count = 6;
+ ip_len = 6;
memcpy_le64(&packet->payload, buf + 1, 6);
break;
+ case 6:
+ if (len < 9)
+ return INTEL_PT_NEED_MORE_BYTES;
+ ip_len = 8;
+ packet->payload = le64_to_cpu(*(uint64_t *)(buf + 1));
+ break;
default:
return INTEL_PT_BAD_PACKET;
}
packet->type = type;
- return packet->count + 1;
+ return ip_len + 1;
}
static int intel_pt_get_mode(const unsigned char *buf, size_t len,
@@ -488,6 +499,7 @@
case INTEL_PT_FUP:
if (!(packet->count))
return snprintf(buf, buf_len, "%s no ip", name);
+ __fallthrough;
case INTEL_PT_CYC:
case INTEL_PT_VMCS:
case INTEL_PT_MTC:
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 854dd21..881bbb5 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -138,11 +138,11 @@
#define PERF_EVENT_TYPE(config) __PERF_EVENT_FIELD(config, TYPE)
#define PERF_EVENT_ID(config) __PERF_EVENT_FIELD(config, EVENT)
-#define for_each_subsystem(sys_dir, sys_dirent, sys_next) \
- while (!readdir_r(sys_dir, &sys_dirent, &sys_next) && sys_next) \
- if (sys_dirent.d_type == DT_DIR && \
- (strcmp(sys_dirent.d_name, ".")) && \
- (strcmp(sys_dirent.d_name, "..")))
+#define for_each_subsystem(sys_dir, sys_dirent) \
+ while ((sys_dirent = readdir(sys_dir)) != NULL) \
+ if (sys_dirent->d_type == DT_DIR && \
+ (strcmp(sys_dirent->d_name, ".")) && \
+ (strcmp(sys_dirent->d_name, "..")))
static int tp_event_has_id(struct dirent *sys_dir, struct dirent *evt_dir)
{
@@ -159,12 +159,12 @@
return 0;
}
-#define for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) \
- while (!readdir_r(evt_dir, &evt_dirent, &evt_next) && evt_next) \
- if (evt_dirent.d_type == DT_DIR && \
- (strcmp(evt_dirent.d_name, ".")) && \
- (strcmp(evt_dirent.d_name, "..")) && \
- (!tp_event_has_id(&sys_dirent, &evt_dirent)))
+#define for_each_event(sys_dirent, evt_dir, evt_dirent) \
+ while ((evt_dirent = readdir(evt_dir)) != NULL) \
+ if (evt_dirent->d_type == DT_DIR && \
+ (strcmp(evt_dirent->d_name, ".")) && \
+ (strcmp(evt_dirent->d_name, "..")) && \
+ (!tp_event_has_id(sys_dirent, evt_dirent)))
#define MAX_EVENT_LENGTH 512
@@ -173,7 +173,7 @@
{
struct tracepoint_path *path = NULL;
DIR *sys_dir, *evt_dir;
- struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent;
+ struct dirent *sys_dirent, *evt_dirent;
char id_buf[24];
int fd;
u64 id;
@@ -184,18 +184,18 @@
if (!sys_dir)
return NULL;
- for_each_subsystem(sys_dir, sys_dirent, sys_next) {
+ for_each_subsystem(sys_dir, sys_dirent) {
snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path,
- sys_dirent.d_name);
+ sys_dirent->d_name);
evt_dir = opendir(dir_path);
if (!evt_dir)
continue;
- for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) {
+ for_each_event(sys_dirent, evt_dir, evt_dirent) {
snprintf(evt_path, MAXPATHLEN, "%s/%s/id", dir_path,
- evt_dirent.d_name);
+ evt_dirent->d_name);
fd = open(evt_path, O_RDONLY);
if (fd < 0)
continue;
@@ -220,9 +220,9 @@
free(path);
return NULL;
}
- strncpy(path->system, sys_dirent.d_name,
+ strncpy(path->system, sys_dirent->d_name,
MAX_EVENT_LENGTH);
- strncpy(path->name, evt_dirent.d_name,
+ strncpy(path->name, evt_dirent->d_name,
MAX_EVENT_LENGTH);
return path;
}
@@ -1662,7 +1662,7 @@
bool name_only)
{
DIR *sys_dir, *evt_dir;
- struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent;
+ struct dirent *sys_dirent, *evt_dirent;
char evt_path[MAXPATHLEN];
char dir_path[MAXPATHLEN];
char **evt_list = NULL;
@@ -1680,20 +1680,20 @@
goto out_close_sys_dir;
}
- for_each_subsystem(sys_dir, sys_dirent, sys_next) {
+ for_each_subsystem(sys_dir, sys_dirent) {
if (subsys_glob != NULL &&
- !strglobmatch(sys_dirent.d_name, subsys_glob))
+ !strglobmatch(sys_dirent->d_name, subsys_glob))
continue;
snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path,
- sys_dirent.d_name);
+ sys_dirent->d_name);
evt_dir = opendir(dir_path);
if (!evt_dir)
continue;
- for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) {
+ for_each_event(sys_dirent, evt_dir, evt_dirent) {
if (event_glob != NULL &&
- !strglobmatch(evt_dirent.d_name, event_glob))
+ !strglobmatch(evt_dirent->d_name, event_glob))
continue;
if (!evt_num_known) {
@@ -1702,7 +1702,7 @@
}
snprintf(evt_path, MAXPATHLEN, "%s:%s",
- sys_dirent.d_name, evt_dirent.d_name);
+ sys_dirent->d_name, evt_dirent->d_name);
evt_list[evt_i] = strdup(evt_path);
if (evt_list[evt_i] == NULL)
@@ -1755,7 +1755,7 @@
int is_valid_tracepoint(const char *event_string)
{
DIR *sys_dir, *evt_dir;
- struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent;
+ struct dirent *sys_dirent, *evt_dirent;
char evt_path[MAXPATHLEN];
char dir_path[MAXPATHLEN];
@@ -1763,17 +1763,17 @@
if (!sys_dir)
return 0;
- for_each_subsystem(sys_dir, sys_dirent, sys_next) {
+ for_each_subsystem(sys_dir, sys_dirent) {
snprintf(dir_path, MAXPATHLEN, "%s/%s", tracing_events_path,
- sys_dirent.d_name);
+ sys_dirent->d_name);
evt_dir = opendir(dir_path);
if (!evt_dir)
continue;
- for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) {
+ for_each_event(sys_dirent, evt_dir, evt_dirent) {
snprintf(evt_path, MAXPATHLEN, "%s:%s",
- sys_dirent.d_name, evt_dirent.d_name);
+ sys_dirent->d_name, evt_dirent->d_name);
if (!strcmp(evt_path, event_string)) {
closedir(evt_dir);
closedir(sys_dir);
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index 6f2a027..593066c 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -153,7 +153,7 @@
if (fd == -1)
return -1;
- sret = read(fd, alias->unit, UNIT_MAX_LEN);
+ sret = read(fd, alias->unit, UNIT_MAX_LEN);
if (sret < 0)
goto error;
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index 05012bb..fdd87c7 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -1460,16 +1460,12 @@
Dwarf_Addr _addr = 0, baseaddr = 0;
const char *fname = NULL, *func = NULL, *basefunc = NULL, *tmp;
int baseline = 0, lineno = 0, ret = 0;
- bool reloc = false;
-retry:
+ /* We always need to relocate the address for aranges */
+ if (debuginfo__get_text_offset(dbg, &baseaddr) == 0)
+ addr += baseaddr;
/* Find cu die */
if (!dwarf_addrdie(dbg->dbg, (Dwarf_Addr)addr, &cudie)) {
- if (!reloc && debuginfo__get_text_offset(dbg, &baseaddr) == 0) {
- addr += baseaddr;
- reloc = true;
- goto retry;
- }
pr_warning("Failed to find debug information for address %lx\n",
addr);
ret = -EINVAL;
diff --git a/tools/perf/util/scripting-engines/Build b/tools/perf/util/scripting-engines/Build
index 6516e22..82d28c6 100644
--- a/tools/perf/util/scripting-engines/Build
+++ b/tools/perf/util/scripting-engines/Build
@@ -1,6 +1,6 @@
libperf-$(CONFIG_LIBPERL) += trace-event-perl.o
libperf-$(CONFIG_LIBPYTHON) += trace-event-python.o
-CFLAGS_trace-event-perl.o += $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow -Wno-undef -Wno-switch-default
+CFLAGS_trace-event-perl.o += $(PERL_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow -Wno-nested-externs -Wno-undef -Wno-switch-default
CFLAGS_trace-event-python.o += $(PYTHON_EMBED_CCOPTS) -Wno-redundant-decls -Wno-strict-prototypes -Wno-unused-parameter -Wno-shadow
diff --git a/tools/perf/util/strfilter.c b/tools/perf/util/strfilter.c
index bcae659..efb5377 100644
--- a/tools/perf/util/strfilter.c
+++ b/tools/perf/util/strfilter.c
@@ -269,6 +269,7 @@
len = strfilter_node__sprint_pt(node->l, buf);
if (len < 0)
return len;
+ __fallthrough;
case '!':
if (buf) {
*(buf + len++) = *node->p;
diff --git a/tools/perf/util/string.c b/tools/perf/util/string.c
index fc8781d..accb7ec 100644
--- a/tools/perf/util/string.c
+++ b/tools/perf/util/string.c
@@ -21,6 +21,8 @@
case 'b': case 'B':
if (*p)
goto out_err;
+
+ __fallthrough;
case '\0':
return length;
default:
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index 27ae382..7c97eca 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -488,6 +488,12 @@
break;
} else {
int n = namesz + descsz;
+
+ if (n > (int)sizeof(bf)) {
+ n = sizeof(bf);
+ pr_debug("%s: truncating reading of build id in sysfs file %s: n_namesz=%u, n_descsz=%u.\n",
+ __func__, filename, nhdr.n_namesz, nhdr.n_descsz);
+ }
if (read(fd, bf, n) != n)
break;
}
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
index 0a9ae80..829508a 100644
--- a/tools/perf/util/thread.c
+++ b/tools/perf/util/thread.c
@@ -227,7 +227,7 @@
struct addr_location *al)
{
size_t i;
- const u8 const cpumodes[] = {
+ const u8 cpumodes[] = {
PERF_RECORD_MISC_USER,
PERF_RECORD_MISC_KERNEL,
PERF_RECORD_MISC_GUEST_USER,
diff --git a/tools/perf/util/thread_map.c b/tools/perf/util/thread_map.c
index 6ec3c5c..4e666b9 100644
--- a/tools/perf/util/thread_map.c
+++ b/tools/perf/util/thread_map.c
@@ -92,8 +92,8 @@
{
DIR *proc;
int max_threads = 32, items, i;
- char path[256];
- struct dirent dirent, *next, **namelist = NULL;
+ char path[NAME_MAX + 1 + 6];
+ struct dirent *dirent, **namelist = NULL;
struct thread_map *threads = thread_map__alloc(max_threads);
if (threads == NULL)
@@ -106,16 +106,16 @@
threads->nr = 0;
atomic_set(&threads->refcnt, 1);
- while (!readdir_r(proc, &dirent, &next) && next) {
+ while ((dirent = readdir(proc)) != NULL) {
char *end;
bool grow = false;
struct stat st;
- pid_t pid = strtol(dirent.d_name, &end, 10);
+ pid_t pid = strtol(dirent->d_name, &end, 10);
if (*end) /* only interested in proper numerical dirents */
continue;
- snprintf(path, sizeof(path), "/proc/%s", dirent.d_name);
+ snprintf(path, sizeof(path), "/proc/%s", dirent->d_name);
if (stat(path, &st) != 0)
continue;
diff --git a/tools/power/cpupower/utils/helpers/cpuid.c b/tools/power/cpupower/utils/helpers/cpuid.c
index 93b0aa7..39c2c7d 100644
--- a/tools/power/cpupower/utils/helpers/cpuid.c
+++ b/tools/power/cpupower/utils/helpers/cpuid.c
@@ -156,6 +156,7 @@
*/
case 0x2C: /* Westmere EP - Gulftown */
cpu_info->caps |= CPUPOWER_CAP_HAS_TURBO_RATIO;
+ break;
case 0x2A: /* SNB */
case 0x2D: /* SNB Xeon */
case 0x3A: /* IVB */
diff --git a/tools/testing/selftests/capabilities/test_execve.c b/tools/testing/selftests/capabilities/test_execve.c
index 10a21a9..763f37f 100644
--- a/tools/testing/selftests/capabilities/test_execve.c
+++ b/tools/testing/selftests/capabilities/test_execve.c
@@ -138,9 +138,6 @@
if (chdir(cwd) != 0)
err(1, "chdir to private tmpfs");
-
- if (umount2(".", MNT_DETACH) != 0)
- err(1, "detach private tmpfs");
}
static void copy_fromat_to(int fromfd, const char *fromname, const char *toname)
@@ -248,7 +245,7 @@
err(1, "chown");
if (chmod("validate_cap_sgidnonroot", S_ISGID | 0710) != 0)
err(1, "chmod");
-}
+ }
capng_get_caps_process();
@@ -384,7 +381,7 @@
} else {
printf("[RUN]\tNon-root +ia, sgidnonroot => i\n");
exec_other_validate_cap("./validate_cap_sgidnonroot",
- false, false, true, false);
+ false, false, true, false);
if (fork_wait()) {
printf("[RUN]\tNon-root +ia, sgidroot => i\n");
diff --git a/tools/testing/selftests/x86/ldt_gdt.c b/tools/testing/selftests/x86/ldt_gdt.c
index 31a3035..923e59e 100644
--- a/tools/testing/selftests/x86/ldt_gdt.c
+++ b/tools/testing/selftests/x86/ldt_gdt.c
@@ -394,6 +394,51 @@
}
}
+#ifdef __i386__
+
+#ifndef SA_RESTORER
+#define SA_RESTORER 0x04000000
+#endif
+
+/*
+ * The UAPI header calls this 'struct sigaction', which conflicts with
+ * glibc. Sigh.
+ */
+struct fake_ksigaction {
+ void *handler; /* the real type is nasty */
+ unsigned long sa_flags;
+ void (*sa_restorer)(void);
+ unsigned char sigset[8];
+};
+
+static void fix_sa_restorer(int sig)
+{
+ struct fake_ksigaction ksa;
+
+ if (syscall(SYS_rt_sigaction, sig, NULL, &ksa, 8) == 0) {
+ /*
+ * glibc has a nasty bug: it sometimes writes garbage to
+ * sa_restorer. This interacts quite badly with anything
+ * that fiddles with SS because it can trigger legacy
+ * stack switching. Patch it up. See:
+ *
+ * https://sourceware.org/bugzilla/show_bug.cgi?id=21269
+ */
+ if (!(ksa.sa_flags & SA_RESTORER) && ksa.sa_restorer) {
+ ksa.sa_restorer = NULL;
+ if (syscall(SYS_rt_sigaction, sig, &ksa, NULL,
+ sizeof(ksa.sigset)) != 0)
+ err(1, "rt_sigaction");
+ }
+ }
+}
+#else
+static void fix_sa_restorer(int sig)
+{
+ /* 64-bit glibc works fine. */
+}
+#endif
+
static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *),
int flags)
{
@@ -405,6 +450,7 @@
if (sigaction(sig, &sa, 0))
err(1, "sigaction");
+ fix_sa_restorer(sig);
}
static jmp_buf jmpbuf;
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index 46dbc0a..49001fa8 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -868,7 +868,8 @@
continue;
kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
- kvm->buses[bus_idx]->ioeventfd_count--;
+ if (kvm->buses[bus_idx])
+ kvm->buses[bus_idx]->ioeventfd_count--;
ioeventfd_release(p);
ret = 0;
break;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 336ed26..cb092bd 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -654,8 +654,11 @@
list_del(&kvm->vm_list);
spin_unlock(&kvm_lock);
kvm_free_irq_routing(kvm);
- for (i = 0; i < KVM_NR_BUSES; i++)
- kvm_io_bus_destroy(kvm->buses[i]);
+ for (i = 0; i < KVM_NR_BUSES; i++) {
+ if (kvm->buses[i])
+ kvm_io_bus_destroy(kvm->buses[i]);
+ kvm->buses[i] = NULL;
+ }
kvm_coalesced_mmio_free(kvm);
#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
@@ -3271,6 +3274,8 @@
};
bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
+ if (!bus)
+ return -ENOMEM;
r = __kvm_io_bus_write(vcpu, bus, &range, val);
return r < 0 ? r : 0;
}
@@ -3288,6 +3293,8 @@
};
bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
+ if (!bus)
+ return -ENOMEM;
/* First try the device referenced by cookie. */
if ((cookie >= 0) && (cookie < bus->dev_count) &&
@@ -3338,6 +3345,8 @@
};
bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
+ if (!bus)
+ return -ENOMEM;
r = __kvm_io_bus_read(vcpu, bus, &range, val);
return r < 0 ? r : 0;
}
@@ -3350,6 +3359,9 @@
struct kvm_io_bus *new_bus, *bus;
bus = kvm->buses[bus_idx];
+ if (!bus)
+ return -ENOMEM;
+
/* exclude ioeventfd which is limited by maximum fd */
if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1)
return -ENOSPC;
@@ -3369,37 +3381,41 @@
}
/* Caller must hold slots_lock. */
-int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
- struct kvm_io_device *dev)
+void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
+ struct kvm_io_device *dev)
{
- int i, r;
+ int i;
struct kvm_io_bus *new_bus, *bus;
bus = kvm->buses[bus_idx];
- r = -ENOENT;
+ if (!bus)
+ return;
+
for (i = 0; i < bus->dev_count; i++)
if (bus->range[i].dev == dev) {
- r = 0;
break;
}
- if (r)
- return r;
+ if (i == bus->dev_count)
+ return;
new_bus = kmalloc(sizeof(*bus) + ((bus->dev_count - 1) *
sizeof(struct kvm_io_range)), GFP_KERNEL);
- if (!new_bus)
- return -ENOMEM;
+ if (!new_bus) {
+ pr_err("kvm: failed to shrink bus, removing it completely\n");
+ goto broken;
+ }
memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range));
new_bus->dev_count--;
memcpy(new_bus->range + i, bus->range + i + 1,
(new_bus->dev_count - i) * sizeof(struct kvm_io_range));
+broken:
rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
synchronize_srcu_expedited(&kvm->srcu);
kfree(bus);
- return r;
+ return;
}
static struct notifier_block kvm_cpu_notifier = {
diff --git a/virt/kvm/vfio.c b/virt/kvm/vfio.c
index 1dd087d..111e09c 100644
--- a/virt/kvm/vfio.c
+++ b/virt/kvm/vfio.c
@@ -47,6 +47,22 @@
return vfio_group;
}
+static bool kvm_vfio_external_group_match_file(struct vfio_group *group,
+ struct file *filep)
+{
+ bool ret, (*fn)(struct vfio_group *, struct file *);
+
+ fn = symbol_get(vfio_external_group_match_file);
+ if (!fn)
+ return false;
+
+ ret = fn(group, filep);
+
+ symbol_put(vfio_external_group_match_file);
+
+ return ret;
+}
+
static void kvm_vfio_group_put_external_user(struct vfio_group *vfio_group)
{
void (*fn)(struct vfio_group *);
@@ -171,18 +187,13 @@
if (!f.file)
return -EBADF;
- vfio_group = kvm_vfio_group_get_external_user(f.file);
- fdput(f);
-
- if (IS_ERR(vfio_group))
- return PTR_ERR(vfio_group);
-
ret = -ENOENT;
mutex_lock(&kv->lock);
list_for_each_entry(kvg, &kv->group_list, node) {
- if (kvg->vfio_group != vfio_group)
+ if (!kvm_vfio_external_group_match_file(kvg->vfio_group,
+ f.file))
continue;
list_del(&kvg->node);
@@ -196,7 +207,7 @@
mutex_unlock(&kv->lock);
- kvm_vfio_group_put_external_user(vfio_group);
+ fdput(f);
kvm_vfio_update_coherency(dev);